# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for various tensorflow.ops.tf.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.framework import importer from tensorflow.python.ops import array_ops # TODO(zongheng): it'd be great to factor out this function and various random # SparseTensor gen funcs. def _sparsify(x, thresh=0.5, index_dtype=np.int64): x[x < thresh] = 0 non_zero = np.where(x) x_indices = np.vstack(non_zero).astype(index_dtype).T x_values = x[non_zero] x_shape = x.shape return tf.SparseTensor( indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values) class ShapeOpsTest(tf.test.TestCase): def _compareShape(self, x, use_gpu=False): np_ans = np.array(np.shape(x)) with self.test_session(use_gpu=use_gpu): tf_ans = tf.shape(x) tf_ans_64 = tf.shape(x, out_type=tf.int64) result = tf_ans.eval() result_64 = tf_ans_64.eval() self.assertAllEqual(np_ans, result) self.assertAllEqual(np_ans, result_64) self.assertShapeEqual(np_ans, tf_ans) def _compareShapeSparse(self, x_np, use_gpu=False): np_ans = np.array(np.shape(x_np)) x_tf, unused_nnz = _sparsify(x_np) with self.test_session(use_gpu=use_gpu): tf_ans = tf.shape(x_tf) result = tf_ans.eval() self.assertAllEqual(np_ans, result) self.assertShapeEqual(np_ans, tf_ans) def _compareShapeN(self, x, use_gpu=False): np_ans = np.array(np.shape(x)) with self.test_session(use_gpu=use_gpu) as sess: tf_ans = tf.shape_n([x, x, x]) tf_ans_64 = tf.shape_n([x, x, x], out_type=tf.int64) result = sess.run(tf_ans) result_64 = sess.run(tf_ans_64) for i in range(3): self.assertAllEqual(np_ans, result[i]) self.assertAllEqual(np_ans, result_64[i]) self.assertShapeEqual(np_ans, tf_ans[i]) def _compareRank(self, x, use_gpu=False): np_ans = np.asarray(np.ndim(x)) with self.test_session(use_gpu=use_gpu): tf_ans = tf.rank(x) result = tf_ans.eval() self.assertAllEqual(np_ans, result) self.assertShapeEqual(np_ans, tf_ans) def _compareRankSparse(self, x_np, use_gpu=False): np_ans = np.asarray(np.ndim(x_np)) x_tf, unused_nnz = _sparsify(x_np) with self.test_session(use_gpu=use_gpu): tf_ans = tf.rank(x_tf) result = tf_ans.eval() self.assertAllEqual(np_ans, result) self.assertShapeEqual(np_ans, tf_ans) def _compareSize(self, x, use_gpu=False): np_ans = np.asarray(np.size(x)) with self.test_session(use_gpu=use_gpu): tf_ans = tf.size(x) result = tf_ans.eval() tf_ans_64 = tf.size(x, out_type=tf.int64) result_64 = tf_ans_64.eval() self.assertAllEqual(np_ans, result) self.assertAllEqual(np_ans, result_64) self.assertShapeEqual(np_ans, tf_ans) def _compareSizeSparse(self, x_np, use_gpu=False): np_ans = np.asarray(np.size(x_np)) x_tf, unused_nnz = _sparsify(x_np) with self.test_session(use_gpu=use_gpu): tf_ans = tf.size(x_tf) result = tf_ans.eval() self.assertAllEqual(np_ans, result) 
self.assertShapeEqual(np_ans, tf_ans) def _testCpu(self, x): self._compareShape(x, use_gpu=False) self._compareShapeN(x, use_gpu=False) self._compareRank(x, use_gpu=False) self._compareSize(x, use_gpu=False) self._compareShapeSparse(x, use_gpu=False) self._compareRankSparse(x, use_gpu=False) self._compareSizeSparse(x, use_gpu=False) def _testGpu(self, x): self._compareShape(x, use_gpu=True) self._compareShapeN(x, use_gpu=True) self._compareRank(x, use_gpu=True) self._compareSize(x, use_gpu=True) self._compareShapeSparse(x, use_gpu=True) self._compareRankSparse(x, use_gpu=True) self._compareSizeSparse(x, use_gpu=True) def _testAll(self, x): self._testCpu(x) self._testGpu(x) def testBasic(self): self._testAll(np.random.randn(2)) self._testAll(np.random.randn(2, 3)) self._testAll(np.random.randn(2, 3, 5)) self._testAll(np.random.randn(2, 3, 5, 7)) self._testAll(np.random.randn(2, 3, 5, 7, 11)) self._testAll(np.random.randn(2, 3, 5, 7, 11, 13)) # Disabled because it takes too long to run, but manually verified # as passing at time of writing. def _test64BitOutput(self): with self.test_session(): inp = tf.zeros([2**31]) num_elements = array_ops.size_internal( inp, optimize=False, out_type=tf.int64) self.assertEqual(2**31, num_elements.eval()) # Too large for tf.int32 output. with self.assertRaises(tf.errors.InvalidArgumentError): with self.test_session(): inp = tf.zeros([2**31]) num_elements = array_ops.size_internal( inp, optimize=False, out_type=tf.int32) self.assertEqual(2**31, num_elements.eval()) def _compareExpandDims(self, x, dim, use_gpu): np_ans = np.expand_dims(x, axis=dim) with self.test_session(use_gpu=use_gpu): tensor = tf.expand_dims(x, dim) tf_ans = tensor.eval() self.assertShapeEqual(np_ans, tensor) self.assertAllEqual(np_ans, tf_ans) def _compareExpandDimsAll(self, x, dim): self._compareExpandDims(x, dim, False) self._compareExpandDims(x, dim, True) def testExpandDims(self): self._compareExpandDimsAll(np.zeros([2]), 0) self._compareExpandDimsAll(np.zeros([2]), 1) self._compareExpandDimsAll(np.zeros([2]), -1) self._compareExpandDimsAll(np.zeros([2, 3]), 0) self._compareExpandDimsAll(np.zeros([2, 3]), 1) self._compareExpandDimsAll(np.zeros([2, 3]), 2) self._compareExpandDimsAll(np.zeros([2, 3]), -1) self._compareExpandDimsAll(np.zeros([2, 3]), -2) self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0) self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1) self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2) self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3) self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1) self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2) self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3) self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4) def testExpandDimsErrors(self): with self.test_session(): self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5) self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4) def testExpandDimsGradient(self): with self.test_session(): inp = tf.constant(np.random.rand(4, 2).astype("f"), dtype=tf.float32) squeezed = tf.expand_dims(inp, 1) err = tf.test.compute_gradient_error(inp, [4, 2], squeezed, [4, 1, 2]) self.assertLess(err, 1e-3) def testExpandDimsScalar(self): with self.test_session(): inp = tf.constant(7) self.assertAllEqual([7], tf.expand_dims(inp, 0).eval()) self.assertAllEqual([7], tf.expand_dims(inp, -1).eval()) def _compareSqueeze(self, x, squeeze_dims, use_gpu): with self.test_session(use_gpu=use_gpu): if squeeze_dims: np_ans = np.squeeze(x, axis=tuple(squeeze_dims)) tensor = tf.squeeze(x, 
squeeze_dims) tf_ans = tensor.eval() else: np_ans = np.squeeze(x) tensor = tf.squeeze(x) tf_ans = tensor.eval() self.assertShapeEqual(np_ans, tensor) self.assertAllEqual(np_ans, tf_ans) def _compareSqueezeAll(self, x, squeeze_dims=None): if squeeze_dims is None: squeeze_dims = [] self._compareSqueeze(x, squeeze_dims, False) self._compareSqueeze(x, squeeze_dims, True) def testSqueeze(self): # Nothing to squeeze. self._compareSqueezeAll(np.zeros([2])) self._compareSqueezeAll(np.zeros([2, 3])) # Squeeze the middle element away. self._compareSqueezeAll(np.zeros([2, 1, 2])) # Squeeze on both ends. self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1])) def testSqueezeSpecificDimension(self): # Positive squeeze dim index. self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0]) self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4]) self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2]) # Negative squeeze dim index. self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1]) self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5]) self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1]) def testSqueezeAllOnes(self): # Numpy squeezes a 1 element tensor into a zero dimensional tensor. # Verify that we do the same. for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): tensor = tf.squeeze(np.zeros([1, 1, 1]), []) self.assertEqual(np.shape(1), tensor.get_shape()) tf_ans = tensor.eval() self.assertEqual(np.shape(1), tf_ans.shape) def testSqueezeOnlyOnes(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): input_1x1x3 = np.zeros([1, 1, 3]) self._compareSqueezeAll(input_1x1x3) self._compareSqueezeAll(input_1x1x3, [0]) self._compareSqueezeAll(input_1x1x3, [1]) self.assertRaises(ValueError, tf.squeeze, input_1x1x3, [2]) def testSqueezeErrors(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [-4]) self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [0, -4]) self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [3]) self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [2, 3]) def testSqueezeGradient(self): with self.test_session(): inp = np.random.rand(4, 2).astype("f") a = tf.reshape(inp, [4, 1, 2]) squeezed = tf.squeeze(a, []) err = tf.test.compute_gradient_error(a, [4, 1, 2], squeezed, [4, 2]) self.assertLess(err, 1e-3) def testSqueezeGradientWithSqueezeDims(self): with self.test_session(): inp = np.random.rand(4, 2).astype("f") a = tf.reshape(inp, [4, 1, 2, 1]) squeezed = tf.squeeze(a, [1]) err = tf.test.compute_gradient_error(a, [4, 1, 2, 1], squeezed, [4, 2, 1]) self.assertLess(err, 1e-3) def testSqueezeWithUnknownShape(self): with self.test_session(): a = tf.placeholder(tf.float32, shape=[2, None]) squeezed = tf.squeeze(a, [1]) self.assertEqual([2], squeezed.get_shape().as_list()) squeezed = tf.squeeze(a) self.assertEqual(None, squeezed.get_shape()) self.assertRaises(ValueError, tf.squeeze, a, [0]) self.assertRaises(ValueError, tf.squeeze, a, [100]) class TileTest(tf.test.TestCase): def testScalar(self): for use_gpu in False, True: with self.test_session(use_gpu=use_gpu): a = tf.constant(7, shape=[], dtype=tf.float32) tiled = tf.tile(a, []) result = tiled.eval() self.assertEqual(result.shape, ()) self.assertEqual([], tiled.get_shape()) self.assertEqual(7, result) def testSimple(self): with self.test_session(): inp = np.random.rand(4, 1).astype(np.float32) a = tf.constant(inp) tiled = tf.tile(a, [1, 4]) result = tiled.eval() 
self.assertEqual(result.shape, (4, 4)) self.assertEqual([4, 4], tiled.get_shape()) self.assertTrue((result == np.tile(inp, (1, 4))).all()) def testEmpty(self): with self.test_session(): inp = np.random.rand(2, 3).astype(np.float32) a = tf.constant(inp) tiled = tf.tile(a, [5, 0]) result = tiled.eval() self.assertEqual(result.shape, (10, 0)) self.assertEqual([10, 0], tiled.get_shape()) def testUnknownInputShape(self): """Importing can call _TileShape without shape of <multiples> known.""" with self.test_session(): inp = tf.placeholder(tf.float32) # unknown shape multiples = tf.constant([1, 2, 3, 4], dtype=np.int32) tiled = tf.tile(inp, multiples) gdef = tiled.graph.as_graph_def() # Move the tile op to the start of the graph so that shapes of its inputs # are not available when the shape function runs on import. swapped = False for i, n in enumerate(gdef.node): if n.op == "Tile": # Swap tile op to be first in gdef.node assert i != 0 new_node = tf.NodeDef() new_node.CopyFrom(gdef.node[i]) gdef.node[i].CopyFrom(gdef.node[0]) gdef.node[0].CopyFrom(new_node) swapped = True assert swapped tiled_imported, = importer.import_graph_def(gdef, return_elements=[tiled.name]) self.assertEqual(4, tiled_imported.get_shape().ndims) def testTypes(self): types_to_test = { "bool": (tf.bool, bool), "float32": (tf.float32, float), "float64": (tf.float64, float), "complex64": (tf.complex64, complex), "complex128": (tf.complex128, complex), "uint8": (tf.uint8, int), "int32": (tf.int32, int), "int64": (tf.int64, int), bytes: (tf.string, bytes) } for dtype_np, (dtype_tf, cast) in types_to_test.items(): with self.test_session(use_gpu=True): inp = np.random.rand(4, 1).astype(dtype_np) a = tf.constant([cast(x) for x in inp.ravel(order="C")], shape=[4, 1], dtype=dtype_tf) tiled = tf.tile(a, [1, 4]) result = tiled.eval() self.assertEqual(result.shape, (4, 4)) self.assertEqual([4, 4], tiled.get_shape()) self.assertAllEqual(result, np.tile(inp, (1, 4))) def testInvalidDim(self): with self.test_session(): inp = np.random.rand(4, 1).astype("f") a = tf.constant([float(x) for x in inp.ravel(order="C")], shape=[4, 1], dtype=tf.float32) # Wrong length of multiples. with self.assertRaises(ValueError): tf.tile(a, [1, 4, 2]) # Wrong rank for multiples. 
with self.assertRaises(ValueError): tf.tile(a, [[2, 3], [3, 4]]).eval() def _RunAndVerifyResult(self, use_gpu): with self.test_session(use_gpu=use_gpu): # Random dims of rank 5 input_shape = np.random.randint(1, 4, size=5) inp = np.random.rand(*input_shape).astype("f") a = tf.constant([float(x) for x in inp.ravel(order="C")], shape=input_shape, dtype=tf.float32) multiples = np.random.randint(1, 4, size=5).astype(np.int32) tiled = tf.tile(a, multiples) result = tiled.eval() self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(result.shape)).all()) self.assertAllEqual(result, np.tile(inp, tuple(multiples))) self.assertShapeEqual(result, tiled) def testRandom(self): for _ in range(5): self._RunAndVerifyResult(use_gpu=False) for _ in range(5): self._RunAndVerifyResult(use_gpu=True) def testGradientSimpleReduction(self): with self.test_session(): inp = np.random.rand(4, 1).astype("f") a = tf.constant([float(x) for x in inp.flatten()], shape=[4, 1], dtype=tf.float32) tiled = tf.tile(a, [1, 4]) grad_shape = [4, 4] grad_inp = np.random.rand(*grad_shape).astype("f") grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()], shape=grad_shape) grad = tf.gradients([tiled], [a], [grad_tensor])[0] self.assertShapeEqual(inp, grad) result = grad.eval() self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3) def testGradientStridedReduction(self): with self.test_session(): inp = np.random.rand(4, 2).astype("f") a = tf.constant([float(x) for x in inp.flatten()], shape=[4, 2], dtype=tf.float32) tiled = tf.tile(a, [1, 2]) grad_shape = [4, 4] grad_inp = np.random.rand(*grad_shape).astype("f") grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()], shape=grad_shape) grad = tf.gradients([tiled], [a], [grad_tensor])[0] self.assertShapeEqual(inp, grad) result = grad.eval() expected_shape = [4, 2] expected = np.zeros(expected_shape) expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2] expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3] self.assertTrue((np.abs(expected - result) < 1e-3).all()) def testGradientSimpleReductionOnGPU(self): with self.test_session(use_gpu=True): inp = np.random.rand(4, 1).astype("f") a = tf.constant([float(x) for x in inp.flatten()], shape=[4, 1], dtype=tf.float32) tiled = tf.tile(a, [1, 4]) grad_shape = [4, 4] grad_inp = np.random.rand(*grad_shape).astype("f") grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()], shape=grad_shape) grad = tf.gradients([tiled], [a], [grad_tensor])[0] result = grad.eval() self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3) def testGradientStridedReductionOnGPU(self): with self.test_session(use_gpu=True): inp = np.random.rand(4, 2).astype("f") a = tf.constant([float(x) for x in inp.flatten()], shape=[4, 2], dtype=tf.float32) tiled = tf.tile(a, [1, 2]) grad_shape = [4, 4] grad_inp = np.random.rand(*grad_shape).astype("f") grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()], shape=grad_shape) grad = tf.gradients([tiled], [a], [grad_tensor])[0] result = grad.eval() expected_shape = [4, 2] expected = np.zeros(expected_shape) expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2] expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3] self.assertAllClose(expected, result, 1e-3) def _RunAndVerifyGradientResult(self, input_shape, multiples): for use_gpu in False, True: with self.test_session(use_gpu=use_gpu): # Random values inp = np.asarray(np.random.rand(*input_shape)) a = tf.constant(inp, dtype=tf.float64) tiled = tf.tile(a, multiples) grad_shape = list(np.array(multiples) * 
np.array(inp.shape)) err = tf.test.compute_gradient_error(a, list(input_shape), tiled, grad_shape, x_init_value=inp) print("tile(float) error = ", err) self.assertLess(err, 1e-3) def testGradientRandomScalar(self): self._RunAndVerifyGradientResult([], []) def testGradientRandom(self): self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1]) self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2]) self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2]) def testGradientStridedReductionGC(self): with self.test_session(): inp = np.random.rand(4, 2).astype("f") a = tf.constant([float(x) for x in inp.flatten()], shape=[4, 2], dtype=tf.float32) tiled = tf.tile(a, [1, 2]) err = tf.test.compute_gradient_error(a, [4, 2], tiled, [4, 4]) self.assertLess(err, 1e-3) def testShapeFunctionEdgeCases(self): # Unknown multiples shape. inp = tf.constant(0.0, shape=[4, 4, 4, 4]) tiled = tf.tile(inp, tf.placeholder(tf.int32)) self.assertEqual([None, None, None, None], tiled.get_shape().as_list()) # Unknown input shape. inp = tf.placeholder(tf.float32) tiled = tf.tile(inp, [2, 2, 2, 2]) self.assertEqual([None, None, None, None], tiled.get_shape().as_list()) # Unknown input and multiples shape. inp = tf.placeholder(tf.float32) tiled = tf.tile(inp, tf.placeholder(tf.int32)) self.assertIs(None, tiled.get_shape().ndims) # Known input and partially known multiples. inp = tf.constant(0.0, shape=[1, 1]) tiled = tf.tile(inp, [tf.placeholder(tf.int32), 7]) self.assertEqual([None, 7], tiled.get_shape().as_list()) # Mismatched input rank and multiples length. inp = tf.placeholder(tf.float32, shape=[None, None]) with self.assertRaises(ValueError): tiled = tf.tile(inp, tf.placeholder(tf.int32, shape=[3])) if __name__ == "__main__": tf.test.main()
#!/usr/bin/env vpython
# -*- coding: UTF-8 -*-
#
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Asserts that expected and generated list of GRD files is equal.
"""

import sys
import json


def main(argv):
    expected_grd_sources = open(argv[1], 'r').read()
    generated_grd_sources = open(argv[2], 'r').read()
    stamp_file = argv[3]

    expected_json = json.loads(expected_grd_sources)
    generated_json = json.loads(generated_grd_sources)

    expected_json.sort()
    generated_json.sort()

    return_code = 0

    for expected_file in expected_json:
        if expected_file not in generated_json:
            print(
                "File " + expected_file +
                " is not generated by any action in front_end." +
                " Either remove it from config/gni/devtools_grd_files.gni" +
                " or add the missing file to an action" +
                " (for example a devtools_module or devtools_entrypoint definition).\n"
            )
            return_code = 1

    for generated_file in generated_json:
        if generated_file not in expected_json:
            print(
                "File " + generated_file +
                " is not listed in config/gni/devtools_grd_files.gni." +
                " Either add the file to the grd_files_release_sources/grd_files_debug_sources," +
                " or remove the generated file from an action.\n")
            return_code = 1

    with open(stamp_file, 'w', encoding="utf8") as fp:
        fp.write("")

    return return_code


if __name__ == '__main__':
    sys.exit(main(sys.argv))
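# A minimal sketch (not part of the original script) of how the check above can be
# exercised; the script name and JSON entries below are hypothetical stand-ins for the
# build-generated inputs:
#
#   vpython assert_grd_files.py expected_grd_files.json generated_grd_files.json grd.stamp
#
# Programmatically, the same check can be driven by calling main() directly:
import json
import os
import tempfile

_tmp = tempfile.mkdtemp()
_expected, _generated, _stamp = (os.path.join(_tmp, name) for name in
                                 ('expected.json', 'generated.json', 'grd.stamp'))
with open(_expected, 'w') as f:
    json.dump(['front_end/panels/app.js'], f)  # hypothetical GRD entry
with open(_generated, 'w') as f:
    json.dump(['front_end/panels/app.js'], f)
# Identical lists -> the check passes and writes an empty stamp file.
assert main(['assert_grd_files.py', _expected, _generated, _stamp]) == 0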
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict, Optional, Sequence, Tuple, Union

import mmcv
import numpy as np
import torch
from torch.utils.data import Dataset

from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape
from .mmsegmentation import MMSEG_TASK


def process_model_config(model_cfg: mmcv.Config,
                         imgs: Union[Sequence[str], Sequence[np.ndarray]],
                         input_shape: Optional[Sequence[int]] = None):
    """Process the model config.

    Args:
        model_cfg (mmcv.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
            data types are List[str], List[np.ndarray].
        input_shape (list[int]): A list of two integers in (width, height)
            format specifying input shape. Default: None.

    Returns:
        mmcv.Config: the model config after processing.
    """
    from mmseg.apis.inference import LoadImage
    cfg = model_cfg.copy()

    if isinstance(imgs[0], np.ndarray):
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    # for static exporting
    if input_shape is not None:
        cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape)
        cfg.data.test.pipeline[1]['transforms'][0]['keep_ratio'] = False

    cfg.data.test.pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    return cfg


@MMSEG_TASK.register_module(Task.SEGMENTATION.value)
class Segmentation(BaseTask):
    """Segmentation task class.

    Args:
        model_cfg (mmcv.Config): Original PyTorch model config file.
        deploy_cfg (mmcv.Config): Deployment config file or loaded Config
            object.
        device (str): A string specifying the device type.
    """

    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                 device: str):
        super(Segmentation, self).__init__(model_cfg, deploy_cfg, device)

    def init_backend_model(self,
                           model_files: Optional[str] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize backend model.

        Args:
            model_files (Sequence[str]): Input model files.

        Returns:
            nn.Module: An initialized backend model.
        """
        from .segmentation_model import build_segmentation_model
        model = build_segmentation_model(
            model_files, self.model_cfg, self.deploy_cfg, device=self.device)
        return model.eval()

    def init_pytorch_model(self,
                           model_checkpoint: Optional[str] = None,
                           cfg_options: Optional[Dict] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize torch model.

        Args:
            model_checkpoint (str): The checkpoint file of torch model,
                defaults to `None`.
            cfg_options (dict): Optional config key-pair parameters.

        Returns:
            nn.Module: An initialized torch model generated by OpenMMLab
                codebases.
        """
        from mmcv.cnn.utils import revert_sync_batchnorm
        from mmseg.apis import init_segmentor
        model = init_segmentor(self.model_cfg, model_checkpoint, self.device)
        model = revert_sync_batchnorm(model)
        return model.eval()

    def create_input(
            self,
            imgs: Union[str, np.ndarray],
            input_shape: Sequence[int] = None) -> Tuple[Dict, torch.Tensor]:
        """Create input for segmentor.

        Args:
            imgs (Any): Input image(s), accepted data types are `str`,
                `np.ndarray`, `torch.Tensor`.
            input_shape (list[int]): A list of two integers in (width, height)
                format specifying input shape. Defaults to `None`.

        Returns:
            tuple: (data, img), meta information for the input image and input.
        """
        from mmcv.parallel import collate, scatter
        from mmseg.datasets.pipelines import Compose

        if not isinstance(imgs, (list, tuple)):
            imgs = [imgs]
        cfg = process_model_config(self.model_cfg, imgs, input_shape)
        test_pipeline = Compose(cfg.data.test.pipeline)
        data_list = []
        for img in imgs:
            # prepare data
            data = dict(img=img)
            # build the data pipeline
            data = test_pipeline(data)
            data_list.append(data)

        data = collate(data_list, samples_per_gpu=len(imgs))

        data['img_metas'] = [
            img_metas.data[0] for img_metas in data['img_metas']
        ]
        data['img'] = [img.data[0][None, :] for img in data['img']]
        if self.device != 'cpu':
            data = scatter(data, [self.device])[0]

        return data, data['img']

    def visualize(self,
                  model,
                  image: Union[str, np.ndarray],
                  result: list,
                  output_file: str,
                  window_name: str = '',
                  show_result: bool = False,
                  opacity: float = 0.5):
        """Visualize predictions of a model.

        Args:
            model (nn.Module): Input model.
            image (str | np.ndarray): Input image to draw predictions on.
            result (list): A list of predictions.
            output_file (str): Output file to save drawn image.
            window_name (str): The name of visualization window. Defaults to
                an empty string.
            show_result (bool): Whether to show the result in a window,
                defaults to `False`.
            opacity (float): Opacity of painted segmentation map.
                Defaults to `0.5`.
        """
        show_img = mmcv.imread(image) if isinstance(image, str) else image
        output_file = None if show_result else output_file
        # Need to wrap the result in a list for mmseg
        result = [result]
        model.show_result(
            show_img,
            result,
            out_file=output_file,
            win_name=window_name,
            show=show_result,
            opacity=opacity)

    @staticmethod
    def run_inference(model, model_inputs: Dict[str, torch.Tensor]):
        """Run inference once for a segmentation model of mmseg.

        Args:
            model (nn.Module): Input model.
            model_inputs (dict): A dict containing model inputs tensor and
                meta info.

        Returns:
            list: The predictions of model inference.
        """
        return model(**model_inputs, return_loss=False, rescale=True)

    @staticmethod
    def get_partition_cfg(partition_type: str) -> Dict:
        raise NotImplementedError('Not supported yet.')

    @staticmethod
    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
        """Get input tensor from input data.

        Args:
            input_data (dict): Input data containing meta info and image
                tensor.

        Returns:
            torch.Tensor: An image in `Tensor`.
        """
        return input_data['img'][0]

    @staticmethod
    def evaluate_outputs(model_cfg,
                         outputs: Sequence,
                         dataset: Dataset,
                         metrics: Optional[str] = None,
                         out: Optional[str] = None,
                         metric_options: Optional[dict] = None,
                         format_only: bool = False,
                         log_file: Optional[str] = None):
        """Perform post-processing on predictions of the model.

        Args:
            outputs (list): A list of predictions of model inference.
            dataset (Dataset): Input dataset to run test.
            model_cfg (mmcv.Config): The model config.
            metrics (str): Evaluation metrics, which depends on the codebase
                and the dataset, e.g. "mIoU" for generic datasets, and
                "cityscapes" for Cityscapes in mmseg.
            out (str): Output result file in pickle format, defaults to `None`.
            metric_options (dict): Custom options for evaluation, will be
                kwargs for the dataset.evaluate() function. Defaults to `None`.
            format_only (bool): Format the output results without performing
                evaluation. It is useful when you want to format the result to
                a specific format and submit it to the test server. Defaults
                to `False`.
            log_file (str | None): The file to write the evaluation results.
                Defaults to `None`, in which case the results are only printed
                on stdout.
        """
        from mmcv.utils import get_logger
        logger = get_logger('test', log_file=log_file)

        if out:
            logger.debug(f'writing results to {out}')
            mmcv.dump(outputs, out)

        kwargs = {} if metric_options is None else metric_options
        if format_only:
            dataset.format_results(outputs, **kwargs)
        if metrics:
            dataset.evaluate(outputs, metrics, logger=logger, **kwargs)

    def get_preprocess(self) -> Dict:
        """Get the preprocess information for SDK.

        Return:
            dict: Composed of the preprocess information.
        """
        input_shape = get_input_shape(self.deploy_cfg)
        load_from_file = self.model_cfg.data.test.pipeline[0]
        model_cfg = process_model_config(self.model_cfg, [''], input_shape)
        preprocess = model_cfg.data.test.pipeline
        preprocess[0] = load_from_file
        return preprocess

    def get_postprocess(self) -> Dict:
        """Get the postprocess information for SDK.

        Return:
            dict: Composed of the postprocess information (the model's
                decode_head config).
        """
        postprocess = self.model_cfg.model.decode_head
        return postprocess

    def get_model_name(self) -> str:
        """Get the model name.

        Return:
            str: the name of the model.
        """
        assert 'decode_head' in self.model_cfg.model, \
            'model config contains no decode_head'
        name = self.model_cfg.model.decode_head.type[:-4].lower()
        return name
from Crypto.PublicKey import RSA

key = RSA.generate(2048)

private_key = key.export_key()
file_out = open("private.pem", "wb")
file_out.write(private_key)
file_out.close()

public_key = key.publickey().export_key()
file_out = open("receiver.pem", "wb")
file_out.write(public_key)
file_out.close()
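# A short companion sketch (assumes PyCryptodome, as above) showing how the two PEM
# files just written can be read back and used for RSA-OAEP encryption/decryption.
from Crypto.Cipher import PKCS1_OAEP

recipient_key = RSA.import_key(open("receiver.pem").read())
ciphertext = PKCS1_OAEP.new(recipient_key).encrypt(b"secret message")

private_key_obj = RSA.import_key(open("private.pem").read())
plaintext = PKCS1_OAEP.new(private_key_obj).decrypt(ciphertext)
assert plaintext == b"secret message"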
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 3.1.4. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '9f%wm(3l6@qcw(bt92#g5$o+i$%qwo%n_&&&796_ir3nmi$103' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'blog.apps.BlogConfig', 'extensions' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'fa-dr' TIME_ZONE = 'Asia/Kabul' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR / 'media'
""" This file offers the methods to automatically retrieve the graph Flagellimonas eckloniae. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def FlagellimonasEckloniae( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Flagellimonas eckloniae graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Flagellimonas eckloniae graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="FlagellimonasEckloniae", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
# -*- coding: UTF-8 -*-

"""
This file is part of SENSE.
(c) 2016- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""

from distutils.core import setup  # use distutils as this allows to build extensions in place
import os
# import glob
import numpy as np
import json

# from setuptools import setup  #, Extension
from setuptools import find_packages  # Always prefer setuptools over distutils

# from Cython.Distutils import build_ext
# from Cython.Build import cythonize


def xxx_get_current_version():
    ppath = os.path.dirname(os.path.realpath(__file__))
    return json.load(open(ppath + os.sep + 'geoval' + os.sep + 'version.json'))


def get_current_version():
    return '0.1'


def get_packages():
    return find_packages()


setup(name='sense',
      version=get_current_version(),
      description='xxx',

      # You can just specify the packages manually here if your project is
      # simple. Or you can use find_packages().
      # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
      packages=get_packages(),
      #~ package_dir={'pycmbs': 'pycmbs'},
      #~ package_data={'pycmbs': ['benchmarking/configuration/*',
      #~                          'benchmarking/logo/*', 'version.json']},

      author="Alexander Loew",
      author_email='alexander.loew@lmu.de',
      maintainer='Alexander Loew',
      maintainer_email='alexander.loew@lmu.de',

      license='APACHE 2',
      url='https://github.com/pygeo/sense',
      long_description='xxxxx',

      # List run-time dependencies here. These will be installed by pip when your
      # project is installed. For an analysis of "install_requires" vs pip's
      # requirements files see:
      # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
      # install_requires=install_requires,

      keywords=["data"],

      # To provide executable scripts, use entry points in preference to the
      # "scripts" keyword. Entry points provide cross-platform support and allow
      # pip to create the appropriate form of executable for the target
      # platform.
      #~ entry_points={
      #~     'console_scripts': [
      #~         'pycmbs_benchmarking = pycmbs_benchmarking:main'
      #~     ]},

      # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
      include_dirs=[np.get_include()]
      )

########################################################################
# Some useful information on shipping packages
########################################################################

# PIP
# 1) on a new computer you need to create a .pypirc file like described in the
#    pypi documentation
# 2) install twine using pip install twine
# 3) generate package using: python setup.py sdist
# 4) just upload using twine upload dist/*
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from gslib.help_provider import HELP_NAME from gslib.help_provider import HELP_NAME_ALIASES from gslib.help_provider import HELP_ONE_LINE_SUMMARY from gslib.help_provider import HelpProvider from gslib.help_provider import HELP_TEXT from gslib.help_provider import HelpType from gslib.help_provider import HELP_TYPE _detailed_help_text = (""" <B>OVERVIEW</B> We're open to incorporating gsutil code changes authored by users. Here are some guidelines: 1. Before we can accept code submissions, we have to jump a couple of legal hurdles. Please fill out either the individual or corporate Contributor License Agreement: - If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an individual CLA (http://code.google.com/legal/individual-cla-v1.0.html). - If you work for a company that wants to allow you to contribute your work to gsutil, then you'll need to sign a corporate CLA (http://code.google.com/legal/corporate-cla-v1.0.html) Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll add you to the official list of contributors and be able to accept your patches. 2. If you found a bug or have an idea for a feature enhancement, we suggest you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it has already been reported by another user. From there you can also subscribe to updates to the issue by clicking the "Watch thread" button at the bottom of the page. 3. It's usually worthwhile to send email to gs-team@google.com about your idea before sending actual code. Often we can discuss the idea and help propose things that could save you later revision work. 4. We tend to avoid adding command line options that are of use to only a very small fraction of users, especially if there's some other way to accommodate such needs. Adding such options complicates the code and also adds overhead to users having to read through an "alphabet soup" list of option documentation. 5. While gsutil has a number of features specific to Google Cloud Storage, it can also be used with other cloud storage providers. We're open to including changes for making gsutil support features specific to other providers, as long as those changes don't make gsutil work worse for Google Cloud Storage. If you do make such changes we recommend including someone with knowledge of the specific provider as a code reviewer (see below). 6. You can check out the gsutil code from the GitHub repository: https://github.com/GoogleCloudPlatform/gsutil To clone a read-only copy of the repository: git clone git://github.com/GoogleCloudPlatform/gsutil.git git submodule update --init --recursive To push your own changes to GitHub, click the Fork button on the repository page and clone the repository from your own fork. 7. The gsutil git repository uses git submodules to pull in external modules. 
After checking out the repository, make sure to also pull the submodules by entering into the gsutil top-level directory and run: git submodule update --init --recursive 8. Please make sure to run all tests against your modified code. To do this, change directories into the gsutil top-level directory and run: ./gsutil test The above tests take a long time to run because they send many requests to the production service. The gsutil test command has a -u argument that will only run unit tests. These run quickly, as they are executed with an in-memory mock storage service implementation. To run only the unit tests, run: ./gsutil test -u If you made changes to boto, please run the boto tests. For these tests you need to use HMAC credentials (from gsutil config -a), because the current boto test suite doesn't import the OAuth2 handler. You'll also need to install some python modules. Change directories into the boto root directory at third_party/boto and run: pip install -r requirements.txt (You probably need to run this command using sudo.) Make sure each of the individual installations succeeded. If they don't you may need to run the install command again. Then ensure your .boto file has HMAC credentials defined (the boto tests don't load the OAUTH2 plugin), and then change directories into boto's tests directory and run: python test.py unit python test.py -t s3 -t gs -t ssl 9. Please consider contributing test code for your change, especially if the change impacts any of the core gsutil code (like the gsutil cp command). 10. When it's time to send us code, please use the Rietveld code review tool rather than simply sending us a code patch. Do this as follows: - Check out the gsutil code from your fork of the gsutil repository and apply your changes. - Download the "upload.py" script from http://code.google.com/p/rietveld/wiki/UploadPyUsage - Run upload.py from your git directory with the changes. - Click the codereview.appspot.com link it generates, click "Edit Issue", and add mfschwartz@google.com as a reviewer, and Cc gs-team@google.com. - Click Publish+Mail Comments. - Once your changes are accepted, submit a pull request on GitHub and we will merge your commits. """) class CommandOptions(HelpProvider): """Additional help about contributing code to gsutil.""" help_spec = { # Name of command or auxiliary help info for which this help applies. HELP_NAME : 'dev', # List of help name aliases. HELP_NAME_ALIASES : ['development', 'developer', 'code', 'mods', 'software'], # Type of help: HELP_TYPE : HelpType.ADDITIONAL_HELP, # One line summary of this help. HELP_ONE_LINE_SUMMARY : 'Contributing Code to gsutil', # The full help text. HELP_TEXT : _detailed_help_text, }
''' Data preparation and feature extraction of arXiv dataset available at https://www.kaggle.com/neelshah18/arxivdataset ''' import argparse import beautifultable as bt from bert_serving.client import BertClient import collections from gensim.models import Word2Vec import pandas as pd import networkx as nx import numpy as np import re from sklearn.model_selection import train_test_split from src import node2vec import time import os LABELS_FP = "data/labels.tsv" TITLE_FEATURES_FP = "data/titles_features.tsv" PARAGRAPHS_FEATURES_FP = "data/paragraphs_features.tsv" NODE_FEATURES_FP = "data/node_features.tsv" TITLES_FP = "data/titles.tsv" IDs_FP = "data/ids.tsv" RANDOM_SEED = 1234 def parse_args(): ''' Parses the arguments. ''' parser = argparse.ArgumentParser(description="http://pyvandenbussche.info/2019/ai-or-not-ai-classifying-arxiv-articles-with-bert/") parser.add_argument('--input', nargs='?', default='data/arxivData.json', help='Input Arxiv json file') return parser.parse_args() def load_data(input_file, tag_of_interest): ''' Load arXiv data :param input_file: path to arxiv file ''' # read the arxiv input json file df = pd.read_json(input_file, orient='records') # flatten author list names. # this is not the most elegant but is made to handle the variation in single/double quotes for name values: # "author": "[{'name': 'Luciano Serafini'}, {'name': \"Artur d'Avila Garcez\"}]", df['author_list'] = df['author'].apply(lambda author_str: [x.strip()[10:-2] for x in author_str[1:-1].split(",")]) # flatten tags list def flatten_tags(tag_str): tags = tag_str[1:-1].split("{'term': '") tags = list(filter(None, [tag.strip()[:tag.find("'")] for tag in tags])) return tags df['tags_list'] = df['tag'].apply(flatten_tags) unique_lists_tags = df['tags_list'].values df['Y'] = [int((tag_of_interest in tags) == True) for tags in unique_lists_tags] print("\t- found {} articles with tag {}".format(df['Y'].sum(), tag_of_interest)) return df def get_titles(df): titles = df["title"].values.tolist() titles = [title.replace('\n ', '').replace('\r', '').lower() for title in titles] return np.array(titles) def get_sentences_embedding(sentences): ''' Query bert server and get back sentence embeddings for each article's title :param titles: articles titles :return: np array of features ''' bc = BertClient() X = bc.encode(sentences) return X def main(args): ''' Pipeline for representational learning for all nodes in the ArXiv graph. ''' print("Loading Arxiv Data") # load data: df = load_data(input_file=args.input, tag_of_interest="cs.CV") # df = load_data(input_file=args.input, tag_of_interest="cs.AI") print("Saving labels Y") np.savetxt(LABELS_FP, df["Y"].values, fmt='%i', delimiter='\t') print("Saving titles and ids") if os.path.isfile(TITLES_FP) & os.path.isfile(IDs_FP): titles = get_titles(df) print("\t- Files already exist. Will reuse them") else: titles = get_titles(df) np.savetxt(TITLES_FP, titles, fmt='%s', delimiter='\t') np.savetxt(IDs_FP, df["id"].values, fmt='%s', delimiter='\t') print("Computing titles embeddings") if os.path.isfile(TITLE_FEATURES_FP): print("\t- File already exists. Will reuse it") title_embed = np.loadtxt(TITLE_FEATURES_FP, delimiter="\t") else: title_embed = get_sentences_embedding(titles) # print(title_embed[0]) print("\t- Saving title features to file") np.savetxt(TITLE_FEATURES_FP, title_embed, delimiter='\t') print("Computing paragraphs embeddings") if os.path.isfile(PARAGRAPHS_FEATURES_FP): print("\t- File already exists. 
Will reuse it") else: with open(PARAGRAPHS_FEATURES_FP, 'w') as f: for index, row in df.iterrows(): # Split Paragraph on basis of '.' or ? or !. sentences = re.split(r"\.|\?|\!", row["summary"]) sentences = [sentence.replace('\n ', '').replace('\r', '').strip() for sentence in sentences] sentences = list(filter(None, sentences)) sent_embed = get_sentences_embedding(sentences) par_embed = np.average(sent_embed, axis=0) np.savetxt(f, par_embed[None], delimiter='\t') if index % 100 == 0: print("\t {}/{} paragraphs processed".format(index, len(df.index))) par_embed = np.loadtxt(PARAGRAPHS_FEATURES_FP, delimiter="\t") print("Computing node embeddings") print("Split in train/test sets") # get index for train test elements y = np.array(df["Y"].values) print("y shape: {}".format(y.shape)) Idx = np.array(range(len(df.index))) print("Idx shape: {}".format(Idx.shape)) train_idx, test_idx, y_train, y_test = train_test_split(Idx, y, random_state=RANDOM_SEED, stratify=y, test_size=.2) # X_train = title_embed[train_idx] X_train = np.concatenate((title_embed[train_idx], par_embed[train_idx]), axis=1) np.savetxt("data/X_train.tsv", X_train, delimiter='\t') print("X_train shape: {}".format(X_train.shape)) np.savetxt("data/y_train.tsv", y_train, delimiter='\t') print("y_train shape: {}".format(y_train.shape)) np.savetxt("data/title_train.tsv", titles[train_idx], fmt="%s", delimiter='\t') # X_test = title_embed[test_idx] X_test = np.concatenate((title_embed[test_idx], par_embed[test_idx]), axis=1) np.savetxt("data/X_test.tsv", X_test, delimiter='\t') print("X_test shape: {}".format(X_test.shape)) np.savetxt("data/y_test.tsv", y_test, delimiter='\t') print("y_test shape: {}".format(y_test.shape)) np.savetxt("data/title_test.tsv", titles[test_idx], fmt="%s", delimiter='\t') if __name__ == "__main__": args = parse_args() main(args)
#! /usr/bin/env python """Calculate vector divergence and related quantities at nodes or cells.""" import numpy as np from landlab.utils.decorators import use_field_name_or_array @use_field_name_or_array('link') def calc_flux_div_at_node(grid, unit_flux, out=None): """Calculate divergence of link-based fluxes at nodes. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each node (zero or "out" value for nodes without cells). Construction:: calc_flux_div_at_node(grid, unit_flux_at_links, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux : ndarray or field name Flux per unit width along links (x number of links) or across faces (x number of faces). Returns ------- ndarray (x number of nodes) Flux divergence at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) # there are 17 links >>> lg array([ 0. , 0. , 0. , 0. , 5. , 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -3.6, 0. , 0. , 0. , 0. ]) >>> calc_flux_div_at_node(rg, -lg) array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. , 0. , 0. , 0. ]) >>> fg = lg[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> calc_flux_div_at_node(rg, -fg) array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. , 0. , 0. , 0. ]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> unit_flux_at_links = np.zeros(rg.number_of_links) >>> unit_flux_at_links[rg.active_links] = -lg[rg.active_links] >>> calc_flux_div_at_node(rg, unit_flux_at_links) array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. , 0. , 0. , 0. ]) >>> unit_flux_at_faces = np.zeros(rg.number_of_faces) >>> unit_flux_at_faces[rg.active_faces] = -fg[rg.active_faces] >>> calc_flux_div_at_node(rg, unit_flux_at_faces) array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. , 0. , 0. , 0. ]) Notes ----- Performs a numerical flux divergence operation on nodes. LLCATS: NINF GRAD """ if unit_flux.size not in (grid.number_of_links, grid.number_of_faces): raise ValueError('Parameter unit_flux must be num links or num faces ' 'long') if out is None: out = grid.zeros(at='node') elif out.size != grid.number_of_nodes: raise ValueError('output buffer length mismatch with number of nodes') if unit_flux.size == grid.number_of_links: out[grid.node_at_cell] = _calc_net_face_flux_at_cell(grid, unit_flux[grid.link_at_face]) \ / grid.area_of_cell elif unit_flux.size == grid.number_of_faces: out[grid.node_at_cell] = _calc_net_face_flux_at_cell(grid, unit_flux) \ / grid.area_of_cell return out @use_field_name_or_array('link') def calc_net_flux_at_node(grid, unit_flux_at_links, out=None): """Calculate net link fluxes at nodes. Given a flux per unit width along each link in the grid, calculate the net outflux (or influx, if negative) at each node. Fluxes are treated as zero for links that have no faces, and net fluxes are treated as zero for nodes that have no cell. Construction:: calc_net_flux_at_node(grid, unit_flux_at_links, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_links : ndarray or field name Flux per unit width associated with links. out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of cells) Net flux at nodes. 
Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) # there are 17 links >>> lg array([ 0. , 0. , 0. , 0. , 5. , 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -3.6, 0. , 0. , 0. , 0. ]) >>> calc_net_flux_at_node(rg, -lg) array([ 0., 0., 0., 0., 0., 164., 94., 0., 0., 0., 0., 0.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> unit_flux_at_links = np.zeros(rg.number_of_links) >>> unit_flux_at_links[rg.active_links] = -lg[rg.active_links] >>> nlfn = calc_net_flux_at_node(rg, unit_flux_at_links) >>> np.round(nlfn) array([ 0., 0., 0., 0., 0., 114., 22., 0., 0., 0., 0., 0.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> lg = hg.calc_grad_at_link(z) # there are ? links >>> lg array([ 0. , 0. , 0. , 5. , 5. , 3.6, 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -5. , -3.6, -3.6, 0. , 0. , 0. ]) >>> nlfn = calc_net_flux_at_node(hg, -lg) >>> np.round(nlfn) array([ 0., 0., 0., 0., 152., 96., 0., 0., 0., 0.]) Notes ----- This is essentially a line integral for the fluxes along the boundaries of each cell. Hence, the resulting output has dimensions of total flux (so, if the unit flux happens to be mass per time per face width, the output will be in mass per unit time). Because a line integral is undefined where there are no cells (i.e., perimeter nodes), the result is given as zeros for these nodes. The current algorithm uses fancy indexing (calling _calc_net_face_flux_at_cells) and could probably be made faster. LLCATS: NINF GRAD """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = _calc_net_face_flux_at_cell(grid, unit_flux_at_links[grid.link_at_face]) return out @use_field_name_or_array('face') def _calc_net_face_flux_at_cell(grid, unit_flux_at_faces, out=None): """Calculate net face fluxes at cells. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) at each cell. Construction:: _calc_net_face_flux_at_cell(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name Flux per unit width associated with faces. out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of cells) Net flux at cells. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) >>> fg = lg[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_net_face_flux_at_cell(rg, -fg) array([ 164., 94.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> unit_flux_at_faces = np.zeros(rg.number_of_faces) >>> unit_flux_at_faces[rg.active_faces] = -fg[rg.active_faces] >>> _calc_net_face_flux_at_cell(rg, unit_flux_at_faces) array([ 114., 22.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> lg = hg.calc_grad_at_link(z) >>> fg = lg[hg.link_at_face] # there are 11 faces >>> fg array([ 5. , 5. 
, 3.6, 3.6, 5. , -1.4, -3.6, -5. , -5. , -3.6, -3.6]) >>> nffc = _calc_net_face_flux_at_cell(hg, -fg) >>> np.round(nffc) array([ 152., 96.]) Notes ----- This is essentially a line integral for the fluxes along the boundaries of each cell. Hence, the resulting output has dimensions of total flux (so, if the unit flux happens to be mass per time per face width, the output will be in mass per unit time). """ if out is None: out = grid.empty(at='cell') total_flux = unit_flux_at_faces * grid.width_of_face out = np.zeros(grid.number_of_cells) fac = grid.faces_at_cell for c in range(grid.link_dirs_at_node.shape[1]): out -= total_flux[fac[:,c]] \ * grid.link_dirs_at_node[grid.node_at_cell,c] return out @use_field_name_or_array('face') def _calc_face_flux_divergence_at_cell(grid, unit_flux_at_faces): """Calculate divergence of face-based fluxes at cells. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each cell. Construction:: _calc_face_flux_divergence_at_cell(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name Flux per unit width associated with faces. Returns ------- ndarray (x number of cells) Flux divergence at cells. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) >>> lg[rg.link_at_face] array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_face_flux_divergence_at_cell(rg, -lg[rg.link_at_face]) array([ 1.64, 0.94]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> unit_flux_at_faces = np.zeros(rg.number_of_faces) >>> fg = lg[rg.link_at_face] >>> unit_flux_at_faces[rg.active_faces] = -fg[rg.active_faces] >>> _calc_face_flux_divergence_at_cell(rg, unit_flux_at_faces) array([ 1.14, 0.22]) Notes ----- Performs a numerical flux divergence operation on cells. """ return _calc_net_face_flux_at_cell(grid, unit_flux_at_faces) \ / grid.area_of_cell @use_field_name_or_array('face') def _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces, out=None): """Calculate net face fluxes at cells, ignoring values on inactive faces. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) at each cell. Same as `_calc_net_face_flux_at_cell` except that flux values on inactive faces are ignored. Construction:: _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name (x number of faces) Flux per unit width associated with faces. out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of cells) Net flux at cells. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. 
, -3.6]) >>> _calc_net_active_face_flux_at_cell(rg, -fg) array([ 164., 94.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_net_active_face_flux_at_cell(rg, -fg) array([ 114., 22.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> fg = hg.calc_grad_at_link(z)[hg.link_at_face] # there are 11 faces >>> fg array([ 5. , 5. , 3.6, 3.6, 5. , -1.4, -3.6, -5. , -5. , -3.6, -3.6]) >>> nffc = _calc_net_active_face_flux_at_cell(hg, -fg) >>> np.round(nffc) array([ 152., 96.]) Notes ----- This is essentially a line integral for the fluxes along the boundaries of each cell. Hence, the resulting output has dimensions of total flux (so, if the unit flux happens to be mass per time per face width, the output will be in mass per unit time). """ if out is None: out = grid.empty(at='cell') total_flux = unit_flux_at_faces * grid.width_of_face out = np.zeros(grid.number_of_cells) fac = grid.faces_at_cell for c in range(grid.active_link_dirs_at_node.shape[1]): out -= total_flux[fac[:,c]] \ * grid.active_link_dirs_at_node[grid.node_at_cell,c] return out @use_field_name_or_array('face') def _calc_active_face_flux_divergence_at_cell(grid, unit_flux_at_faces): """Calculate divergence of face-based fluxes at cells, ignoring values on inactive faces. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each cell. Same as `_calc_face_flux_divergence_at_cell` except that flux values at inactive faces are ignored. Construction:: _calc_active_face_flux_divergence_at_cell(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name (x number of faces) Flux per unit width associated with faces. Returns ------- ndarray (x number of cells) Flux divergence at cells. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_active_face_flux_divergence_at_cell(rg, -fg) array([ 1.64, 0.94]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_active_face_flux_divergence_at_cell(rg, -fg) array([ 1.14, 0.22]) Notes ----- Performs a numerical flux divergence operation on cells. """ return _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) \ / grid.area_of_cell @use_field_name_or_array('link') def _calc_net_active_link_flux_at_node(grid, unit_flux_at_links, out=None): """Calculate net link fluxes at nodes, ignoring fluxes on inactive links. Given a flux per unit width along each link in the grid, calculate the net outflux (or influx, if negative) at each node. Fluxes are treated as zero for links that have no faces, and net fluxes are treated as zero for nodes that have no cell. Same as `_calc_net_link_flux_at_node` except that it ignores any flux values on inactive links. Construction:: _calc_net_active_link_flux_at_node(grid, unit_flux_at_links, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_links : ndarray or field name (x number of links) Flux per unit width associated with links. 
out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of cells) Net flux at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) # there are 17 links >>> lg array([ 0. , 0. , 0. , 0. , 5. , 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -3.6, 0. , 0. , 0. , 0. ]) >>> _calc_net_active_link_flux_at_node(rg, -lg) array([ 0., 0., 0., 0., 0., 164., 94., 0., 0., 0., 0., 0.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> nlfn = _calc_net_active_link_flux_at_node(rg, -lg) >>> np.round(nlfn) array([ 0., 0., 0., 0., 0., 114., 22., 0., 0., 0., 0., 0.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> lg = hg.calc_grad_at_link(z) # there are ? links >>> lg array([ 0. , 0. , 0. , 5. , 5. , 3.6, 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -5. , -3.6, -3.6, 0. , 0. , 0. ]) >>> nlfn = _calc_net_active_link_flux_at_node(hg, -lg) >>> np.round(nlfn) array([ 0., 0., 0., 0., 152., 96., 0., 0., 0., 0.]) Notes ----- This is essentially a line integral for the fluxes along the boundaries of each cell. Hence, the resulting output has dimensions of total flux (so, if the unit flux happens to be mass per time per face width, the output will be in mass per unit time). Because a line integral is undefined where there are no cells (i.e., perimeter nodes), the result is given as zeros for these nodes. The current algorithm uses fancy indexing (calling _calc_net_face_flux_at_cells) and could probably be made faster. """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = _calc_net_active_face_flux_at_cell(grid, unit_flux_at_links[grid.link_at_face]) return out @use_field_name_or_array('link') def _calc_active_link_flux_divergence_at_node(grid, unit_flux_at_links, out=None): """Calculate divergence of link-based fluxes at nodes, ignoring any fluxes at inactive links. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each node (zero or "out" value for nodes without cells). Construction:: _calc_active_link_flux_divergence_at_node(grid, unit_flux_at_links, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_links : ndarray or field name (x number of links) Flux per unit width associated with links. Returns ------- ndarray (x number of nodes) Flux divergence at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> lg = rg.calc_grad_at_link(z) # there are 17 links >>> lg array([ 0. , 0. , 0. , 0. , 5. , 3.6, 0. , 5. , -1.4, -3.6, 0. , -5. , -3.6, 0. , 0. , 0. , 0. ]) >>> _calc_active_link_flux_divergence_at_node(rg, -lg) array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. , 0. , 0. , 0. ]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_active_link_flux_divergence_at_node(rg, -lg) array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. , 0. , 0. , 0. ]) Notes ----- Performs a numerical flux divergence operation on nodes. 
""" if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = _calc_net_active_face_flux_at_cell(grid, unit_flux_at_links[grid.link_at_face]) \ / grid.area_of_cell return out @use_field_name_or_array('face') def _calc_net_face_flux_at_node(grid, unit_flux_at_faces, out=None): """Calculate net face fluxes at nodes. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) at each node (nodes without cells are zero, or unchanged from `out` parameter if provided) Construction:: _calc_net_face_flux_at_node(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name Flux per unit width associated with faces. out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of nodes) Net flux at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_net_face_flux_at_node(rg, -fg) array([ 0., 0., 0., 0., 0., 164., 94., 0., 0., 0., 0., 0.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> unit_flux_at_faces = np.zeros(rg.number_of_faces) >>> unit_flux_at_faces[rg.active_faces] = -fg[rg.active_faces] >>> _calc_net_face_flux_at_node(rg, unit_flux_at_faces) array([ 0., 0., 0., 0., 0., 114., 22., 0., 0., 0., 0., 0.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> fg = hg.calc_grad_at_link(z)[hg.link_at_face] # there are 11 faces >>> fg array([ 5. , 5. , 3.6, 3.6, 5. , -1.4, -3.6, -5. , -5. , -3.6, -3.6]) >>> nffc = _calc_net_face_flux_at_node(hg, -fg) >>> np.round(nffc) array([ 0., 0., 0., 0., 152., 96., 0., 0., 0., 0.]) Notes ----- Like _calc_net_face_flux_at_cells, this essentially performs a line integral for the fluxes along the boundaries of each cell. Nodes without cells are either assigned a zero value, or if `out` is provided, they retain their previous values. """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = _calc_net_face_flux_at_cell(grid, unit_flux_at_faces) return out @use_field_name_or_array('face') def _calc_net_active_face_flux_at_node(grid, unit_flux_at_faces, out=None): """Calculate net face fluxes at nodes, ignore inactive faces. Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) at each node (nodes without cells are zero, or unchanged from `out` parameter if provided). Same as `_calc_net_face_flux_at_node` except that it ignores inactive faces. Construction:: _calc_net_active_face_flux_at_node(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name (x number of faces) Flux per unit width associated with faces. out : ndarray, optional Buffer to hold the result. Returns ------- ndarray (x number of nodes) Net flux at nodes. 
Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_net_active_face_flux_at_node(rg, -fg) array([ 0., 0., 0., 0., 0., 164., 94., 0., 0., 0., 0., 0.]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_net_active_face_flux_at_node(rg, -fg) array([ 0., 0., 0., 0., 0., 114., 22., 0., 0., 0., 0., 0.]) >>> from landlab import HexModelGrid >>> hg = HexModelGrid(3, 3, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation', noclobber=False) >>> z[4] = 50.0 >>> z[5] = 36.0 >>> fg = hg.calc_grad_at_link(z)[hg.link_at_face] # there are 11 faces >>> fg array([ 5. , 5. , 3.6, 3.6, 5. , -1.4, -3.6, -5. , -5. , -3.6, -3.6]) >>> nffc = _calc_net_active_face_flux_at_node(hg, -fg) >>> np.round(nffc) array([ 0., 0., 0., 0., 152., 96., 0., 0., 0., 0.]) Notes ----- Like _calc_net_face_flux_at_cells, this essentially performs a line integral for the fluxes along the boundaries of each cell. Nodes without cells are either assigned a zero value, or if `out` is provided, they retain their previous values. """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) return out @use_field_name_or_array('face') def _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None): """Calculate divergence of face-based fluxes at nodes (active faces only). Given a flux per unit width across each face in the grid, calculate the net outflux (or influx, if negative) divided by cell area, at each node that lies within a cell. Construction:: _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None) Parameters ---------- grid : ModelGrid A ModelGrid. unit_flux_at_faces : ndarray or field name (x number of faces) Flux per unit width associated with faces. out : ndarray (x number of nodes), optional Buffer to hold the result. Returns ------- ndarray (x number of nodes) Flux divergence at nodes. Examples -------- >>> from landlab import RasterModelGrid, CLOSED_BOUNDARY >>> rg = RasterModelGrid(3, 4, 10.0) >>> z = rg.add_zeros('node', 'topographic__elevation') >>> z[5] = 50.0 >>> z[6] = 36.0 >>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces >>> fg array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6]) >>> _calc_active_face_flux_divergence_at_node(rg, -fg) array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. , 0. , 0. , 0. ]) >>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY) >>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY) >>> _calc_active_face_flux_divergence_at_node(rg, -fg) array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. , 0. , 0. , 0. ]) Notes ----- Performs a numerical flux divergence operation on cells, and returns the result in an array of length equal to the number of nodes. Nodes without cells (those on the grid perimeter) are not affected (i.e., their value is either zero, or if `out` is given, whatever the prior value in `out` was). """ if out is None: out = grid.zeros(at='node') out[grid.node_at_cell] = \ _calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) \ / grid.area_of_cell return out
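The Notes sections above describe the net-flux computation as a line integral of the unit flux around each cell's boundary: multiply the flux across each face by the face width, apply the sign of the outward normal, and sum. Below is a minimal NumPy sketch of that integral for the first cell of the raster example above (fluxes of -5, -5, 1.4 and 5 across its bottom, left, right and top faces, each 10 units wide); the outward-normal signs are written out by hand here rather than read from `link_dirs_at_node`.

import numpy as np

# Unit fluxes across the four faces of the first cell in the raster example
# above (bottom, left, right, top), the face widths, and the sign of the
# outward normal relative to the link along which each flux is defined.
unit_flux_at_faces = np.array([-5.0, -5.0, 1.4, 5.0])
width_of_faces = np.array([10.0, 10.0, 10.0, 10.0])
outward_sign = np.array([-1.0, -1.0, 1.0, 1.0])

# Line integral of flux around the cell boundary -> net outflux
# (164.0 here, matching the doctest value for the first cell).
net_flux = np.sum(unit_flux_at_faces * width_of_faces * outward_sign)

# Dividing by the cell area gives the flux divergence reported by the
# *_flux_divergence_* functions (1.64 here).
divergence = net_flux / 100.0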
VBA = \ r''' Function IsAdmin() On Error Resume Next CreateObject("WScript.Shell").RegRead("HKEY_USERS\S-1-5-19\Environment\TEMP") if Err.number = 0 Then IsAdmin = True else IsAdmin = False end if Err.Clear On Error goto 0 End Function Function GetComputerName() Set objWMISvc = GetObject( "winmgmts:\\.\root\cimv2" ) Set colItems = objWMISvc.ExecQuery( "Select * from Win32_ComputerSystem", , 48 ) For Each objItem in colItems strComputerName = objItem.Name GetComputerName = strComputerName Next End Function Function IsCurrentUserMemberOfAdminGroup() Dim objShell,grouplistD Dim ADSPath As String Dim objWMIService, colItems, Path On Error Resume Next Dim userdomain As String Dim username As String Dim strQuery As String userdomain = "userdomain" username = "username" Dim computerNameStr As String ' The current user ADSPath = EnvString(userdomain) & "/" & EnvString(username) 'Get list of all administrators for local machine (could also work for another machine computerNameStr = GetComputerName() Set objWMIService = GetObject("winmgmts:{impersonationLevel=impersonate}!\\.\root\cimv2") strQuery = "select * from Win32_GroupUser where GroupComponent = " & chr(34) & "Win32_Group.Domain='" & computerNameStr & "',Name='Administrators'" & Chr(34) Set ColItems = objWMIService.ExecQuery(strQuery) ' Admins are stored in a dictionnary Set groupList = CreateObject("Scripting.Dictionary") For Each Path In ColItems Dim strMemberName As String Dim strDomainName As String Dim NamesArray As Variant Dim DomainNameArray As Variant NamesArray = Split(Path.PartComponent,",") strMemberName = Replace(Replace(NamesArray(1),Chr(34),""),"Name=","") DomainNameArray = Split(NamesArray(0),"=") strDomainName = Replace(DomainNameArray(1),Chr(34),"") 'If strDomainName <> strComputerName Then strMemberName = strDomainName & "/" & strMemberName 'End If groupList.Add strMemberName, "-" Next ' check is current user is in dictionnary IsCurrentUserMemberOfAdminGroup = CBool(groupList.Exists(ADSPath)) End Function 'This function returns a particular environment variable's value. ' for example, if you use EnvString("username"), it would return ' the value of %username%. Function EnvString(variable) Dim objShell set objShell = CreateObject( "WScript.Shell" ) variable = "%" & variable & "%" EnvString = objShell.ExpandEnvironmentStrings(variable) ' Clean up Set objShell = Nothing End Function '''
import csv from torch.utils.data import Dataset, DataLoader import numpy as np from base.torchvision_dataset import TorchvisionDataset import torchvision.transforms as transforms from .preprocessing import get_target_label_idx import torch from torch.utils.data import Subset class CreditFraud_Dataset(TorchvisionDataset): def __init__(self, root: str, normal_class=0): super().__init__(root) self.n_classes = 2 # 0: normal, 1: outlier self.normal_classes = tuple([normal_class]) self.outlier_classes = [0,1] self.outlier_classes.remove(normal_class) transform = None target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes)) train_set = MyCreditFraud(root=self.root, train=True, transform=transform) # Subset train_set to normal class train_idx_normal = get_target_label_idx(train_set.train_labels.clone().data.cpu().numpy(), self.normal_classes) self.train_set = Subset(train_set, train_idx_normal) self.test_set = MyCreditFraud(root=self.root, train=False, transform=transform) class MyCreditFraud(Dataset): """Torchvision Credit Fraud class with patch of __getitem__ method to also return the index of a data sample.""" def __init__(self, root, train, transform): # Path to the csv file path_2_csv = "/home/liviu/Documents/Dev/Deep-SVDD-PyTorch/Datasets/CreditFraud/creditcard.csv" # Open and read the csv with open(path_2_csv, mode='r') as infile: reader = csv.reader(infile) table = [] for row in reader: table.append(list(row)) table = table[1:-1] table = [[float(x) for x in row] for row in table] table = np.asarray(table) features = table[:,0:30] labels = table[:,-1] features[:,0] = features[:,0]/np.max(features[:,0]) # print(features[]) self.train_data = torch.from_numpy(features[0:200000]) self.test_data = torch.from_numpy(features[200000:284807]) self.train_labels = torch.from_numpy(labels[0:200000]) self.test_labels = torch.from_numpy(labels[200000:284807]) # self.train_data = torch.from_numpy(features[0:200000]) # self.test_data = torch.from_numpy(features[0:200000]) # self.train_labels = torch.from_numpy(labels[0:200000]) # self.test_labels = torch.from_numpy(labels[0:200000]) self.train = train self.transform = transform def __getitem__(self, index): if self.train: features, target = self.train_data[index], self.train_labels[index] else: features, target = self.test_data[index], self.test_labels[index] if self.transform: return self.transform(features), self.transform(target), index else: return features, target, index def __len__(self): if self.train: return len(self.train_data) return len(self.test_data)
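A minimal usage sketch for the dataset classes above (the root path and batch size are illustrative). Note that MyCreditFraud reads from a hard-coded csv path, so the `root` argument is effectively unused; the patched `__getitem__` returns `(features, target, index)` triples, which the loop below unpacks.

from torch.utils.data import DataLoader

# Hypothetical usage; `root` is passed through, but the csv path is hard-coded above.
dataset = CreditFraud_Dataset(root='../data', normal_class=0)

# The training subset contains only samples of the normal class.
train_loader = DataLoader(dataset.train_set, batch_size=128, shuffle=True)
test_loader = DataLoader(dataset.test_set, batch_size=128, shuffle=False)

for features, target, idx in train_loader:
    # features: (batch, 30) tensor; target: raw class label; idx: sample index
    print(features.shape, target.shape, idx.shape)
    break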
# Pyrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2020 Dan <https://github.com/delivrance> # # This file is part of Pyrogram. # # Pyrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. from io import BytesIO from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector from pyrogram.raw.core import TLObject from pyrogram import raw from typing import List, Union, Any # # # # # # # # # # # # # # # # # # # # # # # # # !!! WARNING !!! # # This is a generated file! # # All changes made in this file will be lost! # # # # # # # # # # # # # # # # # # # # # # # # # class UpdateDialogUnreadMark(TLObject): # type: ignore """This object is a constructor of the base type :obj:`~pyrogram.raw.base.Update`. Details: - Layer: ``117`` - ID: ``0xe16459c3`` Parameters: peer: :obj:`DialogPeer <pyrogram.raw.base.DialogPeer>` unread (optional): ``bool`` """ __slots__: List[str] = ["peer", "unread"] ID = 0xe16459c3 QUALNAME = "types.UpdateDialogUnreadMark" def __init__(self, *, peer: "raw.base.DialogPeer", unread: Union[None, bool] = None) -> None: self.peer = peer # DialogPeer self.unread = unread # flags.0?true @staticmethod def read(data: BytesIO, *args: Any) -> "UpdateDialogUnreadMark": flags = Int.read(data) unread = True if flags & (1 << 0) else False peer = TLObject.read(data) return UpdateDialogUnreadMark(peer=peer, unread=unread) def write(self) -> bytes: data = BytesIO() data.write(Int(self.ID, False)) flags = 0 flags |= (1 << 0) if self.unread is not None else 0 data.write(Int(flags)) data.write(self.peer.write()) return data.getvalue()
from flask import Blueprint gpx2tcx = Blueprint('gpx2tcx', __name__, template_folder='../templates', static_folder='../static')
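The blueprint above only declares its template and static folders; a minimal sketch of wiring it into an application follows (the application module and the url_prefix are assumptions, not taken from the project).

from flask import Flask

app = Flask(__name__)
# Register the blueprint; the url_prefix is illustrative.
app.register_blueprint(gpx2tcx, url_prefix='/gpx2tcx')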
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems This module is developed by: Yalin Li <zoe.yalin.li@gmail.com> This module is under the University of Illinois/NCSA Open Source License. Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt for license details. ''' # %% from warnings import warn from thermosteam.utils import registered from .utils import parse_unit, load_data, copy_attr __all__ = ('ImpactIndicator', ) @registered(ticket_name='ind') class ImpactIndicator: ''' To handle different impact indicators in life cycle assessment. Parameters ---------- ID : str ID of this impact indicator. alias : str Alternative ID of this impact indicator. .. note:: "synonym" was used before v0.2.2 it is still supported, but may be removed in the future. method : str Impact assessment method, e.g., 'TRACI'. category : str Category of this impact indicator, e.g., 'human health'. unit : str Unit of this impact indicator, e.g., 'kg CO2-eq'. description : str Supplementary explanation. Examples -------- Make an impact indicator for global warming potential. >>> import qsdsan as qs >>> GWP = qs.ImpactIndicator('GlobalWarming', method='TRACI', ... category='environmental impact', ... unit='kg CO2-eq', ... description='Effect of climate change measured as \ ... global warming potential.') See relevant information. >>> GWP.show() ImpactIndicator: GlobalWarming as kg CO2-eq Alias : None Method : TRACI Category : environmental impact Description: Effect of climate change ... >>> # Add an alias >>> GWP.alias = 'GWP' >>> GWP.show() ImpactIndicator: GlobalWarming as kg CO2-eq Alias : GWP Method : TRACI Category : environmental impact Description: Effect of climate change ... >>> # Add another impact indicator >>> FEC = qs.ImpactIndicator('FossilEnergyConsumption', alias='FEC', unit='MJ') >>> # Get all impact indicators >>> qs.ImpactIndicator.get_all_indicators() {'GlobalWarming': <ImpactIndicator: GlobalWarming>, 'FossilEnergyConsumption': <ImpactIndicator: FossilEnergyConsumption>} Manage the registry. >>> GWP.deregister() The impact indicator "GlobalWarming" has been removed from the registry. >>> qs.ImpactIndicator.get_all_indicators() {'FossilEnergyConsumption': <ImpactIndicator: FossilEnergyConsumption>} >>> GWP.register() The impact indicator "GlobalWarming" has been added to the registry. >>> qs.ImpactIndicator.get_all_indicators() {'FossilEnergyConsumption': <ImpactIndicator: FossilEnergyConsumption>, 'GlobalWarming': <ImpactIndicator: GlobalWarming>} >>> qs.ImpactIndicator.clear_registry() All impact indicators have been removed from registry. 
>>> qs.ImpactIndicator.get_all_indicators() {} ''' __slots__ = ('_ID', '_alias', '_method', '_category', '_unit', '_ureg_unit', '_unit_remaining', '_description') def __init__(self, ID='', alias='', method='', category='', unit='', description='', **kwargs): self._register(ID) self.alias = alias self._unit = str(unit) self._ureg_unit, self._unit_remaining = parse_unit(unit) self._method = method self._category = category self._description = description if 'synonym' in kwargs.keys(): synonym = kwargs['synonym'] if (not alias or str(alias)=='nan'): raise DeprecationWarning('`synonym` has been changed to `alias` for qsdsan v0.2.2 and above.') alias = synonym else: raise DeprecationWarning('`synonym` has been changed to `alias` for qsdsan v0.2.2 and above, ' \ f'the given `synonym` "{synonym}" is ignored as `alias` "{alias}" is provided.') def __repr__(self): return f'<ImpactIndicator: {self.ID}>' def show(self): '''Show basic information about this impact indicator.''' if self.unit: info = f'ImpactIndicator: {self.ID} as {self.unit}' else: info = f'ImpactIndicator: {self.ID}' alias = self.alias if self.alias else 'None' line = f'\n Alias : {alias}' if len(line) > 40: line = line[:40] + '...' info += line info += f'\n Method : {self.method or None}' info += f'\n Category : {self.category or None}' line = f'\n Description: {self.description or None}' if len(line) > 40: line = line[:40] + '...' info += line print(info) _ipython_display_ = show def copy(self, new_ID=''): ''' Return a new :class:`ImpactIndicator` object with the same settings. .. note: Aliases will not be copied. Parameters ---------- new_ID : str ID of the new impact indicator. Examples -------- >>> import qsdsan as qs >>> GWP = qs.ImpactIndicator('GlobalWarming', alias='GWP', method='TRACI', ... category='environmental impact', ... unit='kg CO2-eq', ... description='Effect of climate change measured as \ ... global warming potential.') >>> GWP.show() # doctest: +ELLIPSIS ImpactIndicator: GlobalWarming as kg CO2-eq Alias : GWP Method : TRACI Category : environmental impact Description: Effect of climate change ... >>> GWP_cp = GWP.copy() >>> GWP_cp.show() # doctest: +ELLIPSIS ImpactIndicator: ind1 as kg CO2-eq Alias : None Method : TRACI Category : environmental impact Description: Effect of climate change ... ''' cls = self.__class__ new = cls.__new__(cls) new.__init__(new_ID) new = copy_attr(new, self, skip=('_ID', '_alias')) return new def register(self, print_msg=True): '''Add this impact indicator to the registry.''' self.registry.register_safely(self.ID, self) if print_msg: print(f'The impact indicator "{self.ID}" has been added to the registry.') def deregister(self, print_msg=True): '''Remove this impact indicator from the registry.''' self.registry.discard(self.ID) if print_msg: print(f'The impact indicator "{self.ID}" has been removed from the registry.') @classmethod def clear_registry(cls, print_msg=True): '''Remove all existing impact indicators from the registry.''' cls.registry.clear() if print_msg: print('All impact indicators have been removed from registry.') @classmethod def get_all_indicators(cls, include_alias=False): ''' Get all defined impact indicator as a dict. Parameters ---------- include_alias : bool If True, aliases will be included as keys in the dict as well. 
''' if not include_alias: return cls.registry.data else: dct = cls.registry.data.copy() dct.update(cls._get_alias_dct()) return dct @classmethod def get_indicator(cls, ID_or_alias): '''Get an impact indicator by its ID or alias.''' dct = cls.get_all_indicators(True) return dct.get(ID_or_alias) @classmethod def load_indicators_from_file(cls, path_or_dict, index_col=None): '''Same as :func:`load_from_file`, has been deprecated.''' warn('`load_indicators_from_file` has been deprecated, ' 'please use `load_from_file` instead.', stacklevel=2) cls.load_from_file(path_or_dict, index_col) @classmethod def load_from_file(cls, path_or_df, index_col=None): ''' Load impact indicator from a datasheet. The first row of this datasheet should have "indicator" (it is used as the ID, e.g., GlobalWarming), "alias" (e.g., GWP), "unit" (e.g., kg CO2-eq), "method" (e.g., TRACI), "category" (e.g., environmental impact), and "description". Aside from "indicator", other information is optional. Each row should be a data entry. .. note:: This function is just one way to batch-load impact indicators, you can always write your own function that fits your datasheet format, as long as it provides all the information to construct the impact indicator. Parameters ---------- path_or_df : str or :class:`pandas.DataFrame` DataFrame or complete path of the datasheet, currently support tsv, csv, and xls/xlsx. index_col : None or int Index column of the :class:`pandas.DataFrame`. See Also -------- Refer to the `Bwaise system <https://github.com/QSD-Group/EXPOsan/tree/main/exposan/bwaise/data>`_ in the `Exposan` repository for a sample file. ''' data = load_data(path=path_or_df, index_col=index_col) if isinstance(path_or_df, str) else path_or_df for num in data.index: new = cls.__new__(cls) kwargs = {} for k in ('alias', 'unit', 'method', 'category', 'description'): try: kwargs[k] = data.iloc[num][k] except KeyError: kwargs[k] = '' new.__init__(ID=data.iloc[num]['indicator'], **kwargs) @classmethod def _get_alias_dct(cls): dct = {} for i in cls.registry.data.values(): if i.alias: dct[i.alias] = i return dct @property def ID(self): '''[str] ID of this impact indicator.''' return self._ID @ID.setter def ID(self, ID): self._ID = ID @property def alias(self): '''[str] Alias of this impact indicator.''' if not hasattr(self, '_alias'): # for initiation self._alias = None return self._alias @alias.setter def alias(self, alias): alias = None if str(alias) == 'nan' else alias alias_dct = self._get_alias_dct() if alias: if not isinstance(alias, str): raise TypeError(f'`alias` can only be a str, not {type(alias).__name__}.') if alias in alias_dct.keys(): old_ind = alias_dct[alias] if old_ind.ID != self.ID: warn(f'The alias "{alias}" is now being used for "{self.ID}", ' \ f'instead of {old_ind.ID}.') old_ind._alias = None self._alias = alias else: self._alias = None @property def unit(self): '''[str] Unit of this impact indicator.''' return self._unit @unit.setter def unit(self, i): self._unit = str(i) self._ureg_unit, self._unit_remaining = parse_unit(i) @property def method(self): '''[str] Impact assessment method of this impact indicator.''' return self._method @method.setter def method(self, i): self._method = i @property def category(self): '''[str] Impact category of this impact indicator.''' return self._category @category.setter def category(self, i): self._category = i @property def description(self): '''[str] Description of this impact indicator.''' return self._description @description.setter def description(self, i): 
        self._description = i

    @property
    def registered(self):
        '''[bool] Whether this impact indicator is registered in the registry.'''
        return bool(self.registry.data.get(self.ID))
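To make the datasheet format documented in `load_from_file` concrete ("indicator", "alias", "unit", "method", "category", and "description" columns, one row per indicator), here is a small sketch using an in-memory DataFrame; the indicator values are illustrative only.

import pandas as pd
import qsdsan as qs

# Columns follow the format documented in `load_from_file`;
# only "indicator" is strictly required.
df = pd.DataFrame({
    'indicator': ['Eutrophication'],
    'alias': ['EP'],
    'unit': ['kg N-eq'],
    'method': ['TRACI'],
    'category': ['environmental impact'],
    'description': ['Excess nutrient loading to receiving waters.'],
})
qs.ImpactIndicator.load_from_file(df)

# Loaded indicators can then be looked up by ID or alias.
qs.ImpactIndicator.get_indicator('EP')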
import typing from functools import partial from delira.models.backends.chainer import AbstractChainerNetwork from delira.data_loading import BaseDataManager from delira.training.base_experiment import BaseExperiment from delira.utils import DeliraConfig from delira.training.backends.chainer.utils import create_optims_default from delira.training.backends.chainer.utils import convert_to_numpy from delira.training.backends.chainer.trainer import ChainerNetworkTrainer class ChainerExperiment(BaseExperiment): def __init__(self, config: typing.Union[str, DeliraConfig], model_cls: AbstractChainerNetwork, n_epochs=None, name=None, save_path=None, key_mapping=None, val_score_key=None, optim_builder=create_optims_default, checkpoint_freq=1, trainer_cls=ChainerNetworkTrainer, **kwargs): """ Parameters ---------- config : :class:`DeliraConfig` or str the training config, if string is passed, it is treated as a path to a file, where the config is loaded from model_cls : Subclass of :class:`AbstractChainerNetwork` the class implementing the model to train n_epochs : int or None the number of epochs to train, if None: can be specified later during actual training name : str or None the Experiment's name save_path : str or None the path to save the results and checkpoints to. if None: Current working directory will be used key_mapping : dict mapping between data_dict and model inputs (necessary for prediction with :class:`Predictor`-API), if no keymapping is given, a default key_mapping of {"x": "data"} will be used here val_score_key : str or None key defining which metric to use for validation (determining best model and scheduling lr); if None: No validation-based operations will be done (model might still get validated, but validation metrics can only be logged and not used further) optim_builder : function Function returning a dict of backend-specific optimizers. defaults to :func:`create_optims_default_chainer` checkpoint_freq : int frequency of saving checkpoints (1 denotes saving every epoch, 2 denotes saving every second epoch etc.); default: 1 trainer_cls : subclass of :class:`ChainerNetworkTrainer` the trainer class to use for training the model, defaults to :class:`ChainerNetworkTrainer` **kwargs : additional keyword arguments """ if key_mapping is None: key_mapping = {"x": "data"} super().__init__(config=config, model_cls=model_cls, n_epochs=n_epochs, name=name, save_path=save_path, key_mapping=key_mapping, val_score_key=val_score_key, optim_builder=optim_builder, checkpoint_freq=checkpoint_freq, trainer_cls=trainer_cls, **kwargs) def test(self, network: AbstractChainerNetwork, test_data: BaseDataManager, metrics: dict, metric_keys=None, verbose=False, prepare_batch=None, convert_fn=convert_to_numpy, **kwargs): """ Setup and run testing on a given network Parameters ---------- network : :class:`AbstractNetwork` the (trained) network to test test_data : :class:`BaseDataManager` the data to use for testing metrics : dict the metrics to calculate metric_keys : dict of tuples the batch_dict keys to use for each metric to calculate. Should contain a value for each key in ``metrics``. If no values are given for a key, per default ``pred`` and ``label`` will be used for metric calculation verbose : bool verbosity of the test process prepare_batch : function function to convert a batch-dict to a format accepted by the model. This conversion typically includes dtype-conversion, reshaping, wrapping to backend-specific tensors and pushing to correct devices. 
If not further specified uses the ``network``'s ``prepare_batch`` with CPU devices convert_fn : function function to convert a batch of tensors to numpy if not specified defaults to :func:`convert_chainer_tensor_to_npy` **kwargs : additional keyword arguments Returns ------- dict all predictions obtained by feeding the ``test_data`` through the ``network`` dict all metrics calculated upon the ``test_data`` and the obtained predictions """ # use backend-specific and model-specific prepare_batch fn # (runs on same device as passed network per default) device = network.device if prepare_batch is None: prepare_batch = partial(network.prepare_batch, input_device=device, output_device=device) return super().test(network=network, test_data=test_data, metrics=metrics, metric_keys=metric_keys, verbose=verbose, prepare_batch=prepare_batch, convert_fn=convert_fn, **kwargs)
# -*- coding: utf-8 -*- """ Created on Mon Jul 31 15:41:28 2017 @author: Aman Kedia """ # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('Churn_Modelling.csv') X = dataset.iloc[:, 3: 13].values y = dataset.iloc[:, 13].values # Encoding categorical data # Encoding the Independent Variable from sklearn.preprocessing import LabelEncoder, OneHotEncoder labelencoder_X_1 = LabelEncoder() X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) labelencoder_X_2 = LabelEncoder() X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) onehotencoder = OneHotEncoder(categorical_features = [1]) X = onehotencoder.fit_transform(X).toarray() X = X[:, 1:] # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) import keras from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import cross_val_score from keras.models import Sequential from keras.layers import Dense def build_classifier(): classifier = Sequential() classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11)) classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu')) classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid')) classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) return classifier classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, nb_epoch = 100) #Measuring ANN performance accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10) mean = accuracies.mean() variance = accuracies.std()
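The script above only estimates generalisation accuracy with 10-fold cross-validation on the training split; a short follow-on sketch (not part of the original script) fits the wrapped classifier once and scores it on the 25% hold-out set.

from sklearn.metrics import confusion_matrix, accuracy_score

# Fit on the full training split, then evaluate on the hold-out test set.
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)

cm = confusion_matrix(y_test, y_pred)
test_accuracy = accuracy_score(y_test, y_pred)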
from base import BaseTest import requests import json class Test(BaseTest): def test_root(self): """ Test / http endpoint """ self.render_config_template( ) proc = self.start_beat(extra_args=["-E", "http.enabled=true"]) self.wait_until(lambda: self.log_contains("Starting stats endpoint")) r = requests.get("http://localhost:5066") assert r.status_code == 200 data = json.loads(r.content) assert data["beat"] == "mockbeat" assert data["version"] == "9.9.9" proc.check_kill_and_wait() def test_stats(self): """ Test /stats http endpoint """ self.render_config_template( ) proc = self.start_beat(extra_args=["-E", "http.enabled=true"]) self.wait_until(lambda: self.log_contains("Starting stats endpoint")) r = requests.get("http://localhost:5066/stats") assert r.status_code == 200 data = json.loads(r.content) # Test one data point assert data["libbeat"]["config"]["reloads"] == 0 proc.check_kill_and_wait() def test_error(self): """ Test not existing http endpoint """ self.render_config_template( ) proc = self.start_beat(extra_args=["-E", "http.enabled=true"]) self.wait_until(lambda: self.log_contains("Starting stats endpoint")) r = requests.get("http://localhost:5066/not-exist") assert r.status_code == 404 proc.check_kill_and_wait()
"""Functions related to getting soundcloud data.""" import requests def get_track_info(url): """ Get the track info of the passed URL. """ _client_ID = 'LvWovRaJZlWCHql0bISuum8Bd2KX79mb' api = "http://api.soundcloud.com/resolve.json?url={}&client_id={}" URL = api.format(url, _client_ID) r = requests.get(URL).json() title = r['title'] stream_url = r['stream_url'] + '?client_id=' + _client_ID return title, stream_url
""" Django settings for app project. Generated by 'django-admin startproject' using Django 3.2.5. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path import os # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-^g=zse&)53(93+8wf4&fbk4#^82os=cd=r9z3uuj4137n*zeud' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # custom apps 'rest_framework', 'rest_framework.authtoken', 'user', 'core' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'HOST': os.environ.get('DB_HOST'), 'NAME': os.environ.get('DB_NAME'), 'USER': os.environ.get('DB_USER'), 'PASSWORD': os.environ.get('DB_PASS'), } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' AUTH_USER_MODEL = 'core.User' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
from __future__ import print_function import sys import json import argparse import pyjq import os.path from shared.nodes import Account, Region from shared.common import parse_arguments, query_aws from os import listdir __description__ = "Cross-reference EC2 instances with AMI information" def log_warning(msg): print("WARNING: {}".format(msg), file=sys.stderr) def find_image(image_id, public_images, account_images): for image in public_images: if image_id == image["ImageId"]: return image, "public" for image in account_images: if image_id == image["ImageId"]: return image, "private" return None, "unknown_image" def get_instance_name(instance): if "Tags" in instance: for tag in instance["Tags"]: if tag["Key"] == "Name": return tag["Value"] return None def amis(args, accounts, config): # Loading the list of public images from disk takes a while, so we'll iterate by region regions_file = "data/aws/eu-west-3/ec2-describe-images.json" if not os.path.isfile(regions_file): raise Exception( "You need to download the set of public AMI images. Run:\n" " mkdir -p data/aws\n" " cd data/aws\n" " aws ec2 describe-regions | jq -r '.Regions[].RegionName' | xargs -I{} mkdir {}\n" " aws ec2 describe-regions | jq -r '.Regions[].RegionName' | xargs -I{} sh -c 'aws --region {} ec2 describe-images --executable-users all > {}/ec2-describe-images.json'\n" ) print( "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format( "Account Name", "Region Name", "Instance Id", "Instance Name", "AMI ID", "Is Public", "AMI Description", "AMI Owner", ) ) for region in listdir("data/aws/"): # Get public images public_images_file = "data/aws/{}/ec2-describe-images.json".format(region) public_images = json.load(open(public_images_file)) resource_filter = ".Images[]" public_images = pyjq.all(resource_filter, public_images) for account in accounts: account = Account(None, account) region = Region(account, {"RegionName": region}) instances = query_aws(account, "ec2-describe-instances", region) resource_filter = ( '.Reservations[].Instances[] | select(.State.Name == "running")' ) if args.instance_filter != "": resource_filter += "|{}".format(args.instance_filter) instances = pyjq.all(resource_filter, instances) account_images = query_aws(account, "ec2-describe-images", region) resource_filter = ".Images[]" account_images = pyjq.all(resource_filter, account_images) for instance in instances: image_id = instance["ImageId"] image_description = "" owner = "" image, is_public_image = find_image( image_id, public_images, account_images ) if image: # Many images don't have all fields, so try the Name, then Description, then ImageLocation image_description = image.get("Name", "") if image_description == "": image_description = image.get("Description", "") if image_description == "": image_description = image.get("ImageLocation", "") owner = image.get("OwnerId", "") print( "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format( account.name, region.name, instance["InstanceId"], get_instance_name(instance), image_id, is_public_image, image_description, owner, ) ) def run(arguments): parser = argparse.ArgumentParser() parser.add_argument( "--instance_filter", help='Filter on the EC2 info, for example `select(.Platform == "windows")` or `select(.Architecture!="x86_64")`', default="", ) args, accounts, config = parse_arguments(arguments, parser) amis(args, accounts, config)
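`find_image` checks the public image list first and then falls back to the account's own images, returning the matching record together with a visibility label. A tiny illustrative call with made-up data:

# Hypothetical image records; only the fields used by find_image are shown.
public_images = [{"ImageId": "ami-0123456789abcdef0", "Name": "ubuntu-20.04-server"}]
account_images = [{"ImageId": "ami-0fedcba9876543210", "Description": "internal base image"}]

image, visibility = find_image("ami-0fedcba9876543210", public_images, account_images)
# image -> {'ImageId': 'ami-0fedcba9876543210', 'Description': 'internal base image'}
# visibility -> 'private'

image, visibility = find_image("ami-0000000000000000", public_images, account_images)
# image -> None, visibility -> 'unknown_image'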
# -*- coding: utf-8 -*- ''' Support for Portage :optdepends: - portage Python adapter For now all package names *MUST* include the package category, i.e. ``'vim'`` will not work, ``'app-editors/vim'`` will. ''' from __future__ import absolute_import # Import python libs import copy import logging import re # Import salt libs import salt.utils from salt.exceptions import CommandExecutionError, MinionError import salt.ext.six as six # Import third party libs HAS_PORTAGE = False try: import portage HAS_PORTAGE = True except ImportError: import os import sys if os.path.isdir('/usr/lib/portage/pym'): try: # In a virtualenv, the portage python path needs to be manually added sys.path.insert(0, '/usr/lib/portage/pym') import portage HAS_PORTAGE = True except ImportError: pass log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Confirm this module is on a Gentoo based system ''' if HAS_PORTAGE and __grains__['os'] == 'Gentoo': return __virtualname__ return False def _vartree(): import portage portage = reload(portage) return portage.db[portage.root]['vartree'] def _porttree(): import portage portage = reload(portage) return portage.db[portage.root]['porttree'] def _p_to_cp(p): ret = _porttree().dbapi.xmatch("match-all", p) if ret: return portage.cpv_getkey(ret[0]) return None def _allnodes(): if 'portage._allnodes' in __context__: return __context__['portage._allnodes'] else: ret = _porttree().getallnodes() __context__['portage._allnodes'] = ret return ret def _cpv_to_cp(cpv): ret = portage.cpv_getkey(cpv) if ret: return ret else: return cpv def _cpv_to_version(cpv): return portage.versions.cpv_getversion(cpv) def _process_emerge_err(stdout, stderr): ''' Used to parse emerge output to provide meaningful output when emerge fails ''' ret = {} changes = {} rexp = re.compile(r'^[<>=][^ ]+/[^ ]+ [^\n]+', re.M) slot_conflicts = re.compile(r'^[^ \n]+/[^ ]+:[^ ]', re.M).findall(stderr) if slot_conflicts: changes['slot conflicts'] = slot_conflicts blocked = re.compile(r'(?m)^\[blocks .+\] ' r'([^ ]+/[^ ]+-[0-9]+[^ ]+)' r'.*$').findall(stdout) unsatisfied = re.compile( r'Error: The above package list contains').findall(stderr) # If there were blocks and emerge could not resolve it. if blocked and unsatisfied: changes['blocked'] = blocked sections = re.split('\n\n', stderr) for section in sections: if 'The following keyword changes' in section: changes['keywords'] = rexp.findall(section) elif 'The following license changes' in section: changes['license'] = rexp.findall(section) elif 'The following USE changes' in section: changes['use'] = rexp.findall(section) elif 'The following mask changes' in section: changes['mask'] = rexp.findall(section) ret['changes'] = {'Needed changes': changes} return ret def check_db(*names, **kwargs): ''' .. versionadded:: 0.17.0 Returns a dict containing the following information for each specified package: 1. A key ``found``, which will be a boolean value denoting if a match was found in the package database. 2. If ``found`` is ``False``, then a second key called ``suggestions`` will be present, which will contain a list of possible matches. This list will be empty if the package name was specified in ``category/pkgname`` format, since the suggestions are only intended to disambiguate ambiguous package names (ones submitted without a category). CLI Examples: .. 
code-block:: bash salt '*' pkg.check_db <package1> <package2> <package3> ''' ### NOTE: kwargs is not used here but needs to be present due to it being ### required in the check_db function in other package providers. ret = {} for name in names: if name in ret: log.warning('pkg.check_db: Duplicate package name \'{0}\' ' 'submitted'.format(name)) continue if '/' not in name: ret.setdefault(name, {})['found'] = False ret[name]['suggestions'] = porttree_matches(name) else: ret.setdefault(name, {})['found'] = name in _allnodes() if ret[name]['found'] is False: ret[name]['suggestions'] = [] return ret def ex_mod_init(low): ''' If the config option ``ebuild.enforce_nice_config`` is set to True, this module will enforce a nice tree structure for /etc/portage/package.* configuration files. .. versionadded:: 0.17.0 Initial automatic enforcement added when pkg is used on a Gentoo system. .. versionchanged:: 2014.1.0-Hydrogen Configure option added to make this behaviour optional, defaulting to off. .. seealso:: ``ebuild.ex_mod_init`` is called automatically when a state invokes a pkg state on a Gentoo system. :py:func:`salt.states.pkg.mod_init` ``ebuild.ex_mod_init`` uses ``portage_config.enforce_nice_config`` to do the lifting. :py:func:`salt.modules.portage_config.enforce_nice_config` CLI Example: .. code-block:: bash salt '*' pkg.ex_mod_init ''' if __salt__['config.get']('ebuild.enforce_nice_config', False): __salt__['portage_config.enforce_nice_config']() return True def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ... ''' refresh = salt.utils.is_true(kwargs.pop('refresh', True)) if len(names) == 0: return '' # Refresh before looking for the latest version available if refresh: refresh_db() ret = {} # Initialize the dict with empty strings for name in names: ret[name] = '' installed = _cpv_to_version(_vartree().dep_bestmatch(name)) avail = _cpv_to_version(_porttree().dep_bestmatch(name)) if avail: ret[name] = avail # Return a string if only one package name passed if len(names) == 1: return ret[names[0]] return ret # available_version is being deprecated available_version = salt.utils.alias_function(latest_version, 'available_version') def _get_upgradable(backtrack=3): ''' Utility function to get upgradable packages Sample return data: { 'pkgname': '1.2.3-45', ... } ''' cmd = ['emerge', '--ask', 'n', '--backtrack', '{0}'.format(backtrack), '--pretend', '--update', '--newuse', '--deep', '@world'] call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += call['stderr'] if 'stdout' in call: comment += call['stdout'] raise CommandExecutionError( '{0}'.format(comment) ) else: out = call['stdout'] rexp = re.compile(r'(?m)^\[.+\] ' r'([^ ]+/[^ ]+)' # Package string '-' r'([0-9]+[^ ]+)' # Version r'.*$') keys = ['name', 'version'] _get = lambda l, k: l[keys.index(k)] upgrades = rexp.findall(out) ret = {} for line in upgrades: name = _get(line, 'name') version_num = _get(line, 'version') ret[name] = version_num return ret def list_upgrades(refresh=True, backtrack=3): ''' List all available package upgrades. refresh Whether or not to sync the portage tree before checking for upgrades. 
backtrack Specifies an integer number of times to backtrack if dependency calculation fails due to a conflict or an unsatisfied dependency (default: ´3´). .. versionadded: 2015.8.0 CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' if salt.utils.is_true(refresh): refresh_db() return _get_upgradable(backtrack) def upgrade_available(name): ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return latest_version(name) != '' def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ... ''' return __salt__['pkg_resource.version'](*names, **kwargs) def porttree_matches(name): ''' Returns a list containing the matches for a given package name from the portage tree. Note that the specific version of the package will not be provided for packages that have several versions in the portage tree, but rather the name of the package (i.e. "dev-python/paramiko"). ''' matches = [] for category in _porttree().dbapi.categories: if _porttree().dbapi.cp_list(category + "/" + name): matches.append(category + "/" + name) return matches def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret ret = {} pkgs = _vartree().dbapi.cpv_all() for cpv in pkgs: __salt__['pkg_resource.add_pkg'](ret, _cpv_to_cp(cpv), _cpv_to_version(cpv)) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def refresh_db(): ''' Updates the portage tree (emerge --sync). Uses eix-sync if available. CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' if 'eix.sync' in __salt__: return __salt__['eix.sync']() if 'makeconf.features_contains'in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'): # GPG sign verify is supported only for "webrsync" cmd = 'emerge-webrsync -q' # We prefer 'delta-webrsync' to 'webrsync' if salt.utils.which('emerge-delta-webrsync'): cmd = 'emerge-delta-webrsync -q' return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 else: if __salt__['cmd.retcode']('emerge --ask n --quiet --sync', python_shell=False) == 0: return True # We fall back to "webrsync" if "rsync" fails for some reason cmd = 'emerge-webrsync -q' # We prefer 'delta-webrsync' to 'webrsync' if salt.utils.which('emerge-delta-webrsync'): cmd = 'emerge-delta-webrsync -q' return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 def _flags_changed(inst_flags, conf_flags): ''' @type inst_flags: list @param inst_flags: list of use flags which were used when package was installed @type conf_flags: list @param conf_flags: list of use flags form portage/package.use @rtype: bool @return: True, if lists have changes ''' conf_flags = conf_flags[:] for i in inst_flags: try: conf_flags.remove(i) except ValueError: return True return True if conf_flags else False def install(name=None, refresh=False, pkgs=None, sources=None, slot=None, fromrepo=None, uses=None, binhost=None, **kwargs): ''' Install the passed package(s), add refresh=True to sync the portage tree before package is installed. name The name of the package to be installed. Note that this parameter is ignored if either "pkgs" or "sources" is passed. Additionally, please note that this option can only be used to emerge a package from the portage tree. To install a tbz2 package manually, use the "sources" option described below. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> refresh Whether or not to sync the portage tree before installing. version Install a specific version of the package, e.g. 1.0.9-r1. Ignored if "pkgs" or "sources" is passed. slot Similar to version, but specifies a valid slot to be installed. It will install the latest available version in the specified slot. Ignored if "pkgs" or "sources" or "version" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install sys-devel/gcc slot='4.4' fromrepo Similar to slot, but specifies the repository from the package will be installed. It will install the latest available version in the specified repository. Ignored if "pkgs" or "sources" or "version" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install salt fromrepo='gentoo' uses Similar to slot, but specifies a list of use flag. Ignored if "pkgs" or "sources" or "version" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install sys-devel/gcc uses='["nptl","-nossp"]' Multiple Package Installation Options: pkgs A list of packages to install from the portage tree. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar","~category/package:slot::repository[use]"]' sources A list of tbz2 packages to install. Must be passed as a list of dicts, with the keys being package names, and the values being the source URI or local path to the package. CLI Example: .. code-block:: bash salt '*' pkg.install sources='[{"foo": "salt://foo.tbz2"},{"bar": "salt://bar.tbz2"}]' binhost has two options try and force. try - tells emerge to try and install the package from a configured binhost. 
force - forces emerge to install the package from a binhost otherwise it fails out. Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} ''' log.debug('Called modules.pkg.install: {0}'.format( { 'name': name, 'refresh': refresh, 'pkgs': pkgs, 'sources': sources, 'kwargs': kwargs, 'binhost': binhost, } )) if salt.utils.is_true(refresh): refresh_db() try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) # Handle version kwarg for a single package target if pkgs is None and sources is None: version_num = kwargs.get('version') if version_num: pkg_params = {name: version_num} else: version_num = '' if slot is not None: version_num += ':{0}'.format(slot) if fromrepo is not None: version_num += '::{0}'.format(fromrepo) if uses is not None: version_num += '["{0}"]'.format('","'.join(uses)) pkg_params = {name: version_num} if pkg_params is None or len(pkg_params) == 0: return {} elif pkg_type == 'file': emerge_opts = ['tbz2file'] else: emerge_opts = [] if binhost == 'try': bin_opts = ['-g'] elif binhost == 'force': bin_opts = ['-G'] else: bin_opts = [] changes = {} if pkg_type == 'repository': targets = list() for param, version_num in six.iteritems(pkg_params): original_param = param param = _p_to_cp(param) if param is None: raise portage.dep.InvalidAtom(original_param) if version_num is None: targets.append(param) else: keyword = None match = re.match('^(~)?([<>])?(=)?([^<>=]*)$', version_num) if match: keyword, gt_lt, eq, verstr = match.groups() prefix = gt_lt or '' prefix += eq or '' # We need to delete quotes around use flag list elements verstr = verstr.replace("'", "") # If no prefix characters were supplied and verstr contains a version, use '=' if len(verstr) > 0 and verstr[0] != ':' and verstr[0] != '[': prefix = prefix or '=' target = '"{0}{1}-{2}"'.format(prefix, param, verstr) else: target = '"{0}{1}"'.format(param, verstr) else: target = '"{0}"'.format(param) if '[' in target: old = __salt__['portage_config.get_flags_from_package_conf']('use', target[1:-1]) __salt__['portage_config.append_use_flags'](target[1:-1]) new = __salt__['portage_config.get_flags_from_package_conf']('use', target[1:-1]) if old != new: changes[param + '-USE'] = {'old': old, 'new': new} target = target[:target.rfind('[')] + '"' if keyword is not None: __salt__['portage_config.append_to_package_conf']('accept_keywords', target[1:-1], ['~ARCH']) changes[param + '-ACCEPT_KEYWORD'] = {'old': '', 'new': '~ARCH'} if not changes: inst_v = version(param) if latest_version(param) == inst_v: all_uses = __salt__['portage_config.get_cleared_flags'](param) if _flags_changed(*all_uses): changes[param] = {'version': inst_v, 'old': {'use': all_uses[0]}, 'new': {'use': all_uses[1]}} targets.append(target) else: targets = pkg_params cmd = ['emerge', '--ask', 'n', '--quiet'] cmd.extend(bin_opts) cmd.extend(emerge_opts) cmd.extend(targets) old = list_pkgs() call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) if call['retcode'] != 0: return _process_emerge_err(call['stdout'], call['stderr']) new = list_pkgs() changes.update(salt.utils.compare_dicts(old, new)) return changes def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None): ''' Updates the passed package (emerge --update package) slot Restrict the update to a particular slot. 
It will update to the latest version within the slot. fromrepo Restrict the update to a particular repository. It will update to the latest version within the repository. binhost has two options try and force. try - tells emerge to try and install the package from a configured binhost. force - forces emerge to install the package from a binhost otherwise it fails out. Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.update <package name> ''' if salt.utils.is_true(refresh): refresh_db() full_atom = pkg if slot is not None: full_atom = '{0}:{1}'.format(full_atom, slot) if fromrepo is not None: full_atom = '{0}::{1}'.format(full_atom, fromrepo) if binhost == 'try': bin_opts = ['-g'] elif binhost == 'force': bin_opts = ['-G'] else: bin_opts = [] old = list_pkgs() cmd = ['emerge', '--ask', 'n', '--quiet', '--update', '--newuse', '--oneshot'] cmd.extend(bin_opts) cmd.append(full_atom) call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) if call['retcode'] != 0: return _process_emerge_err(call['stdout'], call['stderr']) new = list_pkgs() return salt.utils.compare_dicts(old, new) def upgrade(refresh=True, binhost=None, backtrack=3): ''' Run a full system upgrade (emerge -uDN @world) binhost has two options try and force. try - tells emerge to try and install the package from a configured binhost. force - forces emerge to install the package from a binhost otherwise it fails out. backtrack Specifies an integer number of times to backtrack if dependency calculation fails due to a conflict or an unsatisfied dependency (default: ´3´). .. versionadded: 2015.8.0 Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': ''} if salt.utils.is_true(refresh): refresh_db() if binhost == 'try': bin_opts = ['--getbinpkg'] elif binhost == 'force': bin_opts = ['--getbinpkgonly'] else: bin_opts = [] old = list_pkgs() cmd = ['emerge', '--ask', 'n', '--quiet', '--backtrack', '{0}'.format(backtrack), '--update', '--newuse', '--deep'] if bin_opts: cmd.extend(bin_opts) cmd.append('@world') call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if call['retcode'] != 0: ret['result'] = False if 'stderr' in call: ret['comment'] += call['stderr'] if 'stdout' in call: ret['comment'] += call['stdout'] else: __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret['changes'] = salt.utils.compare_dicts(old, new) return ret def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs): ''' Remove packages via emerge --unmerge. name The name of the package to be deleted. slot Restrict the remove to a specific slot. Ignored if ``name`` is None. fromrepo Restrict the remove to a specific slot. Ignored if ``name`` is None. Multiple Package Options: pkgs Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are ignored if this argument is present. Must be passed as a python list. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. 
code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package name> slot=4.4 fromrepo=gentoo salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() if name and not pkgs and (slot is not None or fromrepo is not None)and len(pkg_params) == 1: fullatom = name if slot is not None: targets = ['{0}:{1}'.format(fullatom, slot)] if fromrepo is not None: targets = ['{0}::{1}'.format(fullatom, fromrepo)] targets = [fullatom] else: targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = ['emerge', '--ask', 'n', '--quiet', '--unmerge', '--quiet-unmerge-warn'] + targets __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() return salt.utils.compare_dicts(old, new) def purge(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs): ''' Portage does not have a purge, this function calls remove followed by depclean to emulate a purge process name The name of the package to be deleted. slot Restrict the remove to a specific slot. Ignored if name is None. fromrepo Restrict the remove to a specific slot. Ignored if ``name`` is None. Multiple Package Options: pkgs Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are ignored if this argument is present. Must be passed as a python list. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> salt '*' pkg.purge <package name> slot=4.4 salt '*' pkg.purge <package1>,<package2>,<package3> salt '*' pkg.purge pkgs='["foo", "bar"]' ''' ret = remove(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs) ret.update(depclean(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs)) return ret def depclean(name=None, slot=None, fromrepo=None, pkgs=None): ''' Portage has a function to remove unused dependencies. If a package is provided, it will only removed the package if no other package depends on it. name The name of the package to be cleaned. slot Restrict the remove to a specific slot. Ignored if ``name`` is None. fromrepo Restrict the remove to a specific slot. Ignored if ``name`` is None. pkgs Clean multiple packages. ``slot`` and ``fromrepo`` arguments are ignored if this argument is present. Must be passed as a python list. Return a list containing the removed packages: CLI Example: .. code-block:: bash salt '*' pkg.depclean <package name> ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() if name and not pkgs and (slot is not None or fromrepo is not None)and len(pkg_params) == 1: fullatom = name if slot is not None: targets = ['{0}:{1}'.format(fullatom, slot)] if fromrepo is not None: targets = ['{0}::{1}'.format(fullatom, fromrepo)] targets = [fullatom] else: targets = [x for x in pkg_params if x in old] cmd = ['emerge', '--ask', 'n', '--quiet', '--depclean'] + targets __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() return salt.utils.compare_dicts(old, new) def version_cmp(pkg1, pkg2): ''' Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem making the comparison. CLI Example: .. 
code-block:: bash salt '*' pkg.version_cmp '0.2.4-0' '0.2.4.1-0' ''' regex = r'^~?([^:\[]+):?[^\[]*\[?.*$' ver1 = re.match(regex, pkg1) ver2 = re.match(regex, pkg2) if ver1 and ver2: return portage.versions.vercmp(ver1.group(1), ver2.group(1)) return None def version_clean(version): ''' Clean the version string removing extra data. CLI Example: .. code-block:: bash salt '*' pkg.version_clean <version_string> ''' return re.match(r'^~?[<>]?=?([^<>=:\[]+).*$', version) def check_extra_requirements(pkgname, pkgver): ''' Check if the installed package already has the given requirements. CLI Example: .. code-block:: bash salt '*' pkg.check_extra_requirements 'sys-devel/gcc' '~>4.1.2:4.1::gentoo[nls,fortran]' ''' keyword = None match = re.match('^(~)?([<>])?(=)?([^<>=]*)$', pkgver) if match: keyword, gt_lt, eq, verstr = match.groups() prefix = gt_lt or '' prefix += eq or '' # We need to delete quotes around use flag list elements verstr = verstr.replace("'", "") # If no prefix characters were supplied and verstr contains a version, use '=' if verstr[0] != ':' and verstr[0] != '[': prefix = prefix or '=' atom = '{0}{1}-{2}'.format(prefix, pkgname, verstr) else: atom = '{0}{1}'.format(pkgname, verstr) else: return True cpv = _porttree().dbapi.xmatch('bestmatch-visible', atom) if cpv == '': return False try: cur_repo, cur_use = _vartree().dbapi.aux_get(cpv, ['repository', 'USE']) except KeyError: return False des_repo = re.match(r'^.+::([^\[]+).*$', atom) if des_repo and des_repo.group(1) != cur_repo: return False des_uses = set(portage.dep.dep_getusedeps(atom)) cur_use = cur_use.split() if len([x for x in des_uses.difference(cur_use) if x[0] != '-' or x[1:] in cur_use]) > 0: return False if keyword: if not __salt__['portage_config.has_flag']('accept_keywords', atom, '~ARCH'): return False return True
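# A minimal standalone sketch (not part of the module above) showing how the
# version-spec regex shared by install() and check_extra_requirements() splits
# a requested version such as '~>=4.1.2:4.1::gentoo[nls]' into keyword,
# comparison prefix and version string.  The sample atom is only an example.
import re


def split_version_spec(verspec):
    match = re.match(r'^(~)?([<>])?(=)?([^<>=]*)$', verspec)
    if not match:
        return None
    keyword, gt_lt, eq, verstr = match.groups()
    prefix = (gt_lt or '') + (eq or '')
    return keyword, prefix, verstr


print(split_version_spec('~>=4.1.2:4.1::gentoo[nls]'))
# -> ('~', '>=', '4.1.2:4.1::gentoo[nls]'): the '~' requests ~ARCH keywording,
#    '>=' becomes the emerge atom prefix, and the remainder carries the
#    slot/repository/USE details appended to the package name.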
# Copyright (c) 2015,2016,2017 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """Tests for the `skewt` module.""" import matplotlib from matplotlib.gridspec import GridSpec import matplotlib.pyplot as plt import numpy as np import pytest from metpy.plots import Hodograph, SkewT from metpy.testing import check_and_silence_deprecation # Fixtures to make sure we have the right backend and consistent round from metpy.testing import patch_round, set_agg_backend # noqa: F401, I202 from metpy.units import units MPL_VERSION = matplotlib.__version__[0] @pytest.mark.mpl_image_compare(tolerance={'2': 6.45}.get(MPL_VERSION, 0.02), remove_text=True, style='default') def test_skewt_api(): """Test the SkewT API.""" with matplotlib.rc_context({'axes.autolimit_mode': 'data'}): fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot p = np.linspace(1000, 100, 10) t = np.linspace(20, -20, 10) u = np.linspace(-10, 10, 10) skew.plot(p, t, 'r') skew.plot_barbs(p, u, u) skew.ax.set_xlim(-20, 30) skew.ax.set_ylim(1000, 100) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def test_skewt_subplot(): """Test using SkewT on a sub-plot.""" fig = plt.figure(figsize=(9, 9)) SkewT(fig, subplot=(2, 2, 1)) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def test_skewt_gridspec(): """Test using SkewT on a sub-plot.""" fig = plt.figure(figsize=(9, 9)) gs = GridSpec(1, 2) SkewT(fig, subplot=gs[0, 1]) return fig def test_skewt_with_grid_enabled(): """Test using SkewT when gridlines are already enabled (#271).""" with plt.rc_context(rc={'axes.grid': True}): # Also tests when we don't pass in Figure SkewT() @pytest.mark.mpl_image_compare(tolerance=0., remove_text=True, style='default') def test_skewt_arbitrary_rect(): """Test placing the SkewT in an arbitrary rectangle.""" fig = plt.figure(figsize=(9, 9)) SkewT(fig, rect=(0.15, 0.35, 0.8, 0.3)) return fig def test_skewt_subplot_rect_conflict(): """Test the subplot/rect conflict failure.""" with pytest.raises(ValueError): SkewT(rect=(0.15, 0.35, 0.8, 0.3), subplot=(1, 1, 1)) @pytest.fixture() def test_profile(): """Return data for a test profile.""" return np.linspace(1000, 100, 10), np.linspace(20, -20, 10), np.linspace(25, -30, 10) @pytest.mark.mpl_image_compare(tolerance={'2': 0.89}.get(MPL_VERSION, 0.02), remove_text=True, style='default') def test_skewt_shade_cape_cin(test_profile): """Test shading CAPE and CIN on a SkewT plot.""" p, t, tp = test_profile with matplotlib.rc_context({'axes.autolimit_mode': 'data'}): fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.plot(p, t, 'r') skew.plot(p, tp, 'k') skew.shade_cape(p, t, tp) skew.shade_cin(p, t, tp) skew.ax.set_xlim(-50, 50) skew.ax.set_ylim(1000, 100) return fig @pytest.mark.mpl_image_compare(tolerance=0.02, remove_text=True, style='default') def test_skewt_shade_area(test_profile): """Test shading areas on a SkewT plot.""" p, t, tp = test_profile with matplotlib.rc_context({'axes.autolimit_mode': 'data'}): fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.plot(p, t, 'r') skew.plot(p, tp, 'k') skew.shade_area(p, t, tp) skew.ax.set_xlim(-50, 50) skew.ax.set_ylim(1000, 100) return fig def 
test_skewt_shade_area_invalid(test_profile): """Test shading areas on a SkewT plot.""" p, t, tp = test_profile fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.plot(p, t, 'r') skew.plot(p, tp, 'k') with pytest.raises(ValueError): skew.shade_area(p, t, tp, which='positve') @pytest.mark.mpl_image_compare(tolerance=0.02, remove_text=True, style='default') def test_skewt_shade_area_kwargs(test_profile): """Test shading areas on a SkewT plot with kwargs.""" p, t, tp = test_profile with matplotlib.rc_context({'axes.autolimit_mode': 'data'}): fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.plot(p, t, 'r') skew.plot(p, tp, 'k') skew.shade_area(p, t, tp, facecolor='m') skew.ax.set_xlim(-50, 50) skew.ax.set_ylim(1000, 100) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def test_skewt_wide_aspect_ratio(test_profile): """Test plotting a skewT with a wide aspect ratio.""" p, t, tp = test_profile fig = plt.figure(figsize=(12.5, 3)) skew = SkewT(fig) skew.plot(p, t, 'r') skew.plot(p, tp, 'k') skew.ax.set_xlim(-30, 50) skew.ax.set_ylim(1050, 700) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_api(): """Basic test of Hodograph API.""" fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(1, 1, 1) hodo = Hodograph(ax, component_range=60) hodo.add_grid(increment=5, color='k') hodo.plot([1, 10], [1, 10], color='red') hodo.plot_colormapped(np.array([1, 3, 5, 10]), np.array([2, 4, 6, 11]), np.array([0.1, 0.3, 0.5, 0.9]), cmap='Greys') return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_units(): """Test passing unit-ed quantities to Hodograph.""" fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(1, 1, 1) hodo = Hodograph(ax) u = np.arange(10) * units.kt v = np.arange(10) * units.kt hodo.plot(u, v) hodo.plot_colormapped(u, v, np.sqrt(u * u + v * v), cmap='Greys') ax.set_xlabel('') ax.set_ylabel('') return fig def test_hodograph_alone(): """Test to create Hodograph without specifying axes.""" Hodograph() @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_plot_colormapped(): """Test hodograph colored line with NaN values.""" u = np.arange(5., 65., 5) v = np.arange(-5., -65., -5) u[3] = np.nan v[6] = np.nan fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(1, 1, 1) hodo = Hodograph(ax, component_range=80) hodo.add_grid(increment=20, color='k') hodo.plot_colormapped(u, v, np.hypot(u, v), cmap='Greys') return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def test_skewt_barb_color(): """Test plotting colored wind barbs on the Skew-T.""" fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) p = np.linspace(1000, 100, 10) u = np.linspace(-10, 10, 10) skew.plot_barbs(p, u, u, c=u) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def test_skewt_barb_unit_conversion(): """Test that barbs units can be converted at plot time (#737).""" u_wind = np.array([3.63767155210412]) * units('m/s') v_wind = np.array([3.63767155210412]) * units('m/s') p_wind = np.array([500]) * units.hPa fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.ax.set_ylabel('') # remove_text doesn't do this as of pytest 0.9 skew.plot_barbs(p_wind, u_wind, v_wind, plot_units='knots') skew.ax.set_ylim(1000, 500) skew.ax.set_yticks([1000, 750, 500]) skew.ax.set_xlim(-20, 20) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default') def 
test_skewt_barb_no_default_unit_conversion(): """Test that barbs units are left alone by default (#737).""" u_wind = np.array([3.63767155210412]) * units('m/s') v_wind = np.array([3.63767155210412]) * units('m/s') p_wind = np.array([500]) * units.hPa fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) skew.ax.set_ylabel('') # remove_text doesn't do this as of pytest 0.9 skew.plot_barbs(p_wind, u_wind, v_wind) skew.ax.set_ylim(1000, 500) skew.ax.set_yticks([1000, 750, 500]) skew.ax.set_xlim(-20, 20) return fig @pytest.mark.parametrize('u,v', [(np.array([3]) * units('m/s'), np.array([3])), (np.array([3]), np.array([3]) * units('m/s'))]) def test_skewt_barb_unit_conversion_exception(u, v): """Test that errors are raise if unit conversion is requested on un-united data.""" p_wind = np.array([500]) * units.hPa fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) with pytest.raises(ValueError): skew.plot_barbs(p_wind, u, v, plot_units='knots') @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_plot_layers(): """Test hodograph colored height layers with interpolation.""" u = np.zeros((6)) * units.knots v = np.array([0, 10, 20, 30, 40, 50]) * units.knots heights = np.array([0, 1000, 2000, 3000, 4000, 5000]) * units.m intervals = np.array([500, 1500, 2500, 3500, 4500]) * units.m colors = ['r', 'g', 'b', 'r'] fig = plt.figure(figsize=(7, 7)) ax1 = fig.add_subplot(1, 1, 1) h = Hodograph(ax1) h.add_grid(increment=10) h.plot_colormapped(u, v, heights, colors=colors, intervals=intervals) ax1.set_xlim(-50, 50) ax1.set_ylim(-5, 50) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_plot_layers_different_units(): """Test hodograph colored height layers with interpolation and different units.""" u = np.zeros((6)) * units.knots v = np.array([0, 10, 20, 30, 40, 50]) * units.knots heights = np.array([0, 1, 2, 3, 4, 5]) * units.km intervals = np.array([500, 1500, 2500, 3500, 4500]) * units.m colors = ['r', 'g', 'b', 'r'] fig = plt.figure(figsize=(7, 7)) ax1 = fig.add_subplot(1, 1, 1) h = Hodograph(ax1) h.add_grid(increment=10) h.plot_colormapped(u, v, heights, colors=colors, intervals=intervals) ax1.set_xlim(-50, 50) ax1.set_ylim(-5, 50) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_plot_layers_bound_units(): """Test hodograph colored height layers with interpolation and different units.""" u = np.zeros((6)) * units.knots v = np.array([0, 10, 20, 30, 40, 50]) * units.knots heights = np.array([0, 1000, 2000, 3000, 4000, 5000]) * units.m intervals = np.array([0.5, 1.5, 2.5, 3.5, 4.5]) * units.km colors = ['r', 'g', 'b', 'r'] fig = plt.figure(figsize=(7, 7)) ax1 = fig.add_subplot(1, 1, 1) h = Hodograph(ax1) h.add_grid(increment=10) h.plot_colormapped(u, v, heights, colors=colors, intervals=intervals) ax1.set_xlim(-50, 50) ax1.set_ylim(-5, 50) return fig @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) def test_hodograph_plot_arbitrary_layer(): """Test hodograph colored layers for arbitrary variables without interpolation.""" u = np.arange(5, 65, 5) * units('knot') v = np.arange(-5, -65, -5) * units('knot') speed = np.sqrt(u ** 2 + v ** 2) colors = ['red', 'green', 'blue'] levels = [0, 10, 20, 30] * units('knot') fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(1, 1, 1) hodo = Hodograph(ax, component_range=80) hodo.add_grid(increment=20, color='k') hodo.plot_colormapped(u, v, speed, intervals=levels, colors=colors) return fig @pytest.mark.mpl_image_compare(tolerance=0, 
remove_text=True) def test_hodograph_wind_vectors(): """Test plotting wind vectors onto a hodograph.""" u_wind = np.array([-10, -7, 0, 7, 10, 7, 0, -7]) v_wind = np.array([0, 7, 10, 7, 0, -7, -10, -7]) fig = plt.figure(figsize=(6, 6)) ax = fig.add_subplot(1, 1, 1) h = Hodograph(ax, component_range=20) h.plot(u_wind, v_wind, linewidth=3) h.wind_vectors(u_wind, v_wind) return fig @pytest.mark.xfail def test_united_hodograph_range(): """Tests making a hodograph with a united ranged.""" fig = plt.figure(figsize=(6, 6)) ax = fig.add_subplot(1, 1, 1) Hodograph(ax, component_range=60. * units.knots) @check_and_silence_deprecation def test_plot_colormapped_bounds_deprecation(): """Test deprecation of bounds kwarg in `plot_colormapped`.""" u = np.zeros((6)) * units.knots v = np.array([0, 10, 20, 30, 40, 50]) * units.knots heights = np.array([0, 1000, 2000, 3000, 4000, 5000]) * units.m intervals = np.array([500, 1500, 2500, 3500, 4500]) * units.m colors = ['r', 'g', 'b', 'r'] fig = plt.figure(figsize=(7, 7)) ax1 = fig.add_subplot(1, 1, 1) h = Hodograph(ax1) h.plot_colormapped(u, v, heights, colors=colors, bounds=intervals)
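# A hedged sketch (not one of the image-compare tests above) that combines the
# two plot classes exercised in this file on a single figure via GridSpec.  It
# reuses the module-level imports above; the profile values are placeholders,
# and only the SkewT/Hodograph calls shown in the tests are assumed to exist.
def sketch_skewt_with_hodograph():
    fig = plt.figure(figsize=(9, 9))
    gs = GridSpec(3, 3)

    # Skew-T occupies the left two-thirds of the grid.
    skew = SkewT(fig, subplot=gs[:, :2])
    p = np.linspace(1000, 100, 10)
    t = np.linspace(20, -20, 10)
    skew.plot(p, t, 'r')
    skew.plot_dry_adiabats()
    skew.plot_moist_adiabats()
    skew.plot_mixing_lines()
    skew.ax.set_ylim(1000, 100)

    # Hodograph in the top-right cell.
    ax = fig.add_subplot(gs[0, 2])
    hodo = Hodograph(ax, component_range=60)
    hodo.add_grid(increment=20, color='k')
    hodo.plot(np.linspace(-10, 10, 10), np.linspace(-10, 10, 10), color='red')
    return fig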
# Comment
import sys
import time


def foobar(string, count=10, sleep=False):
    """
    Docstring for the method.
    """
    string = str(string)
    count = int(count)
    for i in range(count):
        print(string)
        if sleep:
            time.sleep(1)
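# The script above imports sys but never uses it, which suggests a command-line
# entry point was intended.  A minimal assumed wrapper (the argument layout is a
# guess, not part of the original file):
if __name__ == '__main__':
    message = sys.argv[1] if len(sys.argv) > 1 else 'hello'
    repeat = int(sys.argv[2]) if len(sys.argv) > 2 else 10
    foobar(message, count=repeat, sleep=False)
    # Usage: python foobar.py hello 3  -> prints "hello" three times.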
import os import glob import pytest import pdb from VCF.VcfUtils import VcfUtils # test_VcfUtils.py @pytest.fixture def vcf_object(bcftools_folder, bgzip_folder, gatk_jar_folder, datadir): """Returns a VcfUtils object""" vcf_file = "{0}/test.vcf.gz".format(datadir) vcflist = ['test.vcf.gz','test1.vcf.gz'] vcflist = [ "{0}/{1}".format(datadir, x) for x in vcflist] vcf_object = VcfUtils(vcf=vcf_file, bgzip_folder=bgzip_folder, vcflist=vcflist, bcftools_folder=bcftools_folder, gatk_folder=gatk_jar_folder) return vcf_object @pytest.fixture def vcf_ambiguity(datadir): """ Returns a VcfUtils object that contains the REF or ALT column with some ambiguity codes """ vcf_file = "{0}/test.amb.vcf.gz".format(datadir) vcf_object = VcfUtils(vcf=vcf_file) return vcf_object def test_correct_ambiguity(vcf_ambiguity, datadir): outfile = vcf_ambiguity.correct_ambiguity_codes(outfile="{0}/outdir/test.corrected.vcf.gz".format(datadir)) assert os.path.exists("{0}/outdir/test.corrected.vcf.gz".format(datadir)) def test_add_to_header(vcf_ambiguity, datadir): outfile = vcf_ambiguity.add_to_header(header_f="{0}/newheader.txt".format(datadir), outfilename="{0}/outdir/modified_header.txt".format(datadir), line_ann='##INFO=test"') assert os.path.exists("{0}/outdir/modified_header.txt".format(datadir)) def test_vcf_reheader(vcf_object, datadir): outfile = vcf_object.reheader(newheader="{0}/newheader.txt".format(datadir), outprefix="{0}/outdir/test1".format(datadir), verbose=True) assert os.path.exists("{0}/outdir/test1.reheaded.vcf.gz".format(datadir)) def test_vcf_reheader_with_samplef(vcf_object, datadir): """ Test the reheader method and add new sample names """ outfile = vcf_object.reheader(newheader="{0}/newheader.txt".format(datadir), samplefile="{0}/samples.txt".format(datadir), outprefix="{0}/outdir/test2".format(datadir)) assert os.path.exists("{0}/outdir/test2.reheaded.vcf.gz".format(datadir)) def test_combine_uncompressed(vcf_object, datadir, clean_tmp): """ Test the combine method producing a VCF """ vcf_object.combine(labels=['gatk', 'lc_bcftools'], reference="{0}/exampleFASTA.fasta".format(datadir), outprefix='out_combine', outdir="{0}/outdir/".format(datadir), verbose=True, genotypemergeoption='UNIQUIFY') assert os.path.exists("{0}/outdir/out_combine.vcf".format(datadir)) assert os.path.exists("{0}/outdir/out_combine.vcf.idx".format(datadir)) def test_combine_compressed(vcf_object, datadir, clean_tmp): """ Test the combine method producing a VCF.gz file and passing also some options """ vcf_object.combine(labels=['gatk', 'lc_bcftools'], reference="{0}/exampleFASTA.fasta".format(datadir), outprefix='out_combine', outdir="{0}/outdir/".format(datadir), compress=True, genotypemergeoption='UNIQUIFY', options=['-env', '-sites_only', '--filteredAreUncalled']) assert os.path.exists("{0}/outdir/out_combine.vcf.gz".format(datadir)) def test_change_chrnames_2ensembl(vcf_object, datadir): """ Test the method to change the style of the chrnames (from UCSC to Ensembl) """ vcf_object.rename_chros(chr_types='ensembl', outfile="{0}/outdir/test.ensembl.vcf.gz".format(datadir)) vcf_object.rename_chros(chr_types='ensembl', compress=False, outfile="{0}/outdir/test.ensembl.vcf".format(datadir)) assert os.path.exists("{0}/outdir/test.ensembl.vcf.gz".format(datadir)) assert os.path.exists("{0}/outdir/test.ensembl.vcf".format(datadir)) def test_change_chrnames_2ucsc(datadir, bgzip_folder, clean_tmp): """ Test the method to change the style of the chrnames (from Ensembl to UCSC) """ vcf_object = 
VcfUtils(vcf="{0}/outdir/test.ensembl.vcf.gz".format(datadir), bgzip_folder=bgzip_folder) vcf_object.rename_chros(chr_types='ucsc', outfile="{0}/outdir/test.ucsc.vcf.gz".format(datadir)) vcf_object.rename_chros(chr_types='ucsc', outfile="{0}/outdir/test.ucsc.vcf".format(datadir), compress=False) assert os.path.exists("{0}/outdir/test.ucsc.vcf.gz".format(datadir)) assert os.path.exists("{0}/outdir/test.ucsc.vcf".format(datadir)) def test_drop_genotypes(vcf_object, datadir, clean_tmp): """ Test the method to drop the genotype information from a VCF file """ vcf_object.drop_genotypes(outfile="{0}/outdir/test.sites.vcf.gz".format(datadir), verbose=True) assert os.path.exists("{0}/outdir/test.sites.vcf.gz".format(datadir)) def test_drop_info(vcf_object, datadir, clean_tmp): """ Test the method to drop the INFO annotation from a VCF file """ vcf_object.drop_info(outfile="{0}/outdir/test.noinfo.vcf.gz".format(datadir), verbose=True) assert os.path.exists("{0}/outdir/test.noinfo.vcf.gz".format(datadir)) def test_convert_PL2GL(datadir, bcftools_folder, clean_tmp): ''' Test the method change PL fields to GL in a VCF file ''' vcf_object = VcfUtils(vcf="{0}/test.gatk.vcf.gz".format(datadir), bcftools_folder=bcftools_folder) vcf_object.convert_PL2GL(outfile="{0}/outdir/test.gatk.GL.vcf.gz".format(datadir), verbose=True) assert os.path.exists("{0}/outdir/test.gatk.GL.vcf.gz".format(datadir))
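# The tests above rely on conftest-provided fixtures (datadir, bcftools_folder,
# bgzip_folder, gatk_jar_folder, clean_tmp) that are not shown in this file.
# Below is a hypothetical minimal conftest.py sketch; every path and value is
# an assumption about how those fixtures could be wired up for a local run.
import glob
import os

import pytest


@pytest.fixture
def datadir():
    # Directory holding test.vcf.gz, newheader.txt, exampleFASTA.fasta, etc.
    return os.path.abspath('data')


@pytest.fixture
def bcftools_folder():
    return '/usr/local/bin'


@pytest.fixture
def bgzip_folder():
    return '/usr/local/bin'


@pytest.fixture
def gatk_jar_folder():
    return '/opt/gatk'


@pytest.fixture
def clean_tmp(datadir):
    # Remove files created under <datadir>/outdir once a test finishes.
    yield
    for f in glob.glob("{0}/outdir/*".format(datadir)):
        if os.path.isfile(f):
            os.remove(f)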
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 系统内部使用工具 Case Name : 使用pg_config -V命令打印pg_config的版本信息然后退出 Description : 1.使用pg_config工具打印版本信息 Expect : 1.使用pg_config工具打印版本信息失败 History : """ import unittest from testcase.utils.Constant import Constant from testcase.utils.Logger import Logger from yat.test import Node from yat.test import macro LOG = Logger() class LogicalReplication(unittest.TestCase): def setUp(self): LOG.info('----------------this is setup-----------------------') LOG.info( '---Opengauss_Function_Tools_pg_config_Case0024开始执行-----') self.constant = Constant() self.user_node = Node('dbuser') self.env_path = macro.DB_ENV_PATH self.user_node = Node('PrimaryDbUser') def test_system_internal_tools(self): LOG.info( '---------------------查看版本信息-------------------------') excute_cmd = f'''source {self.env_path}; pg_config -V; ''' LOG.info(excute_cmd) msg = self.user_node.sh(excute_cmd).result() LOG.info(msg) self.assertIn('invalid argument', msg) def tearDown(self): LOG.info('----------------this is tearDown-----------------------') # 无需清理环境 LOG.info( '-----Opengauss_Function_Tools_pg_config_Case0024执行完成---')
""" ============= ============= """ import importlib import os import sys import click from . import ff, with_appcontext, app def get_migrations_root(migrations_root): migrations_root = migrations_root or os.path.join( os.environ.get('FANTASY_MIGRATION_PATH', os.environ['FANTASY_WORKSPACE']), 'migrations') return os.path.expanduser(migrations_root) @ff.command() @click.option('--migrations-root', type=click.Path(exists=False)) @with_appcontext def makemigrations(migrations_root): """a command same as django makemigrations migrations path search order: 1. migrations_root set by user 1. environment: FANTASY_MIGRATION_PATH 1. environment: FANTASY_WORKSPACE + /migrations """ from flask_migrate import (Migrate, init as migrate_init, migrate as migrate_exec) migrations_root = get_migrations_root(migrations_root) mig = Migrate(app, app.db, directory=migrations_root) if not os.path.exists(migrations_root): migrate_init(migrations_root) pass models_file = os.path.join(migrations_root, 'models.txt') if not os.path.exists(models_file): with open(models_file, 'w') as fw: fw.write('# add module name in this file.') pass pass with open(models_file, 'r') as fp: modules = fp.readlines() pass modules = filter(lambda x: x.strip("\n"), modules) modules = map(lambda x: x.strip("\n").split("#")[0].strip(), modules) modules = list(filter(lambda x: x, modules)) if not modules: click.echo( click.style('No Model found,' 'skip create migrations...' 'You need edit %s file set your module' % models_file, fg='yellow')) sys.exit(0) for m in modules: importlib.import_module(m + '.models') pass migrate_exec(migrations_root) mig.init_app(app, app.db) pass @ff.command() @click.option('--migrations-root', type=click.Path(exists=False)) @with_appcontext def migrate(migrations_root): """a command same as django migrate ..note:: if database not exist, will create it. the default charset use """ from flask_migrate import Migrate, upgrade as migrate_upgrade from flask_sqlalchemy import SQLAlchemy from sqlalchemy.engine.url import make_url from sqlalchemy_utils import database_exists, create_database if not app.config['SQLALCHEMY_DATABASE_URI']: click.echo( click.style( 'no SQLALCHEMY_DATABASE_URI config found,skip migrate...', fg='red')) sys.exit(-1) dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI']) if not database_exists(dsn): create_database(dsn, encoding=app.config.get('SQLALCHEMY_DATABASE_CHARSET', 'utf8mb4')) pass migrations_root = get_migrations_root(migrations_root) if not os.path.exists(migrations_root): click.echo( click.style('migration files not exist,skip migrate...', fg='red')) sys.exit(-1) db = SQLAlchemy() mig = Migrate(app, db, directory=migrations_root) mig.init_app(app, db) migrate_upgrade(migrations_root) pass
def get_db_cols(cur, table_name, schema='public', type_map=True):
    """
    Gets the column names of a given table.

    If type_map is true, also returns a dictionary mapping each column name
    to the corresponding postgres column type.
    """
    db_cols_sql = """SELECT column_name, data_type
                     FROM information_schema.columns
                     WHERE table_schema = '{}'
                     AND table_name = '{}';
                  """.format(schema, table_name)
    cur.execute(db_cols_sql)
    # Note: the first returned row is skipped here.
    res_rows = [row for row in cur][1:]
    cols = [row[0] for row in res_rows]
    if type_map:
        return cols, dict(res_rows)
    return cols


def add_columns(cur, table_name, new_columns, type_map):
    """
    Add new columns to a database table.

    'type_map' is a dictionary mapping each column name to the corresponding
    postgres column type.
    """
    alter_sql = "ALTER TABLE {} {};".format(
        table_name,
        ','.join(
            map(lambda x: 'ADD COLUMN {} {}'.format(x, type_map.get(x)),
                new_columns)))
    cur.execute(alter_sql)
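# A hedged usage sketch for the two helpers above; psycopg2, the connection
# parameters and the table/column names are assumptions, not part of the
# original snippet.
import psycopg2

conn = psycopg2.connect(dbname='mydb', user='postgres')
with conn:
    with conn.cursor() as cur:
        cols, col_types = get_db_cols(cur, 'events')
        wanted = {'payload': 'jsonb', 'created_at': 'timestamp'}
        missing = [c for c in wanted if c not in cols]
        if missing:
            # add_columns() expects the type_map to cover every new column.
            add_columns(cur, 'events', missing, wanted)
conn.close()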
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import logging from ast import literal_eval from odoo import fields, models, _, api from odoo.exceptions import UserError from odoo.fields import Datetime _logger = logging.getLogger(__name__) class Employee(models.AbstractModel): _inherit = 'hr.employee.base' email_sent = fields.Boolean(default=False) ip_connected = fields.Boolean(default=False) manually_set_present = fields.Boolean(default=False) # Stored field used in the presence kanban reporting view # to allow group by state. hr_presence_state_display = fields.Selection([ ('present', 'Present'), ('absent', 'Absent'), ('to_define', 'To Define')]) def _compute_presence_state(self): super()._compute_presence_state() employees = self.filtered(lambda employee: employee.hr_presence_state != 'present' and not employee.is_absent) company = self.env.company for employee in employees: if not employee.is_absent and company.hr_presence_last_compute_date and company.hr_presence_last_compute_date.day == Datetime.now().day and \ (employee.email_sent or employee.ip_connected or employee.manually_set_present): employee.hr_presence_state = 'present' @api.model def _check_presence(self): company = self.env.company if not company.hr_presence_last_compute_date or \ company.hr_presence_last_compute_date.day != Datetime.now().day: self.env['hr.employee'].search([ ('company_id', '=', company.id) ]).write({ 'email_sent': False, 'ip_connected': False, 'manually_set_present': False }) employees = self.env['hr.employee'].search([('company_id', '=', company.id)]) all_employees = employees # Check on IP if literal_eval(self.env['ir.config_parameter'].sudo().get_param('hr.hr_presence_control_ip', 'False')): ip_list = company.hr_presence_control_ip_list ip_list = ip_list.split(',') if ip_list else [] ip_employees = self.env['hr.employee'] for employee in employees: employee_ips = self.env['res.users.log'].search([ ('create_uid', '=', employee.user_id.id), ('ip', '!=', False), ('create_date', '>=', Datetime.to_string(Datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)))] ).mapped('ip') if any([ip in ip_list for ip in employee_ips]): ip_employees |= employee ip_employees.write({'ip_connected': True}) employees = employees - ip_employees # Check on sent emails if literal_eval(self.env['ir.config_parameter'].sudo().get_param('hr.hr_presence_control_email', 'False')): email_employees = self.env['hr.employee'] threshold = company.hr_presence_control_email_amount for employee in employees: sent_emails = self.env['mail.message'].search_count([ ('author_id', '=', employee.user_id.partner_id.id), ('date', '>=', Datetime.to_string(Datetime.now().replace(hour=0, minute=0, second=0, microsecond=0))), ('date', '<=', Datetime.to_string(Datetime.now()))]) if sent_emails >= threshold: email_employees |= employee email_employees.write({'email_sent': True}) employees = employees - email_employees company.sudo().hr_presence_last_compute_date = Datetime.now() for employee in all_employees: employee.hr_presence_state_display = employee.hr_presence_state @api.model def _action_open_presence_view(self): # Compute the presence/absence for the employees on the same # company than the HR/manager. 
Then opens the kanban view # of the employees with an undefined presence/absence _logger.info("Employees presence checked by: %s" % self.env.user.name) self._check_presence() return { "type": "ir.actions.act_window", "res_model": "hr.employee", "views": [[self.env.ref('hr_presence.hr_employee_view_kanban').id, "kanban"], [False, "tree"], [False, "form"]], 'view_mode': 'kanban,tree,form', "domain": [], "name": "Employee's Presence to Define", "context": {'search_default_group_hr_presence_state': 1}, } def action_set_present(self): if not self.env.user.has_group('hr.group_hr_manager'): raise UserError(_("You don't have the right to do this. Please contact an Administrator.")) self.write({'manually_set_present': True}) def write(self, vals): if vals.get('hr_presence_state_display') == 'present': vals['manually_set_present'] = True return super().write(vals) def action_open_leave_request(self): self.ensure_one() return { "type": "ir.actions.act_window", "res_model": "hr.leave", "views": [[False, "form"]], "view_mode": 'form', "context": {'default_employee_id': self.id}, } # -------------------------------------------------- # Messaging # -------------------------------------------------- def action_send_sms(self): self.ensure_one() if not self.env.user.has_group('hr.group_hr_manager'): raise UserError(_("You don't have the right to do this. Please contact an Administrator.")) if not self.mobile_phone: raise UserError(_("There is no professional mobile for this employee.")) context = dict(self.env.context) context.update(default_res_model='hr.employee', default_res_id=self.id, default_composition_mode='comment', default_number_field_name='mobile_phone') template = self.env.ref('hr_presence.sms_template_presence', False) if not template: context['default_body'] = _("""Exception made if there was a mistake of ours, it seems that you are not at your office and there is not request of leaves from you. Please, take appropriate measures in order to carry out this work absence. Do not hesitate to contact your manager or the human resource department.""") else: context['default_template_id'] = template.id return { "type": "ir.actions.act_window", "res_model": "sms.composer", "view_mode": 'form', "context": context, "name": "Send SMS Text Message", "target": "new", } def action_send_mail(self): self.ensure_one() if not self.env.user.has_group('hr.group_hr_manager'): raise UserError(_("You don't have the right to do this. Please contact an Administrator.")) if not self.work_email: raise UserError(_("There is no professional email address for this employee.")) template = self.env.ref('hr_presence.mail_template_presence', False) compose_form = self.env.ref('mail.email_compose_message_wizard_form', False) ctx = dict( default_model="hr.employee", default_res_id=self.id, default_use_template=bool(template), default_template_id=template.id, default_composition_mode='comment', default_is_log=True, custom_layout='mail.mail_notification_light', ) return { 'name': _('Compose Email'), 'type': 'ir.actions.act_window', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'views': [(compose_form.id, 'form')], 'view_id': compose_form.id, 'target': 'new', 'context': ctx, }
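# Standalone sketch of the two configuration lookups _check_presence() above
# relies on: the ir.config_parameter values are stored as strings and parsed
# with literal_eval, and the allowed-IP list is a comma-separated string on the
# company record.  The dictionary below merely stands in for
# self.env['ir.config_parameter']; the sample values are assumptions.
from ast import literal_eval

params = {'hr.hr_presence_control_ip': 'True', 'hr.hr_presence_control_email': 'False'}
ip_check_enabled = literal_eval(params.get('hr.hr_presence_control_ip', 'False'))
email_check_enabled = literal_eval(params.get('hr.hr_presence_control_email', 'False'))

hr_presence_control_ip_list = '192.168.1.10,10.0.0.5'   # sample company value
ip_list = hr_presence_control_ip_list.split(',') if hr_presence_control_ip_list else []
print(ip_check_enabled, email_check_enabled, ip_list)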
import typing from .. import exceptions from ..protocol import Protocol, Request, SuccessResponse, ErrorResponse PARSE_ERROR_CODE = -32700 INVALID_REQUEST_CODE = -32600 METHOD_NOT_FOUND_CODE = -32601 INVALID_PARAMS_CODE = -32602 INTERNAL_ERROR_CODE = -32603 MIN_VALID_SERVER_ERROR_CODE = -32099 MAX_VALID_SERVER_ERROR_CODE = -32000 class JSONRPCParseError(exceptions.ParseError, exceptions.RpcError): def __init__(self, message="Parse error"): super().__init__(PARSE_ERROR_CODE, message) class JSONRPCInvalidRequestError(exceptions.InvalidRequestError, exceptions.RpcError): def __init__(self, message="Invalid Request"): super().__init__(INVALID_REQUEST_CODE, message) class JSONRPCMethodNotFoundError(exceptions.MethodNotFoundError, exceptions.RpcError): def __init__(self, message="Method not found"): super().__init__(METHOD_NOT_FOUND_CODE, message) class JSONRPCInvalidParamsError(exceptions.InvalidParamsError, exceptions.RpcError): def __init__(self, message="Invalid params"): super().__init__(INVALID_PARAMS_CODE, message) class JSONRPCInternalError(exceptions.InternalError, exceptions.RpcError): def __init__(self, message="Internal error"): super().__init__(INTERNAL_ERROR_CODE, message) errors_by_code = { PARSE_ERROR_CODE: JSONRPCParseError, INVALID_REQUEST_CODE: JSONRPCInvalidRequestError, METHOD_NOT_FOUND_CODE: JSONRPCMethodNotFoundError, INVALID_PARAMS_CODE: JSONRPCInvalidParamsError, INTERNAL_ERROR_CODE: JSONRPCInternalError, } class JSONRPCServerError(exceptions.ServerError, exceptions.RpcError): @classmethod def check_valid(cls, code): return MIN_VALID_SERVER_ERROR_CODE <= code <= MAX_VALID_SERVER_ERROR_CODE def __init__(self, code: int = MAX_VALID_SERVER_ERROR_CODE): if not self.check_valid(code): raise ValueError("Wrong code for Server error!") super().__init__(code, "Server error") class JSONRPCInvalidRequestError(exceptions.BaseError): pass class JSONRPCRequest(Request): def __init__(self, method: str, uid: typing.Optional[typing.Any] = None, args: typing.Optional[list] = None, kwargs: typing.Optional[dict] = None): if args and kwargs: raise JSONRPCInvalidRequestError('Does not support args and kwargs at the same time.') super().__init__(method, uid, args, kwargs) def data(self): data = { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'method': self.method, } if self.args: data['params'] = self.args if self.kwargs: data['params'] = self.kwargs if self.uid is not None: data['id'] = self.uid return data class JSONRPCSuccessResponse(SuccessResponse): def to_data(self): return { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'id': self.uid, 'result': self.result } class JSONRPCErrorResponse(ErrorResponse): def to_exception(self): error = errors_by_code.get(self.code) if not error and JSONRPCServerError.check_valid(self.code): error = JSONRPCServerError(self.code) if not error: error = exceptions.RpcError(self.code, self.message, self.data) return error def to_data(self): data = { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'id': self.uid, 'error': { 'code': self.code, 'message': self.message, } } if self.data: data['error']['data'] = self.data return data class JSONRPCProtocol(Protocol): """JSONRPC version 2.0 protocol implementation.""" JSON_RPC_VERSION = "2.0" _ALLOWED_REPLY_KEYS = sorted(['id', 'jsonrpc', 'error', 'result']) _ALLOWED_REQUEST_KEYS = sorted(['id', 'jsonrpc', 'method', 'params']) def __init__(self, counter: int = 0): """Creates new protocol object. 
:type counter: start request id counter value """ self._counter = counter def _get_uid(self): self._counter += 1 return self._counter def create_request(self, method: str, args: list = None, kwargs: dict = None, one_way: bool = False) -> JSONRPCRequest: if args and kwargs: raise JSONRPCInvalidRequestError('Does not support args and kwargs at the same time.') uid = None if one_way else self._get_uid() return JSONRPCRequest(method, uid, args, kwargs) def create_response(self, request: Request, reply: typing.Any) -> JSONRPCSuccessResponse: return JSONRPCSuccessResponse(request.uid, reply) def create_error_response(self, exception: Exception, request: typing.Optional[Request] = None) -> JSONRPCErrorResponse: uid = request.uid if request else None if isinstance(exception, exceptions.RpcError): pass elif isinstance(exception, exceptions.ParseError): exception = JSONRPCParseError() elif isinstance(exception, exceptions.InvalidRequestError): exception = JSONRPCInvalidRequestError() elif isinstance(exception, exceptions.MethodNotFoundError): exception = JSONRPCMethodNotFoundError() elif isinstance(exception, exceptions.InvalidParamsError): exception = JSONRPCInvalidParamsError() elif isinstance(exception, exceptions.InternalError): exception = JSONRPCInternalError() elif isinstance(exception, exceptions.ServerError): exception = JSONRPCServerError() else: exception = JSONRPCInternalError() return JSONRPCErrorResponse(uid, exception.code, exception.message, exception.data) def parse_request(self, data: dict) -> JSONRPCRequest: for k in data.keys(): if k not in self._ALLOWED_REQUEST_KEYS: raise JSONRPCInvalidRequestError('Key not allowed: %s' % k) if data.get('jsonrpc') != self.JSON_RPC_VERSION: raise JSONRPCInvalidRequestError("Wrong or missing jsonrpc version") method = data['method'] if not isinstance(method, str): raise JSONRPCInvalidRequestError("method must be str") uid = data.get('id') if uid and not isinstance(uid, int): raise JSONRPCInvalidRequestError("id must be int") params = data.get('params') args = list() kwargs = dict() if isinstance(params, list): args = params elif isinstance(params, dict): kwargs = params else: raise JSONRPCInvalidParamsError("params must be list or dict") return JSONRPCRequest(method, uid, args, kwargs) def parse_response(self, data: dict) -> typing.Union[JSONRPCSuccessResponse, JSONRPCErrorResponse]: for k in data.keys(): if k not in self._ALLOWED_REPLY_KEYS: raise exceptions.ServerReplyError('Key not allowed: %s' % k) if data.get('jsonrpc') != self.JSON_RPC_VERSION: raise exceptions.ServerReplyError("Wrong or missing jsonrpc version") uid = data.get('id') if uid and not isinstance(uid, (int, str)): raise exceptions.ServerReplyError("id must be int or str or None") if ('error' in data) == ('result' in data): raise exceptions.ServerReplyError('Reply must contain exactly one of result and error.') if 'result' in data: return JSONRPCSuccessResponse(uid, data['result']) else: error = data['error'] code = error.get('code') if not isinstance(code, int): raise exceptions.ServerReplyError("error.code must be int") message = error.get('message') if not isinstance(message, str): raise exceptions.ServerReplyError("error.message must be str") data = error.get('data') return JSONRPCErrorResponse(uid, code, message, data)
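# A short usage sketch for the protocol class above (assumes it runs inside the
# same package so the relative imports resolve; the method name and payloads
# are illustrative only).
proto = JSONRPCProtocol()
request = proto.create_request('ping', args=[1, 2])
print(request.data())
# -> {'jsonrpc': '2.0', 'method': 'ping', 'params': [1, 2], 'id': 1}

response = proto.parse_response({'jsonrpc': '2.0', 'id': 1, 'result': 'pong'})
print(response.result)  # -> 'pong'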
import peewee as pw import pytest from muffin_peewee import Plugin as Peewee, JSONField @pytest.fixture(scope='module') def aiolib(): return 'asyncio', {'use_uvloop': False} @pytest.fixture(scope='session', autouse=True) def setup_logging(): import logging logger = logging.getLogger('peewee') logger.setLevel(logging.DEBUG) @pytest.fixture async def db(app): db = Peewee(app, connection='sqlite:///:memory:', auto_connection=False) async with db: async with db.connection(): yield db @pytest.fixture async def Resource(db): @db.manager.register class Resource(pw.Model): active = pw.BooleanField(default=False) name = pw.CharField(null=False) count = pw.IntegerField(null=True) config = JSONField(default={}) assert Resource._manager await db.manager.create_tables(Resource) return Resource @pytest.fixture async def ResourceEndpoint(Resource, api): from muffin_rest.peewee import PWRESTHandler @api.route class ResourceEndpoint(PWRESTHandler): class Meta: filters = 'active', 'name', ('oid', {'field': 'id'}), limit = 10 model = Resource sorting = ('id', {'default': 'desc'}), 'name', Resource.count @PWRESTHandler.route('/resource/action') async def action(self, request, resource=None): """Description for the action.""" resources = await self.meta.manager.fetchall(self.collection) return await self.dump(request, resources) return ResourceEndpoint @pytest.fixture async def resource(Resource, db): return await db.create(Resource, name='test') def test_imports(): from muffin_rest import PWRESTHandler, PWFilter, PWFilters, PWSort, PWSorting assert PWRESTHandler assert PWFilter assert PWFilters assert PWSort assert PWSorting async def test_base(api, ResourceEndpoint, Resource): assert ResourceEndpoint assert ResourceEndpoint.meta.name == 'resource' assert ResourceEndpoint.meta.manager # Schema assert ResourceEndpoint.meta.Schema assert ResourceEndpoint.meta.Schema._declared_fields ff = ResourceEndpoint.meta.Schema._declared_fields['active'] assert ff.load_default is False # Sorting assert ResourceEndpoint.meta.sorting assert list(ResourceEndpoint.meta.sorting.mutations.keys()) == ['id', 'name', 'count'] assert ResourceEndpoint.meta.sorting.default == [Resource.id.desc()] assert api.router.plain['/resource'] assert api.router.dynamic[0].pattern.pattern == '^/resource/(?P<id>[^/]+)$' async def test_get(client, ResourceEndpoint, resource): res = await client.get('/api/resource') assert res.status_code == 200 json = await res.json() assert json assert json[0]['config'] == {} assert json[0]['count'] is None assert json[0]['id'] == '1' assert json[0]['name'] == 'test' res = await client.get('/api/resource/1') assert res.status_code == 200 assert await res.json() == { 'active': False, 'config': {}, 'count': None, 'id': '1', 'name': 'test', } res = await client.get('/api/resource/unknown') assert res.status_code == 404 assert await res.json() == {'error': True, 'message': 'Resource not found'} res = await client.get('/api/resource/action?custom=123') assert res.status_code == 200 json = await res.json() assert json async def test_create(client, ResourceEndpoint): res = await client.post('/api/resource', json={'active': True}) assert res.status_code == 400 json = await res.json() assert json['errors'] assert 'name' in json['errors'] res = await client.post('/api/resource', data={'name': 'test2', 'active': True, 'unknown': 22}) assert res.status_code == 200 json = await res.json() assert json['id'] == '1' assert json['name'] == 'test2' assert json['active'] async def test_edit(client, resource, ResourceEndpoint): res = 
await client.put('/api/resource/1', data={'name': 'new'}) assert res.status_code == 200 json = await res.json() assert json['name'] == 'new' assert json['id'] == '1' async def test_delete(client, resource, ResourceEndpoint, Resource, db): res = await client.delete('/api/resource/1') assert res.status_code == 200 json = await res.json() assert not json assert not await db.fetchone(Resource.select().where(Resource.id == 1)) async def test_sort(client, ResourceEndpoint, Resource, db): await db.create(Resource, name='test2', count=2) await db.create(Resource, name='test3', count=3) await db.create(Resource, name='test4', count=1) # Default sort res = await client.get('/api/resource') assert res.status_code == 200 json = await res.json() assert json[0]['id'] == '3' assert json[1]['id'] == '2' res = await client.get('/api/resource?sort=-count') assert res.status_code == 200 json = await res.json() assert json[0]['id'] == '2' assert json[1]['id'] == '1' async def test_filters(client, ResourceEndpoint, Resource, db): await db.create(Resource, name='test2', count=2) await db.create(Resource, name='test3', count=3) await db.create(Resource, name='test4', count=1) res = await client.get('/api/resource?where={"name":"test"}') assert res.status_code == 200 json = await res.json() assert len(json) == 0 res = await client.get('/api/resource?where={"name": {"$in": ["test3", "test2"]}}') assert res.status_code == 200 json = await res.json() assert len(json) == 2 res = await client.get('/api/resource?where={"name": {"$starts": "test"}}') assert res.status_code == 200 json = await res.json() assert len(json) == 3 res = await client.get('/api/resource?where={"name": {"$ends": "3"}}') assert res.status_code == 200 json = await res.json() assert len(json) == 1 res = await client.get('/api/resource?where={"oid": {"$between": ["2", "3"]}}') assert res.status_code == 200 json = await res.json() assert len(json) == 2 res = await client.get('/api/resource?where={"oid": {"$gt": "2"}}') assert res.status_code == 200 json = await res.json() assert len(json) == 1 async def test_paginate(client, ResourceEndpoint, Resource, db): for n in range(12): await db.create(Resource, name=f"test{n}") res = await client.get('/api/resource') assert res.status_code == 200 json = await res.json() assert len(json) == 10 res = await client.get('/api/resource?limit=5') assert res.status_code == 200 assert res.headers['x-total'] == '12' assert res.headers['x-limit'] == '5' assert res.headers['x-offset'] == '0' json = await res.json() assert len(json) == 5 res = await client.get('/api/resource?limit=5&offset=9') assert res.status_code == 200 assert res.headers['x-total'] == '12' assert res.headers['x-limit'] == '5' assert res.headers['x-offset'] == '9' json = await res.json() assert len(json) == 3 async def test_batch_ops(client, ResourceEndpoint, db, Resource): # Batch operations (only POST/DELETE are supported for now) res = await client.post('/api/resource', json=[ {'name': 'test3', 'active': True}, {'name': 'test4', 'active': True}, {'name': 'test6', 'active': True}, ]) assert res.status_code == 200 json = await res.json() assert len(json) == 3 assert json[0]['id'] == '1' assert json[1]['id'] == '2' assert json[2]['id'] == '3' res = await client.delete('/api/resource', json=['1', '2', '3']) assert res.status_code == 200 assert not await db.count(Resource.select().where(Resource.id << ('11', '12', '13'))) async def test_openapi(client, ResourceEndpoint): res = await client.get('/api/openapi.json') assert res.status_code == 200 json = 
await res.json() assert json async def test_endpoint_inheritance(Resource): from muffin_rest.peewee import PWRESTHandler class ResourceEndpoint(PWRESTHandler): class Meta: model = Resource assert ResourceEndpoint.meta.name == 'resource' class ChildEndpoint(ResourceEndpoint): class Meta: name = 'child' assert ChildEndpoint.meta.name == 'child' async def test_aiomodels(client, db, api): events = [] class TestModel(db.Model): data = pw.CharField() async def save(self, **kwargs): events.append('custom-save') return await super().save(**kwargs) async def delete_instance(self, **kwargs): events.append('custom-delete') return await super().delete_instance(**kwargs) await db.create_tables(TestModel) from muffin_rest.peewee import PWRESTHandler @api.route class Test(PWRESTHandler): class Meta: model = TestModel res = await client.post('/api/testmodel', json={'data': 'test'}) assert res.status_code == 200 json = await res.json() assert json['id'] assert events assert 'custom-save' in events res = await client.delete(f"/api/testmodel/{json['id']}") assert res.status_code == 200 assert 'custom-delete' in events
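# Standalone sketch (plain stdlib, an illustration rather than project code) of
# building the JSON 'where' filter and 'sort' query strings that test_filters()
# and test_sort() above send to the API.
import json
from urllib.parse import urlencode

query = {
    'where': json.dumps({'name': {'$in': ['test3', 'test2']}, 'oid': {'$gt': '2'}}),
    'sort': '-count',
    'limit': 5,
}
print('/api/resource?' + urlencode(query))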
from .uri import URI, URIError
from .header import Header

__author__ = 'Terry Kerr'
__email__ = 't@xnr.ca'
__version__ = '0.3.1'
'''
Model Evaluation script.

The evaluation strategy here is to show the predicted class of an image when an
input image path is provided. Therefore, there is no need to use the DataLoader
class to load the data. However, if you wish to evaluate in batches, use the
LoadDataset class from load_data.py together with the DataLoader class to load
the images.

Note that the evaluation script does not depend on any training parameters from
train_cfg.
'''
import torch

from model import Model
import eval_cfg as e_cfg
from image_transforms import ToTensor
from utils import read_image, evaluate_class


def main():
    '''
    Evaluation function.
    '''
    # Check that a trained model is present.
    assert e_cfg.TRAINED_MODEL_PRESENCE, "There is no trained model present for evaluation! If a model is already placed in the appropriate folder, please check the name of the model file."

    vgg = Model(resized_img_size=e_cfg.RESIZED_IMAGE_SIZE, num_classes=e_cfg.NUM_CLASSES, init_weights=True)
    vgg = vgg.to(e_cfg.DEVICE)

    print("--- Model Architecture ---")
    print(vgg)

    # Load the parameters of the saved model.
    model_params = torch.load(e_cfg.MODEL_PATH + e_cfg.MODEL_NAME)
    vgg.load_state_dict(model_params)
    vgg.eval()  # change the model to eval mode after loading the parameters. IMPORTANT STEP!
    print("Model parameters are loaded from the saved file!")

    in_img = input("Please input the path of the image you wish to be evaluated: ")

    loaded_image = read_image(image_path=in_img, resized_image_size=e_cfg.RESIZED_IMAGE_SIZE)  # load the image using cv2.
    tensor_image = ToTensor(mode='eval')({'image': loaded_image})['image']  # convert the loaded numpy image to a Tensor in eval mode and extract only the image from the dict.

    # Add an extra dimension in front to emulate a batch size of 1 and move the tensor to GPU if available.
    tensor_image = tensor_image.view(1, tensor_image.size()[0], tensor_image.size()[1],
                                     tensor_image.size()[2]).to(e_cfg.DEVICE)

    prediction_tensor = vgg(tensor_image)  # output from the network.
    predicted_class = evaluate_class(net_output=prediction_tensor, classes_list=e_cfg.CLASSES)  # get the predicted class.
    print(predicted_class)


if __name__ == '__main__':
    main()
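# The script above imports its settings from eval_cfg, which is not shown here.
# Below is a hypothetical stand-in sketch of that module: only the attribute
# names are taken from the script; every value is an assumption.
import os

import torch

RESIZED_IMAGE_SIZE = 224
NUM_CLASSES = 2
CLASSES = ['class_a', 'class_b']                 # placeholder class labels
MODEL_PATH = './saved_models/'
MODEL_NAME = 'vgg_trained.pth'
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
TRAINED_MODEL_PRESENCE = os.path.isfile(MODEL_PATH + MODEL_NAME)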
# -*- coding: utf-8 -*- """Testing dirstack""" #from __future__ import unicode_literals, print_function from contextlib import contextmanager from functools import wraps import os import os.path import subprocess import builtins import pytest from xonsh import dirstack from xonsh.environ import Env from xonsh.built_ins import load_builtins from xonsh.dirstack import DIRSTACK from xonsh.platform import ON_WINDOWS from xonsh.dirstack import _unc_tempDrives HERE = os.path.abspath(os.path.dirname(__file__)) PARENT = os.path.dirname(HERE) def drive_in_use(letter): return ON_WINDOWS and os.system('vol {}: 2>nul>nul'.format(letter)) == 0 MAX_TEMP_DRIVES = 4 TEMP_DRIVE = [] for d in 'zyxwvuts': if not drive_in_use(d): TEMP_DRIVE.append(d + ':') pytestmark = pytest.mark.skipif(len(TEMP_DRIVE) < MAX_TEMP_DRIVES, reason='Too many drive letters are already used by Windows to run the tests.') @pytest.yield_fixture(scope="module") def shares_setup(tmpdir_factory): """create some shares to play with on current machine. Yield (to test case) array of structs: [uncPath, driveLetter, equivLocalPath] Side effect: `os.chdir(TEST_WORK_DIR)` """ if not ON_WINDOWS: return [] shares = [[r'uncpushd_test_HERE', TEMP_DRIVE[1], HERE] , [r'uncpushd_test_PARENT', TEMP_DRIVE[3], PARENT]] for s, d, l in shares: # set up some shares on local machine. dirs already exist test case must invoke wd_setup. rtn = subprocess.call(['NET', 'SHARE', s, '/delete'], universal_newlines=True) # clean up from previous run after good, long wait. if rtn != 0: yield None return rtn = subprocess.call(['NET', 'SHARE', s + '=' + l], universal_newlines=True) if rtn != 0: yield None return rtn = subprocess.call(['NET', 'USE', d, r"\\localhost" + '\\' + s], universal_newlines=True) if rtn != 0: yield None return yield [[r"\\localhost" + '\\' + s[0], s[1], s[2]] for s in shares] # we want to delete the test shares we've created, but can't do that if unc shares in DIRSTACK # (left over from assert fail aborted test) os.chdir(HERE) for dl in _unc_tempDrives: rtn = subprocess.call(['net', 'use', dl, '/delete'], universal_newlines=True) for s, d, l in shares: rtn = subprocess.call(['net', 'use', d, '/delete'], universal_newlines=True) # subprocess.call(['net', 'share', s, '/delete'], universal_newlines=True) # fails with access denied, # unless I wait > 10 sec. see http://stackoverflow.com/questions/38448413/access-denied-in-net-share-delete def test_pushdpopd(xonsh_builtins): """Simple non-UNC push/pop to verify we didn't break nonUNC case. 
""" xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([HERE]) wd = os.getcwd() assert wd.casefold() == HERE.casefold() dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" def test_cd_dot(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(PWD=os.getcwd()) owd = os.getcwd().casefold() dirstack.cd(['.']) assert owd == os.getcwd().casefold() @pytest.mark.skipif( not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_simple_push_pop(xonsh_builtins, shares_setup): if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" assert len(_unc_tempDrives) == 0 @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_to_same_share(xonsh_builtins, shares_setup): if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 2 dirstack.popd([]) assert os.path.isdir(TEMP_DRIVE[0] + '\\'), "Temp drive not unmapped till last reference removed" dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" assert len(_unc_tempDrives) == 0 @pytest.mark.skipif( not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_other_push_same(xonsh_builtins, shares_setup): """push to a, then to b. verify drive letter is TEMP_DRIVE[2], skipping already used TEMP_DRIVE[1] Then push to a again. 
Pop (check b unmapped and a still mapped), pop, pop (check a is unmapped)""" if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 dirstack.pushd([r'\\localhost\uncpushd_test_PARENT']) wd = os.getcwd() assert os.getcwd().casefold() == TEMP_DRIVE[2] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 2 dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 3 dirstack.popd([]) assert os.getcwd().casefold() == TEMP_DRIVE[2] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 2 assert os.path.isdir(TEMP_DRIVE[2] + '\\') assert os.path.isdir(TEMP_DRIVE[0] + '\\') dirstack.popd([]) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 assert not os.path.isdir(TEMP_DRIVE[2] + '\\') assert os.path.isdir(TEMP_DRIVE[0] + '\\') dirstack.popd([]) assert os.getcwd().casefold() == owd.casefold() assert len(_unc_tempDrives) == 0 assert len(DIRSTACK) == 0 assert not os.path.isdir(TEMP_DRIVE[2] + '\\') assert not os.path.isdir(TEMP_DRIVE[0] + '\\') @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_base_push_rempath(xonsh_builtins): """push to subdir under share, verify mapped path includes subdir""" pass #really? Need to cut-and-paste 2 flavors of this? yield_fixture requires yield in defined function body, not callee @pytest.yield_fixture() def with_unc_check_enabled(): if not ON_WINDOWS: return import winreg old_wval = 0 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) try: wval, wtype = winreg.QueryValueEx(key, 'DisableUNCCheck') old_wval = wval # if values was defined at all except OSError as e: pass winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, 0) winreg.CloseKey(key) yield old_wval key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, old_wval) winreg.CloseKey(key) @pytest.yield_fixture() def with_unc_check_disabled(): # just like the above, but value is 1 to *disable* unc check if not ON_WINDOWS: return import winreg old_wval = 0 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) try: wval, wtype = winreg.QueryValueEx(key, 'DisableUNCCheck') old_wval = wval # if values was defined at all except OSError as e: pass winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, 1) winreg.CloseKey(key) yield old_wval key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, old_wval) winreg.CloseKey(key) @pytest.fixture() def xonsh_builtins_cd(xonsh_builtins): xonsh_builtins.__xonsh_env__['CDPATH'] = PARENT xonsh_builtins.__xonsh_env__['PWD'] = os.getcwd() xonsh_builtins.__xonsh_env__['DIRSTACK_SIZE'] = 20 return xonsh_builtins @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_auto_pushd(xonsh_builtins_cd, 
with_unc_check_enabled): xonsh_builtins_cd.__xonsh_env__['AUTO_PUSHD'] = True so, se, rc = dirstack.cd([r'\\localhost\uncpushd_test_PARENT']) if rc != 0: return assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(DIRSTACK) == 1 assert os.path.isdir(TEMP_DRIVE[0] + '\\') @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_nocheck(xonsh_builtins_cd, with_unc_check_disabled): if with_unc_check_disabled == 0: return dirstack.cd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == r'\\localhost\uncpushd_test_here' @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_no_auto_pushd(xonsh_builtins_cd, with_unc_check_enabled): if with_unc_check_enabled == 0: return so, se, rc = dirstack.cd([r'\\localhost\uncpushd_test_PARENT']) assert rc != 0 assert so is None or len(so) == 0 assert 'disableunccheck' in se.casefold() and 'auto_pushd' in se.casefold() @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_unc_check(): # emminently suited to mocking, but I don't know how # need to verify unc_check_enabled correct whether values set in HKCU or HKLM pass
# -*- coding: utf-8 -*- # pylint: disable=C0103 # pylint: disable=C0111 import ustruct import uctypes from ubinascii import hexlify, unhexlify from micropython import const """ SCO handle is 12 bits, followed by 2 bits packet status flags. 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 ------------------------------------------------- | handle |ps |xx | length | ------------------------------------------------- """ HCI_SCO_STRUCT = { "handle": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 12 << uctypes.BF_LEN, "ps": uctypes.BFUINT16 | 0 | 12 << uctypes.BF_POS | 2 << uctypes.BF_LEN, "xx": uctypes.BFUINT16 | 0 | 14 << uctypes.BF_POS | 2 << uctypes.BF_LEN, "length": uctypes.UINT8 | 2 } class HCI_SCO(object): """HCI_SCO""" struct_format = "<HB" struct_size = ustruct.calcsize(struct_format) def __init__(self, handle, ps=0, xx=0, data=b''): bin_str = "{:016b}{:02b}{:02b}{:012b}".format( len(data) if data else 0, xx, ps, handle ) self._handle = handle self._ps = ps self._xx = xx self._tobytes = int(bin_str, 2) self._data = data def __getattr__(self, name): if name == "handle": return self._handle elif name == "ps": return self._ps elif name == "xx": return self._xx elif name == "tobytes": return self._tobytes elif name == "length": return len(self._data) elif name == "data": return self._data def __str__(self): desc_str = ( "<{:s} " "handle=0x{:04x} ps=0x{:02x} xx=0x{:02x} " "length={:d} data={:s}>" ) return desc_str.format( self.__class__.__name__, self.handle, self.ps, self.xx, self.length, hexlify(self.data) ) @staticmethod def from_buffer(data): """ Parse HCI SCO data References can be found here: * https://www.bluetooth.org/en-us/specification/adopted-specifications ** Core specification 4.1 ** [vol 2] Part E (Section 5) - HCI Data Formats ** [vol 2] Part E (Section 5.4) - Exchange of HCI-specific information """ hci_sco = uctypes.struct( uctypes.addressof(data[:HCI_SCO.struct_size]), HCI_SCO_STRUCT, uctypes.LITTLE_ENDIAN ) data = data[HCI_SCO.struct_size:] return HCI_SCO(hci_sco.handle, hci_sco.ps, hci_sco.xx, data) def to_buffer(self): """ Get data string """ return ustruct.pack(self.struct_format, self.tobytes) + self.data
from pyotp.otp import OTP
from urllib.parse import quote


class HOTP(OTP):
    def at(self, count):
        """
        Generates the OTP for the given count

        @param [Integer] count counter
        @returns [Integer] OTP
        """
        return self.generate_otp(count)

    def verify(self, otp, counter):
        """
        Verifies the OTP passed in against the OTP generated for the given counter

        @param [String/Integer] otp the OTP to check against
        @param [Integer] counter the counter of the OTP
        """
        return str(otp) == str(self.at(counter))

    def provisioning_uri(self, name, initial_count=0):
        """
        Returns the provisioning URI for the OTP.  This can then be encoded
        in a QR Code and used to provision the Google Authenticator app.

        @param [String] name of the account
        @param [Integer] initial_count starting counter value, defaults to 0
        @return [String] provisioning uri
        """
        return 'otpauth://hotp/%(name)s?secret=%(secret)s&counter=%(initial_count)s' % {
            'name': quote(name, safe='@'),
            'secret': self.secret,
            'initial_count': initial_count,
        }
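# Hedged usage sketch for the HOTP class above (not part of the library); the
# base32 secret below is the usual documentation example value, and the account
# name is illustrative.
hotp = HOTP('JBSWY3DPEHPK3PXP')
code = hotp.at(0)                 # OTP for counter value 0
assert hotp.verify(code, 0)       # verifies against the same counter value
print(hotp.provisioning_uri('alice@example.com', initial_count=0))
# -> otpauth://hotp/alice@example.com?secret=JBSWY3DPEHPK3PXP&counter=0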
import os import pytest import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_directories(host): dirs = [ '/DATA', '/DATA/docker', '/DATA/fluentd', '/DATA/grafana', '/DATA/metrics', '/DATA/migrations', '/DATA/mongodb', '/DATA/plugins', '/DATA/postgresql', '/DATA/prometheus', '/DATA/tokeraggregated', '/DATA/unomaly_actions', '/DATA/unomaly_celery', '/DATA/unomaly_logs', '/DATA/unomaly_transports', '/opt/unomaly', '/opt/unomaly/conf', '/opt/unomaly/bin', '/opt/unomaly/fluentd', '/opt/unomaly/install', '/opt/unomaly/install/logs', '/opt/unomaly/license', '/opt/unomaly/mayday', '/opt/unomaly/mayday/scripts', '/opt/unomaly/role', '/opt/unomaly/www', ] for d in dirs: dir = host.file(d) assert dir.is_directory def test_unomaly_files_exist(host): in_file = host.file('/DATA/unomaly_instance') assert in_file.is_file version_file = host.file('/opt/unomaly/VERSION') assert version_file.contains('2.3') assert version_file.is_file def test_apache(host): pkg = host.package("apache2") assert pkg.is_installed assert pkg.version.startswith("2.4") service = host.service('apache2') assert service.is_running assert service.is_enabled def test_unomaly_command(host): with host.sudo(): comm = host.command('unomaly') assert comm.rc == 0 output = comm.stdout services = [ 'fluentd', 'grafana', 'nats', 'cupid', 'postgres', 'celery-beat', 'celery-deletion', 'celery', 'check-standalone', 'config-wizard', 'connect', 'dashboard', 'horizon', 'ingestion', 'licensed', 'api', 'forager', 'tad', 'pluginjs', 'sid', 'systemstated', 'transportd', 'unomalyweb', 'prometheus-core', 'prometheus-cadvisor', 'prometheus-mongo', 'prometheus-node', 'prometheus-postgres', 'syslogng', ] for srv in services: assert srv in output assert 'inactive' not in output assert 'dead' not in output assert 'activating' not in output
# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # # SPDX-License-Identifier: MIT import os import sys sys.path.insert(0, os.path.abspath("..")) # -- General configuration ------------------------------------------------ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"] # Uncomment the below if you use native CircuitPython modules such as # digitalio, micropython and busio. List the modules you use. Without it, the # autodoc module docs will fail to generate with a warning. autodoc_mock_imports = ["adafruit_bus_device", "micropython", "adafruit_register"] intersphinx_mapping = { "python": ("https://docs.python.org/3.4", None), "BusDevice": ( "https://circuitpython.readthedocs.io/projects/busdevice/en/latest/", None, ), "CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None), } # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "Adafruit BNO055 Library" copyright = "2017 Radomir Dopieralski" author = "Radomir Dopieralski" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "1.0" # The full version, including alpha/beta/rc tags. release = "1.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = "any" # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # If this is True, todo emits a warning for each TODO entries. The default is False. todo_emit_warnings = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # on_rtd = os.environ.get("READTHEDOCS", None) == "True" if not on_rtd: # only import and set the theme if we're building docs locally try: import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."] except: html_theme = "default" html_theme_path = ["."] else: html_theme_path = ["."] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = "_static/favicon.ico" # Output file base name for HTML help builder. htmlhelp_basename = "AdafruitBNO055Librarydoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "AdafruitBNO055Library.tex", "Adafruit BNO055 Library Documentation", "Radomir Dopieralski", "manual", ) ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( master_doc, "adafruitBNO055library", "Adafruit BNO055 Library Documentation", [author], 1, ) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "AdafruitBNO055Library", "Adafruit BNO055 Library Documentation", author, "AdafruitBNO055Library", "One line description of project.", "Miscellaneous", ) ]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import typing from collections import namedtuple from pathlib import Path from ruamel import yaml temperate = """\ # 0 for standalone, 1 for cluster work_mode: 0 # 0 for eggroll, 1 for spark backend: 0 # base dir for data upload conf eg # examples/data/breast_hetero_guest.csv -> $data_base_dir/examples/data/breast_hetero_guest.csv data_base_dir: ../../../ parties: guest: [10000] host: [9999, 10000] arbiter: [9999] services: - flow_services: - {address: 127.0.0.1:9380, parties: [9999, 10000]} ssh_tunnel: # optional enable: false ssh_address: <remote ip>:<remote port> ssh_username: ssh_password: # optional ssh_priv_key: "~/.ssh/id_rsa" # what is ssh_tunnel? # to open the ssh tunnel(s) if the remote service # cannot be accessed directly from the location where the test suite is run! # # +---------------------+ # | ssh address | # | ssh username | # | ssh password/ | # +--------+ | ssh priv_key | +----------------+ # |local ip+----------ssh tuunel-------------->+remote local ip | # +--------+ | | +----------------+ # | | # request local ip:port +----- as if --------->request remote's local ip:port from remote side # | | # | | # +---------------------+ # """ _default_config = Path(__file__).parent.joinpath("fate_test_config.yaml").resolve() def create_config(path: Path, override=False): if path.exists() and not override: raise FileExistsError(f"{path} exists") with path.open("w") as f: f.write(temperate) def default_config(): if not _default_config.exists(): create_config(_default_config) return _default_config class Parties(object): def __init__(self, guest: typing.List[int], host: typing.List[int], arbiter: typing.List[int] = None): self.guest = guest self.host = host self.arbiter = arbiter or [] self._party_to_role_string = {} for role in ["guest", "host", "arbiter"]: parties = getattr(self, role) for i, party in enumerate(parties): if party not in self._party_to_role_string: self._party_to_role_string[party] = set() self._party_to_role_string[party].add(f"{role.lower()}_{i}") @staticmethod def from_dict(d: dict): return Parties(**d) def party_to_role_string(self, party): return self._party_to_role_string[party] def extract_role(self, counts: typing.MutableMapping[str, int]): roles = {} for role, num in counts.items(): if not hasattr(self, role): raise ValueError(f"{role} should be one of [guest, host, arbiter]") else: if len(getattr(self, role)) < num: raise ValueError(f"require {num} {role} parties, only {len(getattr(self, role))} in config") roles[role] = getattr(self, role)[:num] return roles def extract_initiator_role(self, role): initiator_role = role.strip() if len(getattr(self, initiator_role)) < 1: raise ValueError(f"role {initiator_role} has empty party list") party_id = getattr(self, initiator_role)[0] return dict(role=initiator_role, party_id=party_id) class Config(object): service = namedtuple("service", ["address"]) tunnel_service = namedtuple("tunnel_service", ["tunnel_id", 
"index"]) tunnel = namedtuple("tunnel", ["ssh_address", "ssh_username", "ssh_password", "ssh_priv_key", "services_address"]) def __init__(self, config): self.work_mode = config["work_mode"] self.backend = config["backend"] self.data_base_dir = config["data_base_dir"] self.parties = Parties.from_dict(config["parties"]) self.party_to_service_id = {} self.service_id_to_service = {} self.tunnel_id_to_tunnel = {} tunnel_id = 0 service_id = 0 for service_config in config["services"]: flow_services = service_config["flow_services"] if service_config.get("ssh_tunnel", {}).get("enable", False): tunnel_id += 1 services_address = [] for index, flow_service in enumerate(flow_services): service_id += 1 address_host, address_port = flow_service["address"].split(":") address_port = int(address_port) services_address.append((address_host, address_port)) self.service_id_to_service[service_id] = self.tunnel_service(tunnel_id, index) for party in flow_service["parties"]: self.party_to_service_id[party] = service_id tunnel_config = service_config["ssh_tunnel"] ssh_address_host, ssh_address_port = tunnel_config["ssh_address"].split(":") self.tunnel_id_to_tunnel[tunnel_id] = self.tunnel((ssh_address_host, int(ssh_address_port)), tunnel_config["ssh_username"], tunnel_config["ssh_password"], tunnel_config["ssh_priv_key"], services_address) else: for flow_service in flow_services: service_id += 1 address = flow_service["address"] self.service_id_to_service[service_id] = self.service(address) for party in flow_service["parties"]: self.party_to_service_id[party] = service_id @staticmethod def load(path: typing.Union[str, Path], **kwargs): if isinstance(path, str): path = Path(path) config = {} if path is not None: with path.open("r") as f: config.update(yaml.safe_load(f)) config["data_base_dir"] = path.resolve().joinpath(config["data_base_dir"]).resolve() config.update(kwargs) return Config(config) @staticmethod def load_from_file(path: typing.Union[str, Path]): """ Loads conf content from json or yaml file. Used to read in parameter configuration Parameters ---------- path: str, path to conf file, should be absolute path Returns ------- dict, parameter configuration in dictionary format """ if isinstance(path, str): path = Path(path) config = {} if path is not None: file_type = path.suffix with path.open("r") as f: if file_type == ".yaml": config.update(yaml.safe_load(f)) elif file_type == ".json": config.update(json.load(f)) else: raise ValueError(f"Cannot load conf from file type {file_type}") return config def parse_config(config): try: config_inst = Config.load(config) except Exception as e: raise RuntimeError(f"error parse config from {config}") from e return config_inst
""" Copyright 2015 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cafe.drivers.unittest.decorators import tags from cloudcafe.compute.common.types import NovaServerRebootTypes from cloudcafe.compute.common.exceptions import ActionInProgress from cloudcafe.compute.common.types import NovaServerStatusTypes \ as ServerStates from cloudcafe.compute.common.clients.ping import PingClient class SuspendServerTests(object): @tags(type='smoke', net='yes') def test_suspend_resume_server(self): """ Verify that a server can be suspended and then resumed. Will suspend the server and waits for the server state to be "PAUSED" followed by pinging the ip until its unreachable. Resumes the server and waits for the server state to be active followed by pinging the server until its reachable. Then retrieve the instance. The following assertions occur: - 202 status code response from the suspend server call. - 202 status code response from the start server call. - Get remote instance client returns true (successful connection). """ ping_ip = self.get_accessible_ip_address(self.server) response = self.admin_servers_client.suspend_server(self.server.id) self.assertEqual(response.status_code, 202) self.admin_server_behaviors.wait_for_server_status( self.server.id, ServerStates.PAUSED) PingClient.ping_until_unreachable( ping_ip, timeout=60, interval_time=5) response = self.admin_servers_client.resume_server(self.server.id) self.assertEqual(response.status_code, 202) self.admin_server_behaviors.wait_for_server_status( self.server.id, ServerStates.ACTIVE) PingClient.ping_until_reachable( ping_ip, timeout=60, interval_time=5) self.assertTrue(self.server_behaviors.get_remote_instance_client( self.server, self.servers_config), "Unable to connect to active server {0} after suspending " "and resuming".format(self.server.id)) class NegativeSuspendServerTests(object): @tags(type='smoke', net='yes') def test_suspend_reboot_hard_server(self): """ Verify that a server reboot after suspend does not restore it. Will suspend the server and waits for the server state to be "PAUSED" followed by pinging the ip until its unreachable. Tries to reboot the server and expects a "ActionInProgress" exception to be raised. Then will ping until its unreachable again. The following assertions occur: - 202 status code response from the stop server call. - Expect a "ActionInProgress" exception is raised when rebooting. """ ping_ip = self.get_accessible_ip_address(self.server) response = self.admin_servers_client.suspend_server(self.server.id) self.assertEqual(response.status_code, 202) self.admin_server_behaviors.wait_for_server_status( self.server.id, ServerStates.PAUSED) PingClient.ping_until_unreachable( ping_ip, timeout=60, interval_time=5) with self.assertRaises(ActionInProgress): self.servers_client.reboot(self.server.id, NovaServerRebootTypes.HARD) PingClient.ping_until_unreachable( ping_ip, timeout=60, interval_time=5)
from node.blockchain.inner_models import Node


def make_node(node_key_pair, addresses):
    return Node(
        identifier=node_key_pair.public,
        addresses=addresses,
        fee=4,
    )
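# Hedged usage sketch: make_node() only reads the `.public` attribute of the
# key pair, so a simple stand-in object with an illustrative key value and an
# illustrative address is enough to show the call; real tests would pass the
# proper key-pair fixture instead.
from types import SimpleNamespace

key_pair = SimpleNamespace(public='0' * 64)                        # hypothetical public key
node = make_node(key_pair, addresses=['http://127.0.0.1:8555/'])  # hypothetical node address
print(node.identifier, node.addresses, node.fee)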
import logging import os import sys import time import traceback from dials.util import Sorry from dials.util.version import dials_version from libtbx import group_args import xia2.Driver.timing import xia2.Handlers.Streams import xia2.XIA2Version from xia2.Applications.xia2_helpers import process_one_sweep from xia2.Applications.xia2_main import ( check_environment, get_command_line, help, write_citations, ) from xia2.Handlers.Citations import Citations from xia2.Handlers.Files import cleanup from xia2.Schema.XProject import XProject from xia2.Schema.XSweep import XSweep logger = logging.getLogger("xia2.cli.xia2_main") def get_ccp4_version(): CCP4 = os.environ.get("CCP4") if CCP4 is not None: version_file = os.path.join(CCP4, "lib", "ccp4", "MAJOR_MINOR") if os.path.exists(version_file): with open(version_file) as fh: return fh.read().strip() def xia2_main(stop_after=None): """Actually process something...""" Citations.cite("xia2") # print versions of related software logger.info(dials_version()) ccp4_version = get_ccp4_version() if ccp4_version: logger.info("CCP4 %s", ccp4_version) start_time = time.time() CommandLine = get_command_line() # check that something useful has been assigned for processing... xtals = CommandLine.get_xinfo().get_crystals() for name, xtal in xtals.items(): if not xtal.get_all_image_names(): logger.info("-----------------------------------" + "-" * len(name)) logger.info("| No images assigned for crystal %s |", name) logger.info("-----------------------------------" + "-" * len(name)) from xia2.Handlers.Phil import PhilIndex params = PhilIndex.get_python_object() mp_params = params.xia2.settings.multiprocessing njob = mp_params.njob xinfo = CommandLine.get_xinfo() logger.info("Project directory: %s", xinfo.path) if ( params.xia2.settings.developmental.continue_from_previous_job and os.path.exists("xia2.json") ): logger.debug("==== Starting from existing xia2.json ====") xinfo_new = xinfo xinfo = XProject.from_json(filename="xia2.json") crystals = xinfo.get_crystals() crystals_new = xinfo_new.get_crystals() for crystal_id in crystals_new: if crystal_id not in crystals: crystals[crystal_id] = crystals_new[crystal_id] continue crystals[crystal_id]._scaler = None # reset scaler for wavelength_id in crystals_new[crystal_id].get_wavelength_names(): wavelength_new = crystals_new[crystal_id].get_xwavelength(wavelength_id) if wavelength_id not in crystals[crystal_id].get_wavelength_names(): crystals[crystal_id].add_wavelength( crystals_new[crystal_id].get_xwavelength(wavelength_new) ) continue wavelength = crystals[crystal_id].get_xwavelength(wavelength_id) sweeps_new = wavelength_new.get_sweeps() sweeps = wavelength.get_sweeps() sweep_names = {s.get_name() for s in sweeps} sweep_keys = { (s.get_directory(), s.get_template(), s.get_image_range()) for s in sweeps } for sweep in sweeps_new: if ( sweep.get_directory(), sweep.get_template(), sweep.get_image_range(), ) not in sweep_keys: if sweep.get_name() in sweep_names: i = 1 while "SWEEEP%i" % i in sweep_names: i += 1 sweep._name = "SWEEP%i" % i break wavelength.add_sweep( name=sweep.get_name(), sample=sweep.sample, directory=sweep.get_directory(), image=sweep.get_image(), beam=sweep.get_beam_centre(), reversephi=sweep.get_reversephi(), distance=sweep.get_distance(), gain=sweep.get_gain(), dmin=sweep.get_resolution_high(), dmax=sweep.get_resolution_low(), polarization=sweep.get_polarization(), frames_to_process=sweep.get_frames_to_process(), user_lattice=sweep.get_user_lattice(), user_cell=sweep.get_user_cell(), 
epoch=sweep._epoch, ice=sweep._ice, excluded_regions=sweep._excluded_regions, ) sweep_names.add(sweep.get_name()) crystals = xinfo.get_crystals() failover = params.xia2.settings.failover with cleanup(xinfo.path): if mp_params.mode == "parallel" and njob > 1: driver_type = mp_params.type command_line_args = CommandLine.get_argv()[1:] jobs = [] for crystal_id in crystals: for wavelength_id in crystals[crystal_id].get_wavelength_names(): wavelength = crystals[crystal_id].get_xwavelength(wavelength_id) sweeps = wavelength.get_sweeps() for sweep in sweeps: sweep._get_indexer() sweep._get_refiner() sweep._get_integrater() jobs.append( ( group_args( driver_type=driver_type, stop_after=stop_after, failover=failover, command_line_args=command_line_args, nproc=mp_params.nproc, crystal_id=crystal_id, wavelength_id=wavelength_id, sweep_id=sweep.get_name(), ), ) ) from xia2.Driver.DriverFactory import DriverFactory default_driver_type = DriverFactory.get_driver_type() # run every nth job on the current computer (no need to submit to qsub) for i_job, arg in enumerate(jobs): if (i_job % njob) == 0: arg[0].driver_type = default_driver_type nproc = mp_params.nproc qsub_command = mp_params.qsub_command or "qsub" qsub_command = "%s -V -cwd -pe smp %d" % (qsub_command, nproc) from libtbx import easy_mp results = easy_mp.parallel_map( process_one_sweep, jobs, processes=njob, method="multiprocessing", qsub_command=qsub_command, preserve_order=True, preserve_exception_message=True, ) # Hack to update sweep with the serialized indexers/refiners/integraters i_sweep = 0 for crystal_id in crystals: for wavelength_id in crystals[crystal_id].get_wavelength_names(): wavelength = crystals[crystal_id].get_xwavelength(wavelength_id) remove_sweeps = [] sweeps = wavelength.get_sweeps() for sweep in sweeps: success, output, xsweep_dict = results[i_sweep] if output is not None: logger.info(output) if not success: logger.info("Sweep failed: removing %s", sweep.get_name()) remove_sweeps.append(sweep) else: assert xsweep_dict is not None logger.info("Loading sweep: %s", sweep.get_name()) new_sweep = XSweep.from_dict(xsweep_dict) sweep._indexer = new_sweep._indexer sweep._refiner = new_sweep._refiner sweep._integrater = new_sweep._integrater i_sweep += 1 for sweep in remove_sweeps: wavelength.remove_sweep(sweep) sample = sweep.sample sample.remove_sweep(sweep) else: for crystal_id in list(crystals.keys()): for wavelength_id in crystals[crystal_id].get_wavelength_names(): wavelength = crystals[crystal_id].get_xwavelength(wavelength_id) remove_sweeps = [] sweeps = wavelength.get_sweeps() for sweep in sweeps: from dials.command_line.show import show_experiments from dxtbx.model.experiment_list import ExperimentListFactory logger.debug(sweep.get_name()) logger.debug( show_experiments( ExperimentListFactory.from_imageset_and_crystal( sweep.get_imageset(), None ) ) ) Citations.cite("dials") try: if stop_after == "index": sweep.get_indexer_cell() else: sweep.get_integrater_intensities() sweep.serialize() except Exception as e: if failover: logger.info( "Processing sweep %s failed: %s", sweep.get_name(), str(e), ) remove_sweeps.append(sweep) else: raise for sweep in remove_sweeps: wavelength.remove_sweep(sweep) sample = sweep.sample sample.remove_sweep(sweep) # save intermediate xia2.json file in case scaling step fails xinfo.as_json(filename="xia2.json") if stop_after not in ("index", "integrate"): logger.info(xinfo.get_output()) for crystal in list(crystals.values()): crystal.serialize() # save final xia2.json file in case report 
generation fails xinfo.as_json(filename="xia2.json") if stop_after not in ("index", "integrate"): # and the summary file with open("xia2-summary.dat", "w") as fh: for record in xinfo.summarise(): fh.write("%s\n" % record) # looks like this import overwrites the initial command line # Phil overrides so... for https://github.com/xia2/xia2/issues/150 from .xia2_html import generate_xia2_html if params.xia2.settings.small_molecule: params.xia2.settings.report.xtriage_analysis = False params.xia2.settings.report.include_radiation_damage = False with xia2.Driver.timing.record_step("xia2.report"): generate_xia2_html( xinfo, filename="xia2.html", params=params.xia2.settings.report ) duration = time.time() - start_time # write out the time taken in a human readable way logger.info( "Processing took %s", time.strftime("%Hh %Mm %Ss", time.gmtime(duration)) ) write_citations() def run(): if len(sys.argv) < 2 or "-help" in sys.argv or "--help" in sys.argv: help() sys.exit() if "-version" in sys.argv or "--version" in sys.argv: print(xia2.XIA2Version.Version) print(dials_version()) ccp4_version = get_ccp4_version() if ccp4_version: print("CCP4 %s" % ccp4_version) sys.exit() xia2.Handlers.Streams.setup_logging(logfile="xia2.txt", debugfile="xia2-debug.txt") try: check_environment() except Exception as e: traceback.print_exc(file=open("xia2-error.txt", "w")) logger.debug(traceback.format_exc()) logger.error("Error setting up xia2 environment: %s" % str(e)) logger.warning( "Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:" ) logger.warning("xia2.support@gmail.com") sys.exit(1) wd = os.getcwd() try: xia2_main() logger.debug("\nTiming report:") logger.debug("\n".join(xia2.Driver.timing.report())) logger.info("Status: normal termination") return except Sorry as s: logger.error("Error: %s", str(s)) sys.exit(1) except Exception as e: with open(os.path.join(wd, "xia2-error.txt"), "w") as fh: traceback.print_exc(file=fh) logger.debug(traceback.format_exc()) logger.error("Error: %s", str(e)) logger.warning( "Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:" ) logger.warning("xia2.support@gmail.com") sys.exit(1)
"""Aggregation function for CLI specified options and config file options. This holds the logic that uses the collected and merged config files and applies the user-specified command-line configuration on top of it. """ import argparse import configparser import logging from typing import Optional from typing import Sequence from flake8.options import config from flake8.options.manager import OptionManager LOG = logging.getLogger(__name__) def aggregate_options( manager: OptionManager, cfg: configparser.RawConfigParser, cfg_dir: str, argv: Optional[Sequence[str]], ) -> argparse.Namespace: """Aggregate and merge CLI and config file options.""" # Get defaults from the option parser default_values = manager.parse_args([]) # Get the parsed config parsed_config = config.parse_config(manager, cfg, cfg_dir) # Extend the default ignore value with the extended default ignore list, # registered by plugins. extended_default_ignore = manager.extended_default_ignore.copy() # Let's store our extended default ignore for use by the decision engine default_values.extended_default_ignore = ( manager.extended_default_ignore.copy() ) LOG.debug( "Extended default ignore list: %s", list(extended_default_ignore) ) extended_default_ignore.update(default_values.ignore) default_values.ignore = list(extended_default_ignore) LOG.debug("Merged default ignore list: %s", default_values.ignore) extended_default_select = manager.extended_default_select.copy() LOG.debug( "Extended default select list: %s", list(extended_default_select) ) default_values.extended_default_select = extended_default_select # Merge values parsed from config onto the default values returned for config_name, value in parsed_config.items(): dest_name = config_name # If the config name is somehow different from the destination name, # fetch the destination name from our Option if not hasattr(default_values, config_name): dest_val = manager.config_options_dict[config_name].dest assert isinstance(dest_val, str) dest_name = dest_val LOG.debug( 'Overriding default value of (%s) for "%s" with (%s)', getattr(default_values, dest_name, None), dest_name, value, ) # Override the default values with the config values setattr(default_values, dest_name, value) # Finally parse the command-line options return manager.parse_args(argv, default_values)
from typing import Any, Optional, Sequence, Union

import dagster._check as check

from ..execution.execute_in_process_result import ExecuteInProcessResult
from ..execution.with_resources import with_resources
from ..instance import DagsterInstance
from ..storage.fs_io_manager import fs_io_manager
from .assets import AssetsDefinition
from .assets_job import build_assets_job
from .source_asset import SourceAsset


def materialize(
    assets: Sequence[Union[AssetsDefinition, SourceAsset]],
    run_config: Any = None,
    instance: Optional[DagsterInstance] = None,
) -> ExecuteInProcessResult:
    """
    Executes a single-threaded, in-process run which materializes provided assets.

    By default, will materialize assets to the local filesystem.

    Args:
        assets (Sequence[Union[AssetsDefinition, SourceAsset]]):
            The assets to materialize. Can also provide ``SourceAsset``s to fill dependencies
            for asset defs.
        run_config (Optional[Any]):
            The run config to use for the run that materializes the assets.

    Returns:
        ExecuteInProcessResult: The result of the execution.
    """
    assets = check.sequence_param(assets, "assets", of_type=(AssetsDefinition, SourceAsset))
    assets = with_resources(assets, {"io_manager": fs_io_manager})
    assets_defs = [the_def for the_def in assets if isinstance(the_def, AssetsDefinition)]
    source_assets = [the_def for the_def in assets if isinstance(the_def, SourceAsset)]
    instance = check.opt_inst_param(instance, "instance", DagsterInstance)

    return build_assets_job(
        "in_process_materialization_job",
        assets=assets_defs,
        source_assets=source_assets,
    ).execute_in_process(run_config=run_config, instance=instance)
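# Hedged usage sketch (not part of the module above): two software-defined
# assets materialized with the function defined here. Assumes the public
# `dagster.asset` decorator; the asset names and values are illustrative.
from dagster import asset


@asset
def upstream():
    return 3


@asset
def downstream(upstream):
    # depends on `upstream` by parameter name
    return upstream + 1


result = materialize([upstream, downstream])
assert result.success
print(result.output_for_node("downstream"))  # 4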
''' agent_exclusions ================ The following methods allow for interaction into the Tenable.io :devportal:`agent exclusions <agent-exclusions>` API endpoints. Methods available on ``tio.agent_exclusions``: .. rst-class:: hide-signature .. autoclass:: AgentExclusionsAPI .. automethod:: create .. automethod:: delete .. automethod:: details .. automethod:: edit .. automethod:: list ''' from restfly.utils import dict_merge, dict_clean from .base import TIOEndpoint from datetime import date, datetime, timedelta class AgentExclusionsAPI(TIOEndpoint): def create(self, name, scanner_id=1, start_time=None, end_time=None, timezone=None, description=None, frequency=None, interval=None, weekdays=None, day_of_month=None, enabled=True): ''' Creates a new agent exclusion. :devportal:`agent-exclusions: create <agent-exclusions-create>` Args: name (str): The name of the exclusion to create. scanner_id (int, optional): The scanner id. description (str, optional): Some further detail about the exclusion. start_time (datetime): When the exclusion should start. end_time (datetime): When the exclusion should end. timezone (str, optional): The timezone to use for the exclusion. The default if none is specified is to use UTC. For the list of usable timezones, please refer to: https://cloud.tenable.com/api#/resources/scans/timezones frequency (str, optional): The frequency of the rule. The string inputted will be up-cased. Valid values are: ``ONETIME``, ``DAILY``, ``WEEKLY``, ``MONTHLY``, ``YEARLY``. Default value is ``ONETIME``. interval (int, optional): The interval of the rule. The default interval is 1 weekdays (list, optional): List of 2-character representations of the days of the week to repeat the frequency rule on. Valid values are: *SU, MO, TU, WE, TH, FR, SA* Default values: ``['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA']`` day_of_month (int, optional): The day of the month to repeat a **MONTHLY** frequency rule on. The default is today. enabled (bool, optional): enable/disable exclusion. The default is ``True`` Returns: dict: Dictionary of the newly minted exclusion. Examples: Creating a one-time exclusion: >>> from datetime import datetime, timedelta >>> exclusion = tio.agent_exclusions.create( ... 'Example One-Time Agent Exclusion', ... ['127.0.0.1'], ... start_time=datetime.utcnow(), ... end_time=datetime.utcnow() + timedelta(hours=1)) Creating a daily exclusion: >>> exclusion = tio.agent_exclusions.create( ... 'Example Daily Agent Exclusion', ... ['127.0.0.1'], ... frequency='daily', ... start_time=datetime.utcnow(), ... end_time=datetime.utcnow() + timedelta(hours=1)) Creating a weekly exclusion: >>> exclusion = tio.agent_exclusions.create( ... 'Example Weekly Exclusion', ... ['127.0.0.1'], ... frequency='weekly', ... weekdays=['mo', 'we', 'fr'], ... start_time=datetime.utcnow(), ... end_time=datetime.utcnow() + timedelta(hours=1)) Creating a monthly esxclusion: >>> exclusion = tio.agent_exclusions.create( ... 'Example Monthly Agent Exclusion', ... ['127.0.0.1'], ... frequency='monthly', ... day_of_month=1, ... start_time=datetime.utcnow(), ... end_time=datetime.utcnow() + timedelta(hours=1)) Creating a yearly exclusion: >>> exclusion = tio.agent_exclusions.create( ... 'Example Yearly Agent Exclusion', ... ['127.0.0.1'], ... frequency='yearly', ... start_time=datetime.utcnow(), ... end_time=datetime.utcnow() + timedelta(hours=1)) ''' # Starting with the innermost part of the payload, lets construct the # rrules dictionary. 
frequency = self._check('frequency', frequency, str, choices=['ONETIME', 'DAILY', 'WEEKLY', 'MONTHLY', 'YEARLY'], default='ONETIME', case='upper') rrules = { 'freq': frequency, 'interval': self._check('interval', interval, int, default=1) } # if the frequency is a weekly one, then we will need to specify the # days of the week that the exclusion is run on. if frequency == 'WEEKLY': rrules['byweekday'] = ','.join(self._check( 'weekdays', weekdays, list, choices=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'], default=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'], case='upper')) # In the same vein as the frequency check, we're accepting # case-insensitive input, comparing it to our known list of # acceptable responses, then joining them all together into a # comma-separated string. # if the frequency is monthly, then we will need to specify the day of # the month that the rule will run on. if frequency == 'MONTHLY': rrules['bymonthday'] = self._check('day_of_month', day_of_month, int, choices=list(range(1,32)), default=datetime.today().day) # Next we need to construct the rest of the payload payload = { 'name': self._check('name', name, str), 'description': self._check('description', description, str, default=''), 'schedule': { 'enabled': self._check('enabled', enabled, bool, default=True), 'starttime': self._check('start_time', start_time, datetime).strftime('%Y-%m-%d %H:%M:%S') if enabled is True else datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), 'endtime': self._check('end_time', end_time, datetime).strftime('%Y-%m-%d %H:%M:%S') if enabled is True else (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S'), 'timezone': self._check('timezone', timezone, str, choices=self._api._tz, default='Etc/UTC'), 'rrules': rrules } } # Lets check to make sure that the scanner_id is an integer as the API # documentation requests and if we don't raise an error, then lets make # the call. return self._api.post( 'scanners/{}/agents/exclusions'.format( self._check('scanner_id', scanner_id, int) ), json=payload).json() def delete(self, exclusion_id, scanner_id=1): ''' Delete an agent exclusion. :devportal:`agent-exclusions: delete <agent-exclusions-delete>` Args: exclusion_id (int): The id of the exclusion object in Tenable.io scanner_id (int, optional): The id of the scanner Returns: None: The Exclusion was successfully deleted Examples: >>> tio.agent_exclusions.delete(1) ''' self._api.delete('scanners/{}/agents/exclusions/{}'.format( self._check('scanner_id', scanner_id, int), self._check('exclusion_id', exclusion_id, int) )) def details(self, exclusion_id, scanner_id=1): ''' Retrieve the details for a specific agent exclusion. :devportal:`agent-exclusion: details <agent-exclusions-details>` Args: exclusion_id (int): The id of the exclusion object in Tenable.io scanner_id (int, optional): The id of the scanner Returns: dict: The exclusion resource dictionary. Examples: >>> exclusion = tio.agent_exclusions.details(1) ''' return self._api.get( 'scanners/{}/agents/exclusions/{}'.format( self._check('scanner_id', scanner_id, int), self._check('exclusion_id', exclusion_id, int) )).json() def edit(self, exclusion_id, scanner_id=1, name=None, start_time=None, end_time=None, timezone=None, description=None, frequency=None, interval=None, weekdays=None, day_of_month=None, enabled=None): ''' Edit an existing agent exclusion. :devportal:`agent-exclusions: edit <agent-exclusions-edit>` The edit function will first gather the details of the exclusion that will be edited and will overlay the changes on top. 
The result will then be pushed back to the API to modify the exclusion. Args: exclusion_id (int): The id of the exclusion object in Tenable.io scanner_id (int, optional): The scanner id. name (str, optional): The name of the exclusion to create. description (str, optional): Some further detail about the exclusion. start_time (datetime, optional): When the exclusion should start. end_time (datetime, optional): When the exclusion should end. timezone (str, optional): The timezone to use for the exclusion. The default if none is specified is to use UTC. frequency (str, optional): The frequency of the rule. The string inputted will be up-cased. Valid values are: *ONETIME, DAILY, WEEKLY, MONTHLY, YEARLY*. interval (int, optional): The interval of the rule. weekdays (list, optional): List of 2-character representations of the days of the week to repeat the frequency rule on. Valid values are: *SU, MO, TU, WE, TH, FR, SA* Default values: ``['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA']`` day_of_month (int, optional): The day of the month to repeat a **MONTHLY** frequency rule on. enabled (bool, optional): enable/disable exclusion. Returns: dict: Dictionary of the newly minted exclusion. Examples: >>> exclusion = tio.agent_exclusions.edit(1, name='New Name') ''' # Lets start constructing the payload to be sent to the API... payload = self.details(exclusion_id, scanner_id=scanner_id) if name: payload['name'] = self._check('name', name, str) if description: payload['description'] = self._check('description', description, str) if enabled is not None: payload['schedule']['enabled'] = self._check('enabled', enabled, bool) if payload['schedule']['enabled']: frequency = self._check('frequency', frequency, str, choices=['ONETIME', 'DAILY', 'WEEKLY', 'MONTHLY', 'YEARLY'], default=payload['schedule']['rrules']['freq'], case='upper') rrules = { 'freq': frequency, 'interval': payload['schedule']['rrules']['interval'], 'byweekday': None, 'bymonthday': None, } # frequency default value is designed for weekly and monthly based on below conditions # - if schedule rrules is not None and not defined in edit params, # and byweekday/bymonthday key already exist, assign old values # - if schedule rrules is not None and not defined in edit params # and byweekday/bymonthday key not already exist, assign default values # - if schedule rrules is not None and defined in edit params, assign new values if frequency == 'WEEKLY': rrules['byweekday'] = ','.join(self._check( 'weekdays', weekdays, list, choices=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'], default=payload['schedule']['rrules'].get('byweekday', '').split() or ['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'], case='upper')) # In the same vein as the frequency check, we're accepting # case-insensitive input, comparing it to our known list of # acceptable responses, then joining them all together into a # comma-separated string. 
if frequency == 'MONTHLY': rrules['bymonthday'] = self._check( 'day_of_month', day_of_month, int, choices=list(range(1, 32)), default=payload['schedule']['rrules'].get('bymonthday', datetime.today().day)) # update new rrules in existing payload dict_merge(payload['schedule']['rrules'], rrules) # remove null values from payload payload = dict_clean(payload) if start_time: payload['schedule']['starttime'] = self._check( 'start_time', start_time, datetime).strftime('%Y-%m-%d %H:%M:%S') if end_time: payload['schedule']['endtime'] = self._check( 'end_time', end_time, datetime).strftime('%Y-%m-%d %H:%M:%S') if interval: payload['schedule']['rrules']['interval'] = self._check( 'interval', interval, int) if timezone: payload['schedule']['timezone'] = self._check( 'timezone', timezone, str, choices=self._api._tz) # Lets check to make sure that the scanner_id and exclusion_id are # integers as the API documentation requests and if we don't raise an # error, then lets make the call. return self._api.put( 'scanners/{}/agents/exclusions/{}'.format( self._check('scanner_id', scanner_id, int), self._check('exclusion_id', exclusion_id, int) ), json=payload).json() def list(self, scanner_id=1): ''' Lists all of the currently configured agent exclusions. :devportal:`agent-exclusions: list <agent-exclusions-list>` Args: scanner_id (int, optional): The scanner identifier to be used. Returns: list: List of agent exclusions. Examples: >>> for exclusion in tio.agent_exclusions.list(): ... pprint(exclusion) ''' return self._api.get( 'scanners/{}/agents/exclusions'.format( self._check('scanner_id', scanner_id, int) )).json()['exclusions']
# Copyright 2019 Graphcore Ltd. import os from urllib import request import tarfile import subprocess import tempfile cifar10_data_dir = None def download_cifar(): """Download the CIFAR-10 dataset if it's not already available.""" DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz' dir_name = 'cifar-10-batches-bin' filename = "cifar-10-binary.tar.gz" data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "Datasets") filepath = os.path.join(data_dir, dir_name) if not os.path.exists(filepath): with tempfile.TemporaryDirectory(dir=data_dir) as tmpdirname: tmpfilepath = os.path.join(tmpdirname, filename) print('Downloading', filename, "to", tmpfilepath) tmpfilepath, _ = request.urlretrieve(DATA_URL, tmpfilepath) print('Successfully downloaded, extracting to', tmpdirname) tarfile.open(tmpfilepath, 'r:gz').extractall(tmpdirname) print('Moving', tmpdirname, "to", data_dir) try: os.rename(os.path.join(tmpdirname, dir_name), os.path.join(data_dir, dir_name)) except OSError: pass return os.path.join(data_dir, dir_name) cifar10_data_dir = download_cifar() def run_train(**kwargs): os.chdir(os.path.dirname(os.path.realpath(__file__))) os.chdir('..') cmd = ['python3', 'train.py'] args = [str(item) for sublist in kwargs.items() for item in sublist if item != ''] cmd.extend(args) return subprocess.check_output(cmd).decode('utf-8') def run_restore(mypath, **kwargs): cmd = ['python3', 'restore.py'] args = [str(item) for sublist in kwargs.items() for item in sublist if item != ''] cmd.extend(args) return subprocess.check_output(cmd, cwd=mypath).decode('utf-8') def run_validation(mypath, **kwargs): cmd = ['python3', 'validation.py'] args = [str(item) for sublist in kwargs.items() for item in sublist if item != ''] cmd.extend(args) return subprocess.check_output(cmd, cwd=mypath).decode('utf-8') def parse_csv(filepath): with open(filepath) as csv: lines = csv.read().split('\n') items = [line.split(',') for line in lines if line] results = {} # dict with headers of csv as keys for i in range(len(items[0])): values = [float(v[i]) for v in items[1:]] results[items[0][i]] = values return results def get_csv(out, name): log_dir = None for line in out.split('\n'): if line.find('Saving to ') != -1: log_dir = line[11:] break if not log_dir: raise ValueError("Couldn't find log directory from output") return parse_csv(os.path.join(log_dir, name))
# License: BSD 3-Clause from collections import OrderedDict import pickle import time from typing import Any, IO, TextIO, List, Union, Tuple, Optional, Dict # noqa F401 import os import arff import numpy as np import openml import openml._api_calls from openml.base import OpenMLBase from ..exceptions import PyOpenMLError from ..flows import get_flow from ..tasks import ( get_task, TaskType, OpenMLClassificationTask, OpenMLLearningCurveTask, OpenMLClusteringTask, OpenMLRegressionTask, ) class OpenMLRun(OpenMLBase): """OpenML Run: result of running a model on an openml dataset. Parameters ---------- task_id: int flow_id: int dataset_id: int setup_string: str output_files: Dict[str, str] A dictionary that specifies where each related file can be found. setup_id: int tags: List[str] uploader: int User ID of the uploader. uploader_name: str evaluations: Dict fold_evaluations: Dict sample_evaluations: Dict data_content: List[List] The predictions generated from executing this run. trace: OpenMLRunTrace model: object task_type: str task_evaluation_measure: str flow_name: str parameter_settings: List[OrderedDict] predictions_url: str task: OpenMLTask flow: OpenMLFlow run_id: int description_text: str, optional Description text to add to the predictions file. If left None, """ def __init__( self, task_id, flow_id, dataset_id, setup_string=None, output_files=None, setup_id=None, tags=None, uploader=None, uploader_name=None, evaluations=None, fold_evaluations=None, sample_evaluations=None, data_content=None, trace=None, model=None, task_type=None, task_evaluation_measure=None, flow_name=None, parameter_settings=None, predictions_url=None, task=None, flow=None, run_id=None, description_text=None, ): self.uploader = uploader self.uploader_name = uploader_name self.task_id = task_id self.task_type = task_type self.task_evaluation_measure = task_evaluation_measure self.flow_id = flow_id self.flow_name = flow_name self.setup_id = setup_id self.setup_string = setup_string self.parameter_settings = parameter_settings self.dataset_id = dataset_id self.evaluations = evaluations self.fold_evaluations = fold_evaluations self.sample_evaluations = sample_evaluations self.data_content = data_content self.output_files = output_files self.trace = trace self.error_message = None self.task = task self.flow = flow self.run_id = run_id self.model = model self.tags = tags self.predictions_url = predictions_url self.description_text = description_text @property def id(self) -> Optional[int]: return self.run_id def _get_repr_body_fields(self) -> List[Tuple[str, Union[str, int, List[str]]]]: """ Collect all information to display in the __repr__ body. 
""" fields = { "Uploader Name": self.uploader_name, "Metric": self.task_evaluation_measure, "Run ID": self.run_id, "Task ID": self.task_id, "Task Type": self.task_type, "Task URL": openml.tasks.OpenMLTask.url_for_id(self.task_id), "Flow ID": self.flow_id, "Flow Name": self.flow_name, "Flow URL": openml.flows.OpenMLFlow.url_for_id(self.flow_id), "Setup ID": self.setup_id, "Setup String": self.setup_string, "Dataset ID": self.dataset_id, "Dataset URL": openml.datasets.OpenMLDataset.url_for_id(self.dataset_id), } if self.uploader is not None: fields["Uploader Profile"] = "{}/u/{}".format( openml.config.get_server_base_url(), self.uploader ) if self.run_id is not None: fields["Run URL"] = self.openml_url if self.evaluations is not None and self.task_evaluation_measure in self.evaluations: fields["Result"] = self.evaluations[self.task_evaluation_measure] # determines the order in which the information will be printed order = [ "Uploader Name", "Uploader Profile", "Metric", "Result", "Run ID", "Run URL", "Task ID", "Task Type", "Task URL", "Flow ID", "Flow Name", "Flow URL", "Setup ID", "Setup String", "Dataset ID", "Dataset URL", ] return [(key, fields[key]) for key in order if key in fields] @classmethod def from_filesystem(cls, directory: str, expect_model: bool = True) -> "OpenMLRun": """ The inverse of the to_filesystem method. Instantiates an OpenMLRun object based on files stored on the file system. Parameters ---------- directory : str a path leading to the folder where the results are stored expect_model : bool if True, it requires the model pickle to be present, and an error will be thrown if not. Otherwise, the model might or might not be present. Returns ------- run : OpenMLRun the re-instantiated run object """ # Avoiding cyclic imports import openml.runs.functions if not os.path.isdir(directory): raise ValueError("Could not find folder") description_path = os.path.join(directory, "description.xml") predictions_path = os.path.join(directory, "predictions.arff") trace_path = os.path.join(directory, "trace.arff") model_path = os.path.join(directory, "model.pkl") if not os.path.isfile(description_path): raise ValueError("Could not find description.xml") if not os.path.isfile(predictions_path): raise ValueError("Could not find predictions.arff") if not os.path.isfile(model_path) and expect_model: raise ValueError("Could not find model.pkl") with open(description_path, "r") as fht: xml_string = fht.read() run = openml.runs.functions._create_run_from_xml(xml_string, from_server=False) if run.flow_id is None: flow = openml.flows.OpenMLFlow.from_filesystem(directory) run.flow = flow run.flow_name = flow.name with open(predictions_path, "r") as fht: predictions = arff.load(fht) run.data_content = predictions["data"] if os.path.isfile(model_path): # note that it will load the model if the file exists, even if # expect_model is False with open(model_path, "rb") as fhb: run.model = pickle.load(fhb) if os.path.isfile(trace_path): run.trace = openml.runs.OpenMLRunTrace._from_filesystem(trace_path) return run def to_filesystem(self, directory: str, store_model: bool = True,) -> None: """ The inverse of the from_filesystem method. Serializes a run on the filesystem, to be uploaded later. Parameters ---------- directory : str a path leading to the folder where the results will be stored. Should be empty store_model : bool, optional (default=True) if True, a model will be pickled as well. As this is the most storage expensive part, it is often desirable to not store the model. 
""" if self.data_content is None or self.model is None: raise ValueError("Run should have been executed (and contain " "model / predictions)") os.makedirs(directory, exist_ok=True) if not os.listdir(directory) == []: raise ValueError( "Output directory {} should be empty".format(os.path.abspath(directory)) ) run_xml = self._to_xml() predictions_arff = arff.dumps(self._generate_arff_dict()) # It seems like typing does not allow to define the same variable multiple times with open(os.path.join(directory, "description.xml"), "w") as fh: # type: TextIO fh.write(run_xml) with open(os.path.join(directory, "predictions.arff"), "w") as fh: fh.write(predictions_arff) if store_model: with open(os.path.join(directory, "model.pkl"), "wb") as fh_b: # type: IO[bytes] pickle.dump(self.model, fh_b) if self.flow_id is None: self.flow.to_filesystem(directory) if self.trace is not None: self.trace._to_filesystem(directory) def _generate_arff_dict(self) -> "OrderedDict[str, Any]": """Generates the arff dictionary for uploading predictions to the server. Assumes that the run has been executed. Returns ------- arf_dict : dict Dictionary representation of the ARFF file that will be uploaded. Contains predictions and information about the run environment. """ if self.data_content is None: raise ValueError("Run has not been executed.") if self.flow is None: self.flow = get_flow(self.flow_id) if self.description_text is None: self.description_text = time.strftime("%c") task = get_task(self.task_id) arff_dict = OrderedDict() # type: 'OrderedDict[str, Any]' arff_dict["data"] = self.data_content arff_dict["description"] = self.description_text arff_dict["relation"] = "openml_task_{}_predictions".format(task.task_id) if isinstance(task, OpenMLLearningCurveTask): class_labels = task.class_labels instance_specifications = [ ("repeat", "NUMERIC"), ("fold", "NUMERIC"), ("sample", "NUMERIC"), ("row_id", "NUMERIC"), ] arff_dict["attributes"] = instance_specifications if class_labels is not None: arff_dict["attributes"] = ( arff_dict["attributes"] + [ ("confidence." + class_labels[i], "NUMERIC") for i in range(len(class_labels)) ] + [("prediction", class_labels), ("correct", class_labels)] ) else: raise ValueError("The task has no class labels") elif isinstance(task, OpenMLClassificationTask): class_labels = task.class_labels instance_specifications = [ ("repeat", "NUMERIC"), ("fold", "NUMERIC"), ("sample", "NUMERIC"), # Legacy ("row_id", "NUMERIC"), ] arff_dict["attributes"] = instance_specifications if class_labels is not None: prediction_confidences = [ ("confidence." + class_labels[i], "NUMERIC") for i in range(len(class_labels)) ] prediction_and_true = [("prediction", class_labels), ("correct", class_labels)] arff_dict["attributes"] = ( arff_dict["attributes"] + prediction_confidences + prediction_and_true ) else: raise ValueError("The task has no class labels") elif isinstance(task, OpenMLRegressionTask): arff_dict["attributes"] = [ ("repeat", "NUMERIC"), ("fold", "NUMERIC"), ("row_id", "NUMERIC"), ("prediction", "NUMERIC"), ("truth", "NUMERIC"), ] elif isinstance(task, OpenMLClusteringTask): arff_dict["attributes"] = [ ("repeat", "NUMERIC"), ("fold", "NUMERIC"), ("row_id", "NUMERIC"), ("cluster", "NUMERIC"), ] else: raise NotImplementedError("Task type %s is not yet supported." % str(task.task_type)) return arff_dict def get_metric_fn(self, sklearn_fn, kwargs=None): """Calculates metric scores based on predicted values. Assumes the run has been executed locally (and contains run_data). 
Furthermore, it assumes that the 'correct' or 'truth' attribute is specified in the arff (which is an optional field, but always the case for openml-python runs) Parameters ---------- sklearn_fn : function a function pointer to a sklearn function that accepts ``y_true``, ``y_pred`` and ``**kwargs`` Returns ------- scores : list a list of floats, of length num_folds * num_repeats """ kwargs = kwargs if kwargs else dict() if self.data_content is not None and self.task_id is not None: predictions_arff = self._generate_arff_dict() elif "predictions" in self.output_files: predictions_file_url = openml._api_calls._file_id_to_url( self.output_files["predictions"], "predictions.arff", ) response = openml._api_calls._download_text_file(predictions_file_url) predictions_arff = arff.loads(response) # TODO: make this a stream reader else: raise ValueError( "Run should have been locally executed or " "contain outputfile reference." ) # Need to know more about the task to compute scores correctly task = get_task(self.task_id) attribute_names = [att[0] for att in predictions_arff["attributes"]] if ( task.task_type_id in [TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE] and "correct" not in attribute_names ): raise ValueError('Attribute "correct" should be set for ' "classification task runs") if task.task_type_id == TaskType.SUPERVISED_REGRESSION and "truth" not in attribute_names: raise ValueError('Attribute "truth" should be set for ' "regression task runs") if task.task_type_id != TaskType.CLUSTERING and "prediction" not in attribute_names: raise ValueError('Attribute "predict" should be set for ' "supervised task runs") def _attribute_list_to_dict(attribute_list): # convenience function: Creates a mapping to map from the name of # attributes present in the arff prediction file to their index. # This is necessary because the number of classes can be different # for different tasks. res = OrderedDict() for idx in range(len(attribute_list)): res[attribute_list[idx][0]] = idx return res attribute_dict = _attribute_list_to_dict(predictions_arff["attributes"]) repeat_idx = attribute_dict["repeat"] fold_idx = attribute_dict["fold"] predicted_idx = attribute_dict["prediction"] # Assume supervised task if ( task.task_type_id == TaskType.SUPERVISED_CLASSIFICATION or task.task_type_id == TaskType.LEARNING_CURVE ): correct_idx = attribute_dict["correct"] elif task.task_type_id == TaskType.SUPERVISED_REGRESSION: correct_idx = attribute_dict["truth"] has_samples = False if "sample" in attribute_dict: sample_idx = attribute_dict["sample"] has_samples = True if ( predictions_arff["attributes"][predicted_idx][1] != predictions_arff["attributes"][correct_idx][1] ): pred = predictions_arff["attributes"][predicted_idx][1] corr = predictions_arff["attributes"][correct_idx][1] raise ValueError( "Predicted and Correct do not have equal values:" " %s Vs. 
%s" % (str(pred), str(corr)) ) # TODO: these could be cached values_predict = {} values_correct = {} for line_idx, line in enumerate(predictions_arff["data"]): rep = line[repeat_idx] fold = line[fold_idx] if has_samples: samp = line[sample_idx] else: samp = 0 # No learning curve sample, always 0 if task.task_type_id in [ TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE, ]: prediction = predictions_arff["attributes"][predicted_idx][1].index( line[predicted_idx] ) correct = predictions_arff["attributes"][predicted_idx][1].index(line[correct_idx]) elif task.task_type_id == TaskType.SUPERVISED_REGRESSION: prediction = line[predicted_idx] correct = line[correct_idx] if rep not in values_predict: values_predict[rep] = OrderedDict() values_correct[rep] = OrderedDict() if fold not in values_predict[rep]: values_predict[rep][fold] = OrderedDict() values_correct[rep][fold] = OrderedDict() if samp not in values_predict[rep][fold]: values_predict[rep][fold][samp] = [] values_correct[rep][fold][samp] = [] values_predict[rep][fold][samp].append(prediction) values_correct[rep][fold][samp].append(correct) scores = [] for rep in values_predict.keys(): for fold in values_predict[rep].keys(): last_sample = len(values_predict[rep][fold]) - 1 y_pred = values_predict[rep][fold][last_sample] y_true = values_correct[rep][fold][last_sample] scores.append(sklearn_fn(y_true, y_pred, **kwargs)) return np.array(scores) def _parse_publish_response(self, xml_response: Dict): """ Parse the id from the xml_response and assign it to self. """ self.run_id = int(xml_response["oml:upload_run"]["oml:run_id"]) def _get_file_elements(self) -> Dict: """ Get file_elements to upload to the server. Derived child classes should overwrite this method as necessary. The description field will be populated automatically if not provided. """ if self.parameter_settings is None and self.model is None: raise PyOpenMLError( "OpenMLRun must contain a model or be initialized with parameter_settings." ) if self.flow_id is None: if self.flow is None: raise PyOpenMLError( "OpenMLRun object does not contain a flow id or reference to OpenMLFlow " "(these should have been added while executing the task). " ) else: # publish the linked Flow before publishing the run. self.flow.publish() self.flow_id = self.flow.flow_id if self.parameter_settings is None: if self.flow is None: self.flow = openml.flows.get_flow(self.flow_id) self.parameter_settings = self.flow.extension.obtain_parameter_values( self.flow, self.model, ) file_elements = {"description": ("description.xml", self._to_xml())} if self.error_message is None: predictions = arff.dumps(self._generate_arff_dict()) file_elements["predictions"] = ("predictions.arff", predictions) if self.trace is not None: trace_arff = arff.dumps(self.trace.trace_to_arff()) file_elements["trace"] = ("trace.arff", trace_arff) return file_elements def _to_dict(self) -> "OrderedDict[str, OrderedDict]": """ Creates a dictionary representation of self. 
""" description = OrderedDict() # type: 'OrderedDict' description["oml:run"] = OrderedDict() description["oml:run"]["@xmlns:oml"] = "http://openml.org/openml" description["oml:run"]["oml:task_id"] = self.task_id description["oml:run"]["oml:flow_id"] = self.flow_id if self.error_message is not None: description["oml:run"]["oml:error_message"] = self.error_message description["oml:run"]["oml:parameter_setting"] = self.parameter_settings if self.tags is not None: description["oml:run"]["oml:tag"] = self.tags # Tags describing the run if (self.fold_evaluations is not None and len(self.fold_evaluations) > 0) or ( self.sample_evaluations is not None and len(self.sample_evaluations) > 0 ): description["oml:run"]["oml:output_data"] = OrderedDict() description["oml:run"]["oml:output_data"]["oml:evaluation"] = list() if self.fold_evaluations is not None: for measure in self.fold_evaluations: for repeat in self.fold_evaluations[measure]: for fold, value in self.fold_evaluations[measure][repeat].items(): current = OrderedDict( [ ("@repeat", str(repeat)), ("@fold", str(fold)), ("oml:name", measure), ("oml:value", str(value)), ] ) description["oml:run"]["oml:output_data"]["oml:evaluation"].append(current) if self.sample_evaluations is not None: for measure in self.sample_evaluations: for repeat in self.sample_evaluations[measure]: for fold in self.sample_evaluations[measure][repeat]: for sample, value in self.sample_evaluations[measure][repeat][fold].items(): current = OrderedDict( [ ("@repeat", str(repeat)), ("@fold", str(fold)), ("@sample", str(sample)), ("oml:name", measure), ("oml:value", str(value)), ] ) description["oml:run"]["oml:output_data"]["oml:evaluation"].append( current ) return description
# -*- coding: utf-8 -*-
import sys


def translate(seq):
    # Standard genetic code. UCA/UCG encode serine (S) and AGA/AGG encode
    # arginine (R); stop codons are rendered with the two-letter marker 'ST'.
    geneticCode = {
        'UUU':'F', 'UUC':'F', 'UUA':'L', 'UUG':'L',    #UU
        'UCU':'S', 'UCC':'S', 'UCA':'S', 'UCG':'S',    #UC
        'UAU':'Y', 'UAC':'Y', 'UAA':'ST', 'UAG':'ST',  #UA
        'UGU':'C', 'UGC':'C', 'UGA':'ST', 'UGG':'W',   #UG
        'CUU':'L', 'CUC':'L', 'CUA':'L', 'CUG':'L',    #CU
        'CCU':'P', 'CCC':'P', 'CCA':'P', 'CCG':'P',    #CC
        'CAU':'H', 'CAC':'H', 'CAA':'Q', 'CAG':'Q',    #CA
        'CGU':'R', 'CGC':'R', 'CGA':'R', 'CGG':'R',    #CG
        'AUU':'I', 'AUC':'I', 'AUA':'I', 'AUG':'M',    #AU
        'ACU':'T', 'ACC':'T', 'ACA':'T', 'ACG':'T',    #AC
        'AAU':'N', 'AAC':'N', 'AAA':'K', 'AAG':'K',    #AA
        'AGU':'S', 'AGC':'S', 'AGA':'R', 'AGG':'R',    #AG
        'GUU':'V', 'GUC':'V', 'GUA':'V', 'GUG':'V',    #GU
        'GCU':'A', 'GCC':'A', 'GCA':'A', 'GCG':'A',    #GC
        'GAU':'D', 'GAC':'D', 'GAA':'E', 'GAG':'E',    #GA
        'GGU':'G', 'GGC':'G', 'GGA':'G', 'GGG':'G'     #GG
    }
    protein = ""
    # Only translate sequences whose length is a whole number of codons.
    if len(seq) % 3 == 0:
        for i in range(0, len(seq), 3):
            codon = seq[i:i + 3]
            protein += geneticCode[codon]
    return protein


def read_file(name):
    try:
        with open(name, "r") as file:
            seq = file.read()
    except IOError as fnf_error:
        print(fnf_error)
        print("\n")
        return False
    seq = seq.replace("\n", "")
    seq = seq.replace("\r", "")
    seq = seq.replace(".", "")
    print("\nmRNA: " + seq)
    print("\n")
    print("Protein: " + translate(seq))
    return True


def main():
    prompt = "Enter the file name with its extension (.txt): "
    if sys.version_info.major == 2:
        name = raw_input(prompt)
    elif sys.version_info.major == 3:
        name = input(prompt)
    while not read_file(name):
        if sys.version_info.major == 2:
            name = raw_input(prompt)
        elif sys.version_info.major == 3:
            name = input(prompt)


main()
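# Quick sanity check of translate() against the corrected codon table above.
# The function is defined but not invoked; it only uses codons shown in the table.
def _sanity_check_translate():
    # AUG -> M, UUU -> F, and the stop codon UAA is rendered as 'ST'.
    assert translate("AUGUUUUAA") == "MFST"
    # Lengths that are not a multiple of three fail the length check and an
    # empty string is returned.
    assert translate("AUGU") == ""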
import requests
from collections import OrderedDict

from ..exceptions import ElevationApiError
from ..geometry import Point, LineString


def elevation(path, api_key=None, sampling=50):
    """ Google elevation API backend """
    url = 'https://maps.googleapis.com/maps/api/elevation/json'
    params = {}
    points = []
    # add api key if present
    if api_key:
        params['key'] = api_key
    # convert path into a list of Point objects
    for latlng in path.split('|'):
        latlng = latlng.split(',')
        points.append(Point(float(latlng[1]), float(latlng[0])))
    if len(points) > 1:
        # rough length of the path in meters (LineString.length is in degrees,
        # the factor converts it to an approximate metric distance)
        length = LineString(points).length * 100000
        # get 1 point every x meters, where x is the ``sampling`` argument
        samples = int(round(length / sampling))
        # use the automatically calculated value as long as it is compatible
        # with the API usage limits
        if samples > 512:
            samples = 512
        # at least 2 samples
        elif samples < 2:
            samples = 2
        params['samples'] = samples
        params['path'] = path
    else:
        params['locations'] = path
    # send request to Google Elevation API
    response = requests.get(url, params=params)
    data = response.json()
    # if ok convert to GeoJSON
    if 'status' in data and data['status'] == 'OK':
        # if more than one result use LineString
        if len(data['results']) > 1:
            geometry = 'LineString'
        # else use Point
        else:
            geometry = 'Point'
        # lng, lat, z coordinates
        coordinates = []
        for point in data['results']:
            coordinates.append([point['location']['lng'],
                                point['location']['lat'],
                                point['elevation']])
        return OrderedDict((
            ('type', 'Feature'),
            ('geometry', OrderedDict((
                ('type', geometry),
                ('coordinates', coordinates)
            )))
        ))
    # otherwise raise an error carrying the original response
    else:
        raise ElevationApiError(
            "Google Elevation API error:\n\n{0}".format(response.content))
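# Hedged usage sketch (needs network access and, beyond the free quota, a valid
# Google API key; the coordinates are arbitrary). The path format is
# "lat,lng|lat,lng|...", matching the parsing above.
def _example_elevation_profile():
    feature = elevation("45.000,9.000|45.010,9.010", api_key="YOUR_KEY", sampling=50)
    # For multi-point paths the result is a GeoJSON-like Feature with a
    # LineString geometry whose coordinates carry [lng, lat, elevation].
    return feature['geometry']['coordinates']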
from internal import *
import argparse import numpy as np from sklearn.cluster import KMeans from sklearn.preprocessing import normalize import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.optim import Adam import utils from model import GAT from evaluation import eva def pretrain(dataset): model = GAT( num_features=args.input_dim, hidden_size=args.hidden_size, embedding_size=args.embedding_size, alpha=args.alpha, ).to(device) print(model) optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) # data process dataset = utils.data_preprocessing(dataset) adj = dataset.adj.to(device) adj_label = dataset.adj_label.to(device) M = utils.get_M(adj).to(device) # data and label x = torch.Tensor(dataset.x).to(device) y = dataset.y.cpu().numpy() for epoch in range(args.max_epoch): model.train() A_pred, z = model(x, adj, M) loss = F.binary_cross_entropy(A_pred.view(-1), adj_label.view(-1)) optimizer.zero_grad() loss.backward() optimizer.step() with torch.no_grad(): _, z = model(x, adj, M) kmeans = KMeans(n_clusters=args.n_clusters, n_init=20).fit( z.data.cpu().numpy() ) acc, nmi, ari, f1 = eva(y, kmeans.labels_, epoch) if epoch % 5 == 0: torch.save( model.state_dict(), f"./pretrain/predaegc_{args.name}_{epoch}.pkl" ) if __name__ == "__main__": parser = argparse.ArgumentParser( description="train", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--name", type=str, default="Citeseer") parser.add_argument("--max_epoch", type=int, default=100) parser.add_argument("--lr", type=float, default=0.001) parser.add_argument("--n_clusters", default=6, type=int) parser.add_argument("--hidden_size", default=256, type=int) parser.add_argument("--embedding_size", default=16, type=int) parser.add_argument("--weight_decay", type=int, default=5e-3) parser.add_argument( "--alpha", type=float, default=0.2, help="Alpha for the leaky_relu." ) args = parser.parse_args() args.cuda = torch.cuda.is_available() print("use cuda: {}".format(args.cuda)) device = torch.device("cuda" if args.cuda else "cpu") datasets = utils.get_dataset(args.name) dataset = datasets[0] if args.name == "Citeseer": args.lr = 0.005 args.k = None args.n_clusters = 6 elif args.name == "Cora": args.lr = 0.005 args.k = None args.n_clusters = 7 elif args.name == "Pubmed": args.lr = 0.001 args.k = None args.n_clusters = 3 else: args.k = None args.input_dim = dataset.num_features print(args) pretrain(dataset)
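# Hedged sketch: reloading one of the checkpoints that pretrain() saves every
# 5 epochs. The constructor arguments must match those used during pretraining
# (the values below are the argparse defaults); the epoch number is a placeholder.
def _load_pretrained_gat(name, input_dim, device, epoch=50):
    model = GAT(num_features=input_dim, hidden_size=256,
                embedding_size=16, alpha=0.2).to(device)
    model.load_state_dict(torch.load(f"./pretrain/predaegc_{name}_{epoch}.pkl",
                                     map_location=device))
    model.eval()
    return model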
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019-2020 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_session_ttl short_description: Configure global session TTL timers for this FortiGate in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify system feature and session_ttl category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.0 version_added: "2.10" author: - Link Zheng (@chillancezen) - Jie Xue (@JieX19) - Hongbin Lu (@fgtdev-hblu) - Frank Shen (@frankshen01) - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks requirements: - ansible>=2.9.0 options: access_token: description: - Token-based authentication. Generated from GUI of Fortigate. type: str required: false enable_log: description: - Enable/Disable logging for task. type: bool required: false default: false vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root member_path: type: str description: - Member attribute path to operate on. - Delimited by a slash character if there are more than one attribute. - Parameter marked with member_path is legitimate for doing member operation. member_state: type: str description: - Add or delete a member under specified attribute path. - When member_state is specified, the state option is ignored. choices: - present - absent system_session_ttl: description: - Configure global session TTL timers for this FortiGate. default: null type: dict suboptions: default: description: - Default timeout. type: str port: description: - Session TTL port. type: list suboptions: end_port: description: - End port number. type: int id: description: - Table entry ID. required: true type: int protocol: description: - Protocol (0 - 255). type: int start_port: description: - Start port number. type: int timeout: description: - Session timeout (TTL). 
type: str ''' EXAMPLES = ''' - collections: - fortinet.fortios connection: httpapi hosts: fortigate01 vars: ansible_httpapi_port: 443 ansible_httpapi_use_ssl: true ansible_httpapi_validate_certs: false vdom: root tasks: - name: fortios_system_session_ttl fortios_system_session_ttl: vdom: root system_session_ttl: default: '3600' ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG def filter_system_session_ttl_data(json): option_list = ['default', 'port'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for i, elem in enumerate(data): data[i] = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def system_session_ttl(data, fos): vdom = data['vdom'] system_session_ttl_data = data['system_session_ttl'] filtered_data = underscore_to_hyphen(filter_system_session_ttl_data(system_session_ttl_data)) return fos.set('system', 'session-ttl', data=filtered_data, vdom=vdom) def is_successful_status(resp): return 'status' in resp and resp['status'] == 'success' or \ 'http_status' in resp and resp['http_status'] == 200 or \ 'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404 def fortios_system(data, fos): fos.do_member_operation('system_session_ttl') if data['system_session_ttl']: resp = system_session_ttl(data, fos) else: fos._module.fail_json(msg='missing task body: %s' % ('system_session_ttl')) return not is_successful_status(resp), \ is_successful_status(resp) and \ (resp['revision_changed'] if 'revision_changed' in resp else True), \ resp 
versioned_schema = { "type": "dict", "children": { "default": { "type": "string", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } }, "port": { "type": "list", "children": { "protocol": { "type": "integer", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } }, "end_port": { "type": "integer", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } }, "start_port": { "type": "integer", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } }, "id": { "type": "integer", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } }, "timeout": { "type": "string", "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } } }, "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } } }, "revisions": { "v6.0.0": True, "v7.0.0": True, "v6.0.5": True, "v6.4.4": True, "v7.0.1": True, "v6.4.0": True, "v6.4.1": True, "v6.2.0": True, "v6.2.3": True, "v6.2.5": True, "v6.2.7": True, "v6.0.11": True } } def main(): module_spec = schema_to_module_spec(versioned_schema) mkeyname = None fields = { "access_token": {"required": False, "type": "str", "no_log": True}, "enable_log": {"required": False, "type": bool}, "vdom": {"required": False, "type": "str", "default": "root"}, "member_path": {"required": False, "type": "str"}, "member_state": { "type": "str", "required": False, "choices": ["present", "absent"] }, "system_session_ttl": { "required": False, "type": "dict", "default": None, "options": { } } } for attribute_name in module_spec['options']: fields["system_session_ttl"]['options'][attribute_name] = module_spec['options'][attribute_name] if mkeyname and mkeyname == attribute_name: fields["system_session_ttl"]['options'][attribute_name]['required'] = True check_legacy_fortiosapi() module = AnsibleModule(argument_spec=fields, supports_check_mode=False) versions_check_result = None if module._socket_path: connection = Connection(module._socket_path) if 'access_token' in module.params: connection.set_option('access_token', module.params['access_token']) if 'enable_log' in module.params: connection.set_option('enable_log', module.params['enable_log']) else: connection.set_option('enable_log', False) fos = FortiOSHandler(connection, module, mkeyname) versions_check_result = check_schema_versioning(fos, versioned_schema, "system_session_ttl") is_error, has_changed, result = fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) if versions_check_result and versions_check_result['matched'] is False: module.warn("Ansible has 
detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv")

    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
# third party from flask_sockets import Sockets from main import ws from nacl.encoding import HexEncoder from nacl.signing import SigningKey # grid relative from ..routes import association_requests_blueprint from ..routes import dcfl_blueprint from ..routes import groups_blueprint from ..routes import mcfl_blueprint from ..routes import roles_blueprint from ..routes import root_blueprint from ..routes import search_blueprint from ..routes import setup_blueprint from ..routes import users_blueprint from ..utils.executor import executor from .nodes.domain import GridDomain from .nodes.network import GridNetwork from .nodes.worker import GridWorker from .sleepy_until_configured import SleepyUntilConfigured node = None def get_node(): global node return node def create_worker_app(app, args): # Register HTTP blueprints # Here you should add all the blueprints related to HTTP routes. app.register_blueprint(root_blueprint, url_prefix=r"/") # Register WebSocket blueprints # Here you should add all the blueprints related to WebSocket routes. global node node = GridWorker(name=args.name, domain_url=args.domain_address) app.config["EXECUTOR_PROPAGATE_EXCEPTIONS"] = True app.config["EXECUTOR_TYPE"] = "thread" executor.init_app(app) return app def create_network_app(app, args, testing=False): test_config = None if args.start_local_db: test_config = {"SQLALCHEMY_DATABASE_URI": "sqlite:///nodedatabase.db"} app.register_blueprint(roles_blueprint, url_prefix=r"/roles") app.register_blueprint(users_blueprint, url_prefix=r"/users") app.register_blueprint(setup_blueprint, url_prefix=r"/setup") app.register_blueprint(root_blueprint, url_prefix=r"/") app.register_blueprint(search_blueprint, url_prefix=r"/search") app.register_blueprint( association_requests_blueprint, url_prefix=r"/association-requests" ) # Register WebSocket blueprints # Here you should add all the blueprints related to WebSocket routes. # grid relative from .database import Role from .database import User from .database import db from .database import seed_db from .database import set_database_config global node node = GridNetwork(name=args.name) # Set SQLAlchemy configs set_database_config(app, test_config=test_config) s = app.app_context().push() db.create_all() if not testing: if len(db.session.query(Role).all()) == 0: seed_db() role = db.session.query(Role.id).filter_by(name="Owner").first() user = User.query.filter_by(role=role.id).first() if user: signing_key = SigningKey( user.private_key.encode("utf-8"), encoder=HexEncoder ) node.signing_key = signing_key node.verify_key = node.signing_key.verify_key node.root_verify_key = node.verify_key db.session.commit() app.config["EXECUTOR_PROPAGATE_EXCEPTIONS"] = True app.config["EXECUTOR_TYPE"] = "thread" executor.init_app(app) return app def create_domain_app(app, args, testing=False): test_config = None if args.start_local_db: test_config = {"SQLALCHEMY_DATABASE_URI": "sqlite:///nodedatabase.db"} # Bind websocket in Flask app instance sockets = Sockets(app) # Register HTTP blueprints # Here you should add all the blueprints related to HTTP routes. 
app.register_blueprint(roles_blueprint, url_prefix=r"/roles") app.register_blueprint(users_blueprint, url_prefix=r"/users") app.register_blueprint(setup_blueprint, url_prefix=r"/setup") app.register_blueprint(groups_blueprint, url_prefix=r"/groups") app.register_blueprint(dcfl_blueprint, url_prefix=r"/data-centric") app.register_blueprint(mcfl_blueprint, url_prefix=r"/model-centric") app.register_blueprint(root_blueprint, url_prefix=r"/") app.register_blueprint( association_requests_blueprint, url_prefix=r"/association-requests" ) # Register WebSocket blueprints # Here you should add all the blueprints related to WebSocket routes. sockets.register_blueprint(ws, url_prefix=r"/") # grid relative from .database import Role from .database import SetupConfig from .database import User from .database import db from .database import seed_db from .database import set_database_config global node node = GridDomain(name=args.name) # Set SQLAlchemy configs set_database_config(app, test_config=test_config) app.app_context().push() db.create_all() if not testing: if len(db.session.query(Role).all()) == 0: seed_db() if len(db.session.query(SetupConfig).all()) != 0: node.name = db.session.query(SetupConfig).first().domain_name role = db.session.query(Role.id).filter_by(name="Owner").first() user = User.query.filter_by(role=role.id).first() if user: signing_key = SigningKey( user.private_key.encode("utf-8"), encoder=HexEncoder ) node.signing_key = signing_key node.verify_key = node.signing_key.verify_key node.root_verify_key = node.verify_key # Register global middlewares # Always after context is pushed app.wsgi_app = SleepyUntilConfigured(app, app.wsgi_app) db.session.commit() app.config["EXECUTOR_PROPAGATE_EXCEPTIONS"] = True app.config["EXECUTOR_TYPE"] = "thread" executor.init_app(app) return app
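# Hedged sketch of how one of the factories above might be wired up in a test.
# The attribute names on `args` mirror what this module actually reads
# (args.name, args.start_local_db); everything else is a placeholder.
def _example_domain_app():
    from argparse import Namespace
    from flask import Flask

    app = Flask(__name__)
    args = Namespace(name="my-domain", start_local_db=True)
    return create_domain_app(app, args, testing=True)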
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = ['ServerKeyArgs', 'ServerKey'] @pulumi.input_type class ServerKeyArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], server_key_type: pulumi.Input[Union[str, 'ServerKeyType']], server_name: pulumi.Input[str], key_name: Optional[pulumi.Input[str]] = None, uri: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a ServerKey resource. :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param pulumi.Input[Union[str, 'ServerKeyType']] server_key_type: The server key type like 'ServiceManaged', 'AzureKeyVault'. :param pulumi.Input[str] server_name: The name of the server. :param pulumi.Input[str] key_name: The name of the server key to be operated on (updated or created). The key name is required to be in the format of 'vault_key_version'. For example, if the keyId is https://YourVaultName.vault.azure.net/keys/YourKeyName/YourKeyVersion, then the server key name should be formatted as: YourVaultName_YourKeyName_YourKeyVersion :param pulumi.Input[str] uri: The URI of the server key. If the ServerKeyType is AzureKeyVault, then the URI is required. """ pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "server_key_type", server_key_type) pulumi.set(__self__, "server_name", server_name) if key_name is not None: pulumi.set(__self__, "key_name", key_name) if uri is not None: pulumi.set(__self__, "uri", uri) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="serverKeyType") def server_key_type(self) -> pulumi.Input[Union[str, 'ServerKeyType']]: """ The server key type like 'ServiceManaged', 'AzureKeyVault'. """ return pulumi.get(self, "server_key_type") @server_key_type.setter def server_key_type(self, value: pulumi.Input[Union[str, 'ServerKeyType']]): pulumi.set(self, "server_key_type", value) @property @pulumi.getter(name="serverName") def server_name(self) -> pulumi.Input[str]: """ The name of the server. """ return pulumi.get(self, "server_name") @server_name.setter def server_name(self, value: pulumi.Input[str]): pulumi.set(self, "server_name", value) @property @pulumi.getter(name="keyName") def key_name(self) -> Optional[pulumi.Input[str]]: """ The name of the server key to be operated on (updated or created). The key name is required to be in the format of 'vault_key_version'. 
For example, if the keyId is https://YourVaultName.vault.azure.net/keys/YourKeyName/YourKeyVersion, then the server key name should be formatted as: YourVaultName_YourKeyName_YourKeyVersion """ return pulumi.get(self, "key_name") @key_name.setter def key_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key_name", value) @property @pulumi.getter def uri(self) -> Optional[pulumi.Input[str]]: """ The URI of the server key. If the ServerKeyType is AzureKeyVault, then the URI is required. """ return pulumi.get(self, "uri") @uri.setter def uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "uri", value) class ServerKey(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, key_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_key_type: Optional[pulumi.Input[Union[str, 'ServerKeyType']]] = None, server_name: Optional[pulumi.Input[str]] = None, uri: Optional[pulumi.Input[str]] = None, __props__=None): """ A server key. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] key_name: The name of the server key to be operated on (updated or created). The key name is required to be in the format of 'vault_key_version'. For example, if the keyId is https://YourVaultName.vault.azure.net/keys/YourKeyName/YourKeyVersion, then the server key name should be formatted as: YourVaultName_YourKeyName_YourKeyVersion :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param pulumi.Input[Union[str, 'ServerKeyType']] server_key_type: The server key type like 'ServiceManaged', 'AzureKeyVault'. :param pulumi.Input[str] server_name: The name of the server. :param pulumi.Input[str] uri: The URI of the server key. If the ServerKeyType is AzureKeyVault, then the URI is required. """ ... @overload def __init__(__self__, resource_name: str, args: ServerKeyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ A server key. :param str resource_name: The name of the resource. :param ServerKeyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(ServerKeyArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, key_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_key_type: Optional[pulumi.Input[Union[str, 'ServerKeyType']]] = None, server_name: Optional[pulumi.Input[str]] = None, uri: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ServerKeyArgs.__new__(ServerKeyArgs) __props__.__dict__["key_name"] = key_name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name if server_key_type is None and not opts.urn: raise TypeError("Missing required property 'server_key_type'") __props__.__dict__["server_key_type"] = server_key_type if server_name is None and not opts.urn: raise TypeError("Missing required property 'server_name'") __props__.__dict__["server_name"] = server_name __props__.__dict__["uri"] = uri __props__.__dict__["creation_date"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["subregion"] = None __props__.__dict__["thumbprint"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:ServerKey"), pulumi.Alias(type_="azure-native:sql:ServerKey"), pulumi.Alias(type_="azure-nextgen:sql:ServerKey"), pulumi.Alias(type_="azure-native:sql/v20150501preview:ServerKey"), pulumi.Alias(type_="azure-nextgen:sql/v20150501preview:ServerKey"), pulumi.Alias(type_="azure-native:sql/v20200202preview:ServerKey"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:ServerKey"), pulumi.Alias(type_="azure-native:sql/v20201101preview:ServerKey"), pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:ServerKey")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(ServerKey, __self__).__init__( 'azure-native:sql/v20200801preview:ServerKey', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'ServerKey': """ Get an existing ServerKey resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = ServerKeyArgs.__new__(ServerKeyArgs) __props__.__dict__["creation_date"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["subregion"] = None __props__.__dict__["thumbprint"] = None __props__.__dict__["type"] = None return ServerKey(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="creationDate") def creation_date(self) -> pulumi.Output[str]: """ The server key creation date. """ return pulumi.get(self, "creation_date") @property @pulumi.getter def kind(self) -> pulumi.Output[str]: """ Kind of encryption protector. This is metadata used for the Azure portal experience. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter def subregion(self) -> pulumi.Output[str]: """ Subregion of the server key. """ return pulumi.get(self, "subregion") @property @pulumi.getter def thumbprint(self) -> pulumi.Output[str]: """ Thumbprint of the server key. """ return pulumi.get(self, "thumbprint") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type")
# backup-1.py
import os

#====================================================================

cwd = os.getcwd()
prefix = cwd.split('/')[-1]  # get the current folder's name, not the full path leading to it

# visit the current directory and every one within it, recursively
for dir_listing in os.walk(cwd):
    here = dir_listing[0]       # full path 'here' for this iteration of the loop
    dirnames = dir_listing[1]   # list of directories here
    filenames = dir_listing[2]  # list of files here

    for filename in filenames:
        # don't upload hidden files
        if filename[0] == '.':
            continue

        # absolute, full path to the file on disk
        file_abspath = here + '/' + filename

        # S3 object key
        key = file_abspath[len(cwd):]
        if key[0] == '/':  # cleaner S3 keys on Linux
            key = key[1:]  # remove leading slash

        # prepend the prefix so files aren't all dumped straight into bucket root
        key = prefix + '/' + key

        print(key)
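# Hedged sketch of the upload step these keys are built for. The script above
# only prints the keys; an actual backup would hand each (file, key) pair to an
# S3 client such as boto3 (the bucket name below is a placeholder):
#
#     import boto3
#     s3 = boto3.client('s3')
#     s3.upload_file(file_abspath, 'my-backup-bucket', key)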
import sys sys.path.append("..") import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter import utils from metrics import calculate_kl as KL_DIV import config_bayesian as cfg from ..misc import ModuleWrapper class BBBLinear(ModuleWrapper): def __init__(self, in_features, out_features, bias=True): super(BBBLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.use_bias = bias self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.prior_mu = 0 self.prior_sigma = 0.1 self.W_mu = Parameter(torch.Tensor(out_features, in_features)) self.W_rho = Parameter(torch.Tensor(out_features, in_features)) if self.use_bias: self.bias_mu = Parameter(torch.Tensor(out_features)) self.bias_rho = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias_mu', None) self.register_parameter('bias_rho', None) self.reset_parameters() def reset_parameters(self): self.W_mu.data.normal_(0, 0.1) self.W_rho.data.normal_(-3, 0.1) if self.use_bias: self.bias_mu.data.normal_(0, 0.1) self.bias_rho.data.normal_(-3, 0.1) def forward(self, x, sample=True): self.W_sigma = torch.log1p(torch.exp(self.W_rho)) if self.use_bias: self.bias_sigma = torch.log1p(torch.exp(self.bias_rho)) bias_var = self.bias_sigma ** 2 else: self.bias_sigma = bias_var = None act_mu = F.linear(x, self.W_mu, self.bias_mu) act_var = 1e-16 + F.linear(x ** 2, self.W_sigma ** 2, bias_var) act_std = torch.sqrt(act_var) if self.training or sample: eps = torch.empty(act_mu.size()).normal_(0, 1).to(self.device) return act_mu + act_std * eps else: return act_mu def kl_loss(self): kl = KL_DIV(self.prior_mu, self.prior_sigma, self.W_mu, self.W_sigma) if self.use_bias: kl += KL_DIV(self.prior_mu, self.prior_sigma, self.bias_mu, self.bias_sigma) return kl
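# Minimal usage sketch of BBBLinear (shapes are arbitrary). The forward pass is
# stochastic by default, and kl_loss() supplies the complexity term added to the
# data-fit loss in Bayes-by-Backprop training; note kl_loss() relies on W_sigma,
# which is computed inside forward(), so call it after a forward pass.
def _example_bbb_linear():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    layer = BBBLinear(in_features=10, out_features=3).to(device)
    x = torch.randn(4, 10, device=device)
    y_sample = layer(x)              # sampled activations (training / sample=True)
    y_mean = layer(x, sample=False)  # deterministic mean activations
    kl = layer.kl_loss()             # KL(q(w) || p(w)) summed over parameters
    return y_sample.shape, y_mean.shape, kl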
#!/usr/bin/env python # -*- coding: utf-8 -*- #### s09_check_chrom1.py #### made by Min-Seok Kwon #### 2020-01-21 09:55:02 ######################### import sys import os SVRNAME = os.uname()[1] if "MBI" in SVRNAME.upper(): sys_path="/Users/pcaso/bin/python_lib" elif SVRNAME == "T7": sys_path="/ms1/bin/python_lib" else: sys_path="/home/mk446/bin/python_lib" sys.path.append(sys_path) import file_util import proc_util def s09_check_chrom1(chrom): i = 0 for k in range(400): vcfmap = {} for tsv in file_util.walk(path + "chr" + chrom + "/" + str(k) + "/", '_microannot.tsv'): k1 = int(tsv.split('/')[-1].split('_')[1]) vcfmap[tsv] = k1 (ks, vs) = struct_util.sortdict(vcfmap) prev_pos = 0 for tsv in ks: j = 0 log = str(i + 1) + ': ' + tsv print(log) for line in file_util.gzopen(tsv): # line = line.decode('UTF-8') if (i == 0 and line[0] == '#'): line = line.replace('\t.\t', '\tID\t') if (i == 0) or (i > 0 and line[0] != '#'): # print(line, end='') if line[0] != '#': arr = line.split('\t') pos = int(arr[1]) if pos < prev_pos: print('ERROR', i, arr, prev_pos) prev_pos = pos j += 1 if j % 100000 == 0: print('\t' + line[:30]) i += 1 if __name__ == "__main__": import struct_util path = "/home/mk446/mutanno/PRECALVEP/" s09_check_chrom1("1")
import os import pytest from dvc.output import base def test_stage_cache(tmp_dir, dvc, mocker): tmp_dir.gen("dep", "dep") tmp_dir.gen( "script.py", ( 'open("out", "w+").write("out"); ' 'open("out_no_cache", "w+").write("out_no_cache")' ), ) stage = dvc.run( cmd="python script.py", deps=["script.py", "dep"], outs=["out"], outs_no_cache=["out_no_cache"], single_stage=True, ) with dvc.lock: stage.remove(remove_outs=True, force=True) assert not (tmp_dir / "out").exists() assert not (tmp_dir / "out_no_cache").exists() assert not (tmp_dir / "out.dvc").exists() cache_dir = os.path.join( dvc.stage_cache.cache_dir, "10", "10b45372fdf4ec14d3f779c5b256378d7a12780e4c7f549a44138e492f098bfe", ) cache_file = os.path.join( cache_dir, "bb32e04c6da96a7192513390acedbe4cd6123f8fe5b0ba5fffe39716fe87f6f4", ) assert os.path.isdir(cache_dir) assert os.listdir(cache_dir) == [os.path.basename(cache_file)] assert os.path.isfile(cache_file) run_spy = mocker.patch("dvc.stage.run.cmd_run") checkout_spy = mocker.spy(base, "checkout") with dvc.lock: stage.run() assert not run_spy.called assert checkout_spy.call_count == 4 assert (tmp_dir / "out").exists() assert (tmp_dir / "out_no_cache").exists() assert (tmp_dir / "out").read_text() == "out" assert (tmp_dir / "out_no_cache").read_text() == "out_no_cache" def test_stage_cache_params(tmp_dir, dvc, mocker): tmp_dir.gen("params.yaml", "foo: 1\nbar: 2") tmp_dir.gen("myparams.yaml", "baz: 3\nqux: 4") tmp_dir.gen( "script.py", ( 'open("out", "w+").write("out"); ' 'open("out_no_cache", "w+").write("out_no_cache")' ), ) stage = dvc.run( cmd="python script.py", params=["foo,bar", "myparams.yaml:baz,qux"], outs=["out"], outs_no_cache=["out_no_cache"], single_stage=True, ) with dvc.lock: stage.remove(remove_outs=True, force=True) assert not (tmp_dir / "out").exists() assert not (tmp_dir / "out_no_cache").exists() assert not (tmp_dir / "out.dvc").exists() cache_dir = os.path.join( dvc.stage_cache.cache_dir, "65", "651d0a5b82e05e48b03acf44954f6a8599760e652a143d517a17d1065eca61a1", ) cache_file = os.path.join( cache_dir, "2196a5a4dd24c5759437511fcf9d6aa66b259e1dac58e3f212aefd1797a6f114", ) assert os.path.isdir(cache_dir) assert os.listdir(cache_dir) == [os.path.basename(cache_file)] assert os.path.isfile(cache_file) run_spy = mocker.patch("dvc.stage.run.cmd_run") checkout_spy = mocker.spy(base, "checkout") with dvc.lock: stage.run() assert not run_spy.called assert checkout_spy.call_count == 4 assert (tmp_dir / "out").exists() assert (tmp_dir / "out_no_cache").exists() assert (tmp_dir / "out").read_text() == "out" assert (tmp_dir / "out_no_cache").read_text() == "out_no_cache" def test_stage_cache_wdir(tmp_dir, dvc, mocker): tmp_dir.gen("dep", "dep") tmp_dir.gen( "script.py", ( 'open("out", "w+").write("out"); ' 'open("out_no_cache", "w+").write("out_no_cache")' ), ) tmp_dir.gen({"wdir": {}}) stage = dvc.run( cmd="python ../script.py", deps=["../script.py", "../dep"], outs=["out"], outs_no_cache=["out_no_cache"], single_stage=True, wdir="wdir", ) with dvc.lock: stage.remove(remove_outs=True, force=True) assert not (tmp_dir / "wdir" / "out").exists() assert not (tmp_dir / "wdir" / "out_no_cache").exists() assert not (tmp_dir / "wdir" / "out.dvc").exists() cache_dir = os.path.join( dvc.stage_cache.cache_dir, "d2", "d2b5da199f4da73a861027f5f76020a948794011db9704814fdb2a488ca93ec2", ) cache_file = os.path.join( cache_dir, "65cc63ade5ab338541726b26185ebaf42331141ec3a670a7d6e8a227505afade", ) assert os.path.isdir(cache_dir) assert os.listdir(cache_dir) == [os.path.basename(cache_file)] 
assert os.path.isfile(cache_file) run_spy = mocker.patch("dvc.stage.run.cmd_run") checkout_spy = mocker.spy(base, "checkout") with dvc.lock: stage.run() assert not run_spy.called assert checkout_spy.call_count == 4 assert (tmp_dir / "wdir" / "out").exists() assert (tmp_dir / "wdir" / "out_no_cache").exists() assert (tmp_dir / "wdir" / "out").read_text() == "out" assert (tmp_dir / "wdir" / "out_no_cache").read_text() == "out_no_cache" def test_shared_stage_cache(tmp_dir, dvc, run_copy): import stat from dvc.objects.db import ODBManager tmp_dir.gen("foo", "foo") with dvc.config.edit() as config: config["cache"]["shared"] = "group" dvc.odb = ODBManager(dvc) assert not os.path.exists(dvc.odb.local.cache_dir) run_copy("foo", "bar", name="copy-foo-bar") parent_cache_dir = os.path.join(dvc.stage_cache.cache_dir, "88",) cache_dir = os.path.join( parent_cache_dir, "883395068439203a9de3d1e1649a16e9027bfd1ab5dab4f438d321c4a928b328", ) cache_file = os.path.join( cache_dir, "e42b7ebb9bc5ac4bccab769c8d1338914dad25d7ffecc8671dbd4581bad4aa15", ) # sanity check assert os.path.isdir(cache_dir) assert os.listdir(cache_dir) == [os.path.basename(cache_file)] assert os.path.isfile(cache_file) def _mode(path): return stat.S_IMODE(os.stat(path).st_mode) if os.name == "nt": dir_mode = 0o777 file_mode = 0o666 else: dir_mode = 0o2775 file_mode = 0o664 assert _mode(dvc.odb.local.cache_dir) == dir_mode assert _mode(dvc.stage_cache.cache_dir) == dir_mode assert _mode(parent_cache_dir) == dir_mode assert _mode(cache_dir) == dir_mode assert _mode(cache_file) == file_mode @pytest.mark.parametrize( "kwargs", [ {}, {"cmd": "cmd"}, {"cmd": "cmd", "deps": ["path"]}, {"cmd": "cmd", "outs": ["path"]}, {"always_changed": True}, ], ) def test_unhashable(tmp_dir, dvc, mocker, kwargs): from dvc.stage import Stage, create_stage from dvc.stage.cache import RunCacheNotFoundError, StageCache cache = StageCache(dvc) stage = create_stage(Stage, path="stage.dvc", repo=dvc, **kwargs) get_stage_hash = mocker.patch("dvc.stage.cache._get_stage_hash") assert cache.save(stage) is None assert get_stage_hash.not_called with pytest.raises(RunCacheNotFoundError): cache.restore(stage) assert get_stage_hash.not_called
from setuptools import setup setup(name='scattering', version='0.0', description='Compute scattering functions', url='http://github.com/mattwthompson/scattering', author='Matthew W. Thompson', author_email='matt.thompson@vanderbilt.edu', license='MIT', packages=['scattering'], zip_safe=False)
import numpy as np import numpy import math import logging logger = logging.getLogger(__name__) # Set reasonable precision for comparing floats to zero. Originally the multiplier was # 10, but I needed to set this to 1000 because some of the trimesh distance methods # do not see as accurate as with primitive shapes. EPS_ZERO = np.finfo(float).eps * 1000 def on_aabb_surface(size, point, centre=(0.0, 0.0, 0.0), atol=EPS_ZERO): """ Surface test for axis-aligned bounding box with absolute distance tolerance along surface normal direction. >>> size = (1.0, 1.0, 1.0) >>> centre = (0.0, 0.0, 0.0) >>> pt = np.array([0.5, np.random.uniform(-0.5*size[1], 0.5*size[1]), np.random.uniform(-0.5*size[2], 0.5*size[2])]) >>> atol = 1e-8 >>> on_aabb_surface(size, pt, centre=centre, atol=1e-8) True >>> on_aabb_surface(size, pt + np.array([atol, 0.0, 0.0]), centre=centre, atol=1e-8) False >>> on_aabb_surface(size, pt + np.array([atol, 0.0, 0.0]), centre=centre, atol=1e-8) False """ origin = np.array(centre) - 0.5 * np.array(size) extent = np.array(centre) + 0.5 * np.array(size) # xmin xmin_point = np.array(point) xmin_point[0] = origin[0] # print("point: {}, xmin_point: {}".format(point, xmin_point)) xmin_dist = distance_between(point, xmin_point) # xmax xmax_point = np.array(point) xmax_point[0] = extent[0] # print("point: {}, xmax_point: {}".format(point, xmax_point)) xmax_dist = distance_between(point, xmax_point) # ymin ymin_point = np.array(point) ymin_point[1] = origin[1] ymin_dist = distance_between(point, ymin_point) # ymax ymax_point = np.array(point) ymax_point[1] = extent[1] ymax_dist = distance_between(point, ymax_point) # ymin zmin_point = np.array(point) zmin_point[2] = origin[2] zmin_dist = distance_between(point, zmin_point) # ymax zmax_point = np.array(point) zmax_point[2] = extent[2] zmax_dist = distance_between(point, zmax_point) dists = (xmin_dist, xmax_dist, ymin_dist, ymax_dist, zmin_dist, zmax_dist) tests = [np.abs(dist) < (atol / 2) for dist in dists] surfaces = np.where(np.array(tests) == True)[0].tolist() return np.any(tests), surfaces def aabb_intersection(min_point, max_point, ray_position, ray_direction): """ Returns an array intersection points with the ray and box using the method of Williams [1]. If no intersection occurs return `None`. Arguments --------- min_point: tuple like (x0, y0, z0) which is the minimum corner. box_size: tuple like (x1, y1, z1) which is the maximum corner. ray_position: tuple like (x, y, z), the ray origin. ray_direction: tuple like (i, j, k), the ray direction. Returns ------- intersections: tuple of (x, y, z) tuples or empty list. References ---------- [1] Amy Williams, Steve Barrus, R. 
Keith Morley, and Peter Shirley, "An Efficient and Robust Ray-Box Intersection Algorithm" Journal of graphics tools, 10(1):49-54, 2005 """ rpos = np.array(ray_position) rdir = np.array(ray_direction) origin = np.array(min_point) extent = np.array(max_point) pts = (origin, extent) rinvd = 1.0 / rdir rsgn = 1.0 / (rinvd < 0.0) tmin = (origin[rsgn[0]] - rpos[0]) * rinvd[0] tmax = (origin[1 - rsgn[0]] - rpos[0]) * rinvd[0] tymin = (extent[rsgn[1]] - rpos[1]) * rinvd[1] tymax = (extent[1 - rsgn[1]] - rpos[1]) * rinvd[1] if (tmin > tymax) or (tymin > tmax): return None if tymin > tmin: tmin = tymin if tymax < tmax: tmax = tymax tzmin = (extent[rsgn[2]] - rpos[2]) * rinvd[2] tzmax = (extent[1 - rsgn[2]] - rpos[2]) * rinvd[2] if (tmin > tzmax) or (tzmin > tmax): return None if tzmin > tmin: tmin = tzmin if tzmax < tmax: tmax = tzmax # Calculate the hit coordinates then if the solution is in # the forward direction append to the hit list. hit_coordinates = [] pt1 = tuple(rpos + tmin * rdir) pt2 = tuple(rpos + tmax * rdir) if tmin >= 0.0: hit_coordinates.append(pt1) if tmax >= 0.0: hit_coordinates.append(pt2) return tuple(hit_coordinates) def ray_z_cylinder(length, radius, ray_origin, ray_direction): """ Returns ray-cylinder intersection points for a cylinder aligned along the z-axis with centre at (0, 0, 0). Parameters ---------- length : float The length of the cylinder radius : float The radius of the cylinder ray_origin : tuple of float The origin of the ray like, e.g. :math:`\left(0.0, 1.0, 2.0 \\right)` ray_direction : tuple of float The direction **unit** vector of the ray like, e.g. :math:`(n_x, n_y, n_z)`. Returns ------- points: tuple of points Returns a tuple of tuple like ((0.0, 1.0, 2.0), ...) where each item is an intersection point. The tuple is sorted by distance from the ray origin. Notes ----- Equation of ray is [1], :math:`P(t) = E + t` where :math:`E` is the origin or "eye" point and :math:`D` is the direction vector. In component form, .. math:: \\begin{bmatrix} x(t) \\ y(t) \\ z(t) \\ \end{bmatrix} = \\begin{bmatrix} x_E + t x_D \\ y_E + t y_D \\ z_E + t z_D\\ \end{bmatrix} The equation of cylinder aligned along the z direction is, .. math:: x^2 + y^2 = R^2 where :math`R` is the radius of the cylinder. Substituting the equation of the ray into the equation of the cylinder, .. math:: (x_E + t x_D)^2 + (y_E + t y_D)^2 = R^2 and after grouping the :math:`t^2` and :math:`t` terms, .. math:: t^2\left(x_D^2 + y_D^2\\right) + t \left(2 x_E x_D + 2 y_E y _D \\right) + \left( x_E^2 + y_E^2 - R^2 \\right) = 0 which is a standard quadratic equation, .. math:: at^2 + bt + c = 0 Solution of this equation give two values :math:`\left( t_1, t_2 \\right)` which give the ray's distance to intersection points. To be ahead on the ray's path :math:`\left( t_1, t_2 \\right) >= 0` and to be real intersection points the values must be finite and have imaginary component of zero. The intersection with the cylinder caps is found by intersecting the ray with two infinite planes at :math:`z=0` and :math:`z=L`, where :math:`L` is the length of the cylinder. The ray-plane intersection is given by [2], .. math:: t = \\frac{(Q - P) \cdot n}{D \cdot n} where :math:`t` is the distance from the ray origin to the intersection point, :math:`Q` is a point on the plane and :math:`n` the **outward** facing surface normal at that point. As before :math:`P` is the origin of the ray and :math:`D` is the ray's direction unit vector. For the bottom cap at :math:`z=0`, .. 
math:: t_{\\text{bot}} = \\frac{ \left( \\begin{bmatrix} 0 \\ 0 \\ -0.5 L \\ \end{bmatrix} - \\begin{bmatrix} x_E \\ y_E \\ z_E \\ \end{bmatrix} \\right) \cdot \\begin{bmatrix} 0 \\ 0 \\ -1 \\ \end{bmatrix} }{ \\begin{bmatrix} x_D \\ y_D \\ z_D \\ \end{bmatrix} \cdot \\begin{bmatrix} 0 \\ 0 \\ -1 \\ \end{bmatrix} } and for the top cap at :math:`z=L`, .. math:: t_{\\text{bot}} = \\frac{ \left( \\begin{bmatrix} 0 \\ 0 \\ 0.5 L \\ \end{bmatrix} - \\begin{bmatrix} x_E \\ y_E \\ z_E \\ \end{bmatrix} \\right) \cdot \\begin{bmatrix} 0 \\ 0 \\ 1 \\ \end{bmatrix} }{ \\begin{bmatrix} x_D \\ y_D \\ z_D \\ \end{bmatrix} \cdot \\begin{bmatrix} 0 \\ 0 \\ 1 \\ \end{bmatrix} } The intersection points with :math:`t<0` and points not contained inside the circle of the end cap are rejected using :math:`(x^2 + y^2) < R`, where :math:`x` and :math:`y` are the components of the candidate intersection point. References ---------- [1] https://www.cl.cam.ac.uk/teaching/1999/AGraphHCI/ [2] https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-plane-and-ray-disk-intersection """ p0 = np.array(ray_origin) n0 = np.array(ray_direction) xe, ye, ze = p0 xd, yd, zd = n0 # Look for intersections on the cylinder surface a = xd ** 2 + yd ** 2 b = 2 * (xe * xd + ye * yd) c = xe ** 2 + ye ** 2 - radius ** 2 tcyl = [ t for t in np.roots([a, b, c]) if np.isfinite(t) and np.isreal(t) and t >= 0 ] # Look for intersections on the cap surfaces with np.errstate(divide="ignore"): # top cap point = np.array([0.0, 0.0, 0.5 * length]) normal = np.array([0.0, 0.0, 1.0]) # outward facing at z = length ttopcap = (point - p0).dot(normal) / n0.dot(normal) # bottom cap point = np.array([0.0, 0.0, -0.5 * length]) normal = np.array([0.0, 0.0, -1.0]) # outward facing at z = 0 tbotcap = (point - p0).dot(normal) / n0.dot(normal) tcap = [t for t in (tbotcap, ttopcap) if np.isfinite(t) and t >= 0.0] # Reject point cap points which are not in the cap's circle radius # and cylinder points which outside the length. cap_candidates = [(p0 + t * n0, t) for t in tcap] cap_candidates = [ (point, t) for (point, t) in cap_candidates if np.sqrt(point[0] ** 2 + point[1] ** 2) < radius ] cyl_candidates = [(p0 + t * n0, t) for t in tcyl] cyl_candidates = [ (point, t) for (point, t) in cyl_candidates if point[2] > -0.5 * length and point[2] < 0.5 * length ] intersection_info = tuple(cyl_candidates) + tuple(cap_candidates) intersection_info = sorted(intersection_info, key=lambda pair: pair[1]) if len(intersection_info) == 0: return ([], []) points = tuple([tuple(p.tolist()) for p in list(zip(*intersection_info))[0]]) distances = tuple([float(d) for d in list(zip(*intersection_info))[1]]) return points, distances # Equality tests def close_to_zero(value) -> bool: return np.all(np.absolute(value) < EPS_ZERO) def points_equal(point1: tuple, point2: tuple) -> bool: return close_to_zero(distance_between(point1, point2)) def floats_close(a, b): return close_to_zero(a - b) def allinrange(x, x_range): """ Returns True if all elements of x are inside x_range, inclusive of the edge values. Parameters ---------- x : array-like A numpy array of values. 
    x_range : tuple of float
        A tuple defining a range like (xmin, xmax)
    """
    # np.float/np.int are removed in recent numpy; accept plain and numpy scalars.
    if isinstance(x, (int, float, np.floating, np.integer)):
        x = np.array([x])
    return np.where(np.logical_or(x < x_range[0], x > x_range[1]))[0].size == 0


# Vector helpers


def flip(vector):
    return -np.array(vector)


def magnitude(vector):
    return np.sqrt(np.dot(np.array(vector), np.array(vector)))


def norm(vector):
    return np.array(vector) / np.linalg.norm(vector)


def angle_between(normal, vector):
    normal = np.array(normal)
    vector = np.array(vector)
    if np.allclose(normal, vector):
        return 0.0
    elif np.allclose(-normal, vector):
        return np.pi
    dot = np.dot(normal, vector)
    return np.arccos(dot)


def is_ahead(position, direction, point):
    """ Tests whether point is ahead of the current position.
    """
    if points_equal(position, point):
        return False
    # Use the function arguments; the original referenced an undefined `self`.
    direction = np.array(direction)
    d1 = np.dot(direction, np.array(point))
    d2 = np.dot(direction, np.array(position))
    return (d1 - d2) > 0


def smallest_angle_between(normal, vector):
    rads = angle_between(normal, vector)
    return np.arctan2(np.sin(rads), np.cos(rads))


def distance_between(point1: tuple, point2: tuple) -> float:
    v = np.array(point1) - np.array(point2)
    d = np.linalg.norm(v)
    return d


def intersection_point_is_ahead(ray_position, ray_direction, intersection_point):
    """ Returns true if the intersection point is ahead of the ray's trajectory.

        Notes
        -----
        The intersection point must be a point on the line, p(a) = p0 + a * n.
    """
    return (
        np.dot(ray_direction, intersection_point)
        - np.dot(ray_direction, ray_position)
    ) > EPS_ZERO
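A minimal usage sketch of the helpers above. The module name in the import is an assumption (import from wherever this file actually lives); the function names, signatures, and expected outputs follow the definitions in this file.

import numpy as np

from geometry import angle_between, on_aabb_surface, ray_z_cylinder  # module name assumed

# A point sitting on the x-max face of a unit cube centred at the origin.
hit, faces = on_aabb_surface((1.0, 1.0, 1.0), np.array([0.5, 0.0, 0.0]))
print(hit, faces)  # True [1] -- index 1 is the x-max face

# A ray fired along +z through a unit-length cylinder of radius 0.25
# clips both end caps.
points, distances = ray_z_cylinder(1.0, 0.25, (0.0, 0.0, -2.0), (0.0, 0.0, 1.0))
print(points)     # ((0.0, 0.0, -0.5), (0.0, 0.0, 0.5))
print(distances)  # (1.5, 2.5)

print(angle_between((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)))  # ~pi/2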
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=missing-docstring

from typing import Union


def size_nicenum(num: Union[int, float]) -> str:
    num = float(num)
    # Walk the full range of binary prefixes; anything beyond zettabytes
    # falls through to the yottabyte formatting below.
    for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']:
        if num < 1024.0:
            return f"{num:.1f}{unit}"
        num /= 1024.0
    return f"{num:.1f}YB"
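A couple of quick sanity checks for the helper above; the values are illustrative.

print(size_nicenum(512))            # 512.0B
print(size_nicenum(2048))           # 2.0KB
print(size_nicenum(3 * 1024 ** 3))  # 3.0GB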
"""The tests for the Remote component, adapted from Light Test.""" # pylint: disable=protected-access import unittest import homeassistant.components.remote as remote from homeassistant.const import ( ATTR_ENTITY_ID, CONF_PLATFORM, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, ) from tests.common import get_test_home_assistant, mock_service from tests.components.remote import common TEST_PLATFORM = {remote.DOMAIN: {CONF_PLATFORM: "test"}} SERVICE_SEND_COMMAND = "send_command" SERVICE_LEARN_COMMAND = "learn_command" class TestRemote(unittest.TestCase): """Test the remote module.""" # pylint: disable=invalid-name def setUp(self): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_is_on(self): """Test is_on.""" self.hass.states.set("remote.test", STATE_ON) assert remote.is_on(self.hass, "remote.test") self.hass.states.set("remote.test", STATE_OFF) assert not remote.is_on(self.hass, "remote.test") def test_turn_on(self): """Test turn_on.""" turn_on_calls = mock_service(self.hass, remote.DOMAIN, SERVICE_TURN_ON) common.turn_on(self.hass, entity_id="entity_id_val") self.hass.block_till_done() assert len(turn_on_calls) == 1 call = turn_on_calls[-1] assert remote.DOMAIN == call.domain def test_turn_off(self): """Test turn_off.""" turn_off_calls = mock_service(self.hass, remote.DOMAIN, SERVICE_TURN_OFF) common.turn_off(self.hass, entity_id="entity_id_val") self.hass.block_till_done() assert len(turn_off_calls) == 1 call = turn_off_calls[-1] assert call.domain == remote.DOMAIN assert call.service == SERVICE_TURN_OFF assert call.data[ATTR_ENTITY_ID] == "entity_id_val" def test_send_command(self): """Test send_command.""" send_command_calls = mock_service( self.hass, remote.DOMAIN, SERVICE_SEND_COMMAND ) common.send_command( self.hass, entity_id="entity_id_val", device="test_device", command=["test_command"], num_repeats="4", delay_secs="0.6", ) self.hass.block_till_done() assert len(send_command_calls) == 1 call = send_command_calls[-1] assert call.domain == remote.DOMAIN assert call.service == SERVICE_SEND_COMMAND assert call.data[ATTR_ENTITY_ID] == "entity_id_val" def test_learn_command(self): """Test learn_command.""" learn_command_calls = mock_service( self.hass, remote.DOMAIN, SERVICE_LEARN_COMMAND ) common.learn_command( self.hass, entity_id="entity_id_val", device="test_device", command=["test_command"], alternative=True, timeout=20, ) self.hass.block_till_done() assert len(learn_command_calls) == 1 call = learn_command_calls[-1] assert call.domain == remote.DOMAIN assert call.service == SERVICE_LEARN_COMMAND assert call.data[ATTR_ENTITY_ID] == "entity_id_val" def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomRemote(remote.RemoteDevice): pass CustomRemote() assert "RemoteDevice is deprecated, modify CustomRemote" in caplog.text
"""Main custom_json op handler.""" import logging from funcy.seqs import first, second from hive.db.adapter import Db from hive.db.db_state import DbState from hive.indexer.accounts import Accounts from hive.indexer.posts import Posts from hive.indexer.feed_cache import FeedCache from hive.indexer.follow import Follow from hive.indexer.community import process_json_community_op from hive.utils.normalize import load_json_key DB = Db.instance() log = logging.getLogger(__name__) class CustomOp: """Processes custom ops and dispatches updates.""" @classmethod def process_ops(cls, ops, block_num, block_date): """Given a list of operation in block, filter and process them.""" for op in ops: if op['id'] not in ['follow', 'com.dsite.community']: continue # we assume `required_posting_auths` is always used and length 1. # it may be that some ops require `required_active_auths` instead. # (e.g. if we use that route for admin action of acct creation) # if op['required_active_auths']: # log.warning("unexpected active auths: %s" % op) if len(op['required_posting_auths']) != 1: log.warning("unexpected auths: %s", op) continue account = op['required_posting_auths'][0] op_json = load_json_key(op, 'json') if op['id'] == 'follow': if block_num < 6000000 and not isinstance(op_json, list): op_json = ['follow', op_json] # legacy compat cls._process_legacy(account, op_json, block_date) elif op['id'] == 'com.dsite.community': if block_num > 23e6: process_json_community_op(account, op_json, block_date) @classmethod def _process_legacy(cls, account, op_json, block_date): """Handle legacy 'follow' plugin ops (follow/mute/clear, reblog)""" if not isinstance(op_json, list): return if len(op_json) != 2: return if first(op_json) not in ['follow', 'reblog']: return if not isinstance(second(op_json), dict): return cmd, op_json = op_json # ['follow', {data...}] if cmd == 'follow': Follow.follow_op(account, op_json, block_date) elif cmd == 'reblog': cls.reblog(account, op_json, block_date) @classmethod def reblog(cls, account, op_json, block_date): """Handle legacy 'reblog' op""" blogger = op_json['account'] author = op_json['author'] permlink = op_json['permlink'] if blogger != account: return # impersonation if not all(map(Accounts.exists, [author, blogger])): return post_id, depth = Posts.get_id_and_depth(author, permlink) if depth > 0: return # prevent comment reblogs if not post_id: log.debug("reblog: post not found: %s/%s", author, permlink) return if 'delete' in op_json and op_json['delete'] == 'delete': DB.query("DELETE FROM hive_reblogs WHERE account = :a AND " "post_id = :pid LIMIT 1", a=blogger, pid=post_id) if not DbState.is_initial_sync(): FeedCache.delete(post_id, Accounts.get_id(blogger)) else: sql = ("INSERT INTO hive_reblogs (account, post_id, created_at) " "VALUES (:a, :pid, :date) ON CONFLICT (account, post_id) DO NOTHING") DB.query(sql, a=blogger, pid=post_id, date=block_date) if not DbState.is_initial_sync(): FeedCache.insert(post_id, Accounts.get_id(blogger), block_date)
import pytest from pywps import Service from pywps.tests import assert_response_success from .common import TESTDATA, client_for, CFG_FILE # from flyingpigeon.processes import IndicessingleProcess @pytest.mark.skip(reason="no way of currently testing this") def test_wps_indices_simple(): client = client_for(Service(processes=[IndicessingleProcess()], cfgfiles=CFG_FILE)) datainputs = "resource=files@xlink:href={0};indices=SU;grouping=yr".\ format(TESTDATA['cordex_tasmax_2006_nc']) resp = client.get( service='WPS', request='Execute', version='1.0.0', identifier='indices_single', datainputs=datainputs) assert_response_success(resp)
# (C) StackState 2020 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os import jsonpickle as jsonpickle try: from unittest.mock import patch except ImportError: from mock import patch from stackstate_checks.base.stubs import topology from stackstate_checks.cloudera import ClouderaCheck try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse class MockClouderaClient: def __init__(self, instance): pass def get_cluster_api(self): return self.read_data(self.get_file('cluster_api_response.json')) def get_host_api(self): return self.read_data(self.get_file('host_api_response.json')) def get_service_api(self, cluster_name): return self.read_data(self.get_file('services_api_response_{}.json'.format(cluster_name))) def get_roles_api(self, cluster_name, service_name): return self.read_data(self.get_file('roles_api_response_{}_{}.json'.format(cluster_name, service_name))) @staticmethod def read_data(file_name): with open(file_name, 'r') as file: json_file = file.read() return jsonpickle.decode(json_file) @staticmethod def get_file(file_name): return os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', file_name) @patch('stackstate_checks.cloudera.cloudera.ClouderaClient', MockClouderaClient) def test_check_collect_topology(aggregator, instance): check = ClouderaCheck('test', {}, {}, instances=[instance]) check.check(instance) snapshot = topology.get_snapshot('') instance_url = urlparse(instance['url']).netloc assert snapshot['instance_key']['url'] == instance_url assert len(snapshot['components']) == 36 assert len(snapshot['relations']) == 57 aggregator.assert_all_metrics_covered()
# Jacob Gildenblat, 2015 # Implementation of edge preserving smoothing by minimizing with the Ambrosio-Tortorelli appoach # AM scheme, using conjugate gradients import cv2, scipy import numpy as np import sys import scipy from scipy.sparse.linalg import LinearOperator class AmbrosioTortorelliMinimizer(): def __init__(self, img, iterations=1, solver_maxiterations=10, tol=0.1, alpha=1000, beta=0.01, epsilon=0.01): self.iterations = iterations self.tol = tol self.g = np.float64(img) / np.max(img) self.f = self.g self.edges = np.zeros(img.shape) self.update_gradients() self.alpha, self.beta, self.epsilon = alpha, beta, epsilon self.add_const = self.beta / (4 * self.epsilon) self.multiply_const = self.epsilon * self.beta self.maxiter = solver_maxiterations def update_gradients(self): self.grad_x, self.grad_y = self.gradients(self.f) self.gradient_mag = np.power(self.grad_x, 2) + np.power(self.grad_y, 2) def edge_linear_operator(self, input): v = input.reshape(*self.g.shape) result = np.multiply(v, self.gradient_mag * self.alpha + self.add_const) \ - self.multiply_const * cv2.Laplacian(v, cv2.CV_64F) return result.reshape(*input.shape) def image_linear_operator(self, input): f = input.reshape(*self.g.shape) x, y = self.gradients(f) result = f - 2 * self.alpha * ( self.calc_grad_x(np.multiply(self.edges, x)) + self.calc_grad_y(np.multiply(self.edges, y))) return result.reshape(*input.shape) def solve_edges(self): size = self.g.shape[0] * self.g.shape[1] A = LinearOperator((size, size), matvec=self.edge_linear_operator, dtype=np.float64) b = np.ones(size) * self.beta / (4 * self.epsilon) self.edges, _ = scipy.sparse.linalg.cg(A, b, tol=self.tol, maxiter=self.maxiter) self.edges = np.power(self.edges.reshape(*self.g.shape), 2) return self.edges def solve_image(self): size = self.g.shape[0] * self.g.shape[1] A = LinearOperator((size, size), matvec=self.image_linear_operator, dtype=np.float64) b = self.g.reshape(size) self.f, _ = scipy.sparse.linalg.cg(A, b, tol=self.tol, maxiter=self.maxiter) self.f = self.f.reshape(*self.g.shape) self.update_gradients() return self.f def minimize(self): for i in range(0, self.iterations): self.solve_edges() self.solve_image() self.edges = np.power(self.edges, 0.5) cv2.normalize(self.f, self.f, 0, 255, cv2.NORM_MINMAX) cv2.normalize(self.edges, self.edges, 0, 255, cv2.NORM_MINMAX) self.f = np.uint8(self.f) self.edges = 255 - np.uint8(self.edges) return self.f, self.edges def calc_grad_x(self, img): return cv2.filter2D(img, cv2.CV_64F, np.array([[-1, 0, 1]])) def calc_grad_y(self, img): return cv2.filter2D(img, cv2.CV_64F, np.array([[-1, 0, 1]]).T) def gradients(self, img): return self.calc_grad_x(img), self.calc_grad_y(img) def show_image(image, name): img = image * 1 cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX) img = np.uint8(img) cv2.imshow(name, img) if __name__ == "__main__": img = cv2.imread(sys.argv[1], 1) result, edges = [], [] for channel in cv2.split(img): solver = AmbrosioTortorelliMinimizer(channel, iterations=1, tol=0.1, solver_maxiterations=6) f, v = solver.minimize() result.append(f) edges.append(v) f = cv2.merge(result) v = np.maximum(*edges) show_image(v, "edges") show_image(f, "image") show_image(img, "original") cv2.waitKey(-1)
from django.db import models from django.utils import timezone from data_refinery_common.models.computed_file import ComputedFile from data_refinery_common.models.managers import PublicObjectsManager # Compendium Computational Result class CompendiumResult(models.Model): """ Computational Result For A Compendium """ class Meta: db_table = "compendium_results" base_manager_name = "public_objects" def __str__(self): return "CompendiumResult " + str(self.pk) SVD_ALGORITHM_CHOICES = ( ("NONE", "None"), ("RANDOMIZED", "randomized"), ("ARPACK", "arpack"), ) # Managers objects = models.Manager() public_objects = PublicObjectsManager() # Relations result = models.ForeignKey( "ComputationalResult", blank=False, null=False, related_name="compendium_result", on_delete=models.CASCADE, ) primary_organism = models.ForeignKey( "Organism", blank=False, null=False, related_name="primary_compendium_results", on_delete=models.CASCADE, ) organisms = models.ManyToManyField( "Organism", related_name="compendium_results", through="CompendiumResultOrganismAssociation" ) # Properties quant_sf_only = models.BooleanField(default=False) compendium_version = models.IntegerField(blank=True, null=True) svd_algorithm = models.CharField( max_length=255, choices=SVD_ALGORITHM_CHOICES, default="NONE", help_text="The SVD algorithm that was used to impute the compendium result.", ) # Common Properties is_public = models.BooleanField(default=True) created_at = models.DateTimeField(editable=False, default=timezone.now) last_modified = models.DateTimeField(default=timezone.now) def save(self, *args, **kwargs): """ On save, update timestamps """ current_time = timezone.now() if not self.id: self.created_at = current_time self.last_modified = current_time return super(CompendiumResult, self).save(*args, **kwargs) # helper def get_computed_file(self): """ Short hand method for getting the computed file for this compendium""" return ComputedFile.objects.filter(result=self.result).first()
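A minimal usage sketch, assuming a `ComputationalResult` and an `Organism` instance already exist; the field and manager names are the ones declared on the model above.

def build_compendium(result, organism):
    """Illustrative only: `result` is an existing ComputationalResult and
    `organism` an existing Organism instance."""
    compendium = CompendiumResult.objects.create(
        result=result,
        primary_organism=organism,
        svd_algorithm="ARPACK",
        compendium_version=1,
    )
    # public_objects presumably restricts the queryset to is_public=True rows.
    latest = (
        CompendiumResult.public_objects.filter(primary_organism=organism)
        .order_by("-compendium_version")
        .first()
    )
    return compendium, latest.get_computed_file()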
import logging
import os
import time

from application import Exponentiator
from utility import get_service_name

log = logging.getLogger(__name__)

ENVIRONMENT_SLEEP_DURATION_KEY = 'SLEEP_DURATION'


class DaemonApp:
    def __init__(self):
        self.exponentiator = None

    def setup(self, application_name):
        self.exponentiator = Exponentiator()
        log.debug(" setup -- Setting up application configuration for [%s]", application_name)

    def run(self, application_name):
        should_run = True
        error_retry_duration = 1
        log.info(" run -- Initiating application [%s]", application_name)
        while should_run:
            try:
                self.exponentiator.execute_check(compound_pct=100)
                # os.getenv returns a string when the variable is set, so cast
                # to int before sleeping.
                sleep_duration = int(os.getenv(ENVIRONMENT_SLEEP_DURATION_KEY, 5 * 60))
                log.debug(" run -- sleeping for %s before checking again, Edit Env [%s]",
                          sleep_duration, ENVIRONMENT_SLEEP_DURATION_KEY)
                time.sleep(sleep_duration)
                error_retry_duration = 1
            except KeyboardInterrupt:
                exit(0)
            except Exception:
                log.error(" run -- seems there is an issue executing the check", exc_info=True)
                # when there are errors in the network we wait for a shorter period before retrying
                # Using an exponential retry mechanism until we are waiting for 10 minutes
                if error_retry_duration < 600:
                    error_retry_duration *= 2
                time.sleep(error_retry_duration)

    @classmethod
    def clean_up(cls):
        pass

    @classmethod
    def reload_configs(cls):
        pass


if __name__ == '__main__':
    daemon_app = DaemonApp()
    daemon_app.setup(application_name=get_service_name())
    daemon_app.run(application_name=get_service_name())
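The poll interval is read from the environment on every iteration; a short sketch of overriding it (the value is interpreted as seconds, with a five-minute default):

import os

os.environ['SLEEP_DURATION'] = '30'  # poll every 30 seconds instead of 5 minutes
# DaemonApp().run(...) picks the new value up on the next loop iteration.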
import random import os import ConfigParser import time import subprocess from tumblpy import Tumblpy from make_gifs import make_gif, check_config config = ConfigParser.ConfigParser() config.read("config.cfg") config.sections() slugs = check_config("config.cfg")[3] CONSUMER_KEY = config.get("tumblr", "consumer_key") CONSUMER_SECRET = config.get("tumblr", "consumer_secret") OAUTH_TOKEN = config.get("tumblr", "oauth_token") OAUTH_TOKEN_SECRET = config.get("tumblr", "oauth_token_secret") t = Tumblpy( CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, ) while True: # you can set many more options, check the make_gif-function quote = make_gif(random.choice(slugs), frames=20) quote = ' '.join(quote) # reduce amount of colors, because tumblr sucks subprocess.call(['convert', 'star_wars.gif', '-layers', 'optimize', '-colors', '64', '-loop', '0', 'star_wars.gif']) while(os.path.getsize('star_wars.gif') > 1048576): subprocess.call(['convert', 'star_wars.gif', '-resize', '90%', '-coalesce', '-layers', 'optimize', '-loop', '0', 'star_wars.gif']) photo = open('star_wars.gif', 'rb') post = t.post( 'post', blog_url='http://starwarsgifsasaservice.tumblr.com', params={ 'type': 'photo', 'caption': quote, 'data': photo, 'tags': 'star wars, gif'} ) print("sleeping...") # sleep 12 hours time.sleep(43200)
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from .config import * from .experiment import Experiment from .nni_client import *
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-28 03:43 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('job_board', '0006_siteconfig_remote'), ] operations = [ migrations.AlterField( model_name='company', name='twitter', field=models.CharField(blank=True, help_text='Please leave empty if none', max_length=20, null=True), ), migrations.AlterField( model_name='siteconfig', name='remote', field=models.BooleanField(default=False, help_text='Select if this job board is for remote jobs only'), ), ]
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. r"""Box decomposition algorithms. References .. [Lacour17] R. Lacour, K. Klamroth, C. Fonseca. A box decomposition algorithm to compute the hypervolume indicator. Computers & Operations Research, Volume 79, 2017. """ from __future__ import annotations from abc import ABC, abstractmethod from typing import Optional import torch from botorch.exceptions.errors import BotorchError from botorch.utils.multi_objective.box_decompositions.utils import ( _expand_ref_point, _pad_batch_pareto_frontier, update_local_upper_bounds_incremental, ) from botorch.utils.multi_objective.pareto import is_non_dominated from torch import Tensor from torch.nn import Module class BoxDecomposition(Module, ABC): r"""An abstract class for box decompositions. Note: Internally, we store the negative reference point (minimization). """ def __init__( self, ref_point: Tensor, sort: bool, Y: Optional[Tensor] = None ) -> None: """Initialize BoxDecomposition. Args: ref_point: A `m`-dim tensor containing the reference point. sort: A boolean indicating whether to sort the Pareto frontier. Y: A `(batch_shape) x n x m`-dim tensor of outcomes. """ super().__init__() self.register_buffer("_neg_ref_point", -ref_point) self.register_buffer("sort", torch.tensor(sort, dtype=torch.bool)) self.num_outcomes = ref_point.shape[-1] if Y is not None: self._update_neg_Y(Y=Y) self.reset() @property def pareto_Y(self) -> Tensor: r"""This returns the non-dominated set. Returns: A `n_pareto x m`-dim tensor of outcomes. """ try: return -self._neg_pareto_Y except AttributeError: raise BotorchError("pareto_Y has not been initialized") @property def ref_point(self) -> Tensor: r"""Get the reference point. Returns: A `m`-dim tensor of outcomes. """ return -self._neg_ref_point @property def Y(self) -> Tensor: r"""Get the raw outcomes. Returns: A `n x m`-dim tensor of outcomes. """ return -self._neg_Y def _reset_pareto_Y(self) -> bool: r"""Update the non-dominated front. Returns: A boolean indicating whether the Pareto frontier has changed. """ # is_non_dominated assumes maximization if self._neg_Y.shape[-2] == 0: pareto_Y = self._neg_Y else: # assumes maximization pareto_Y = -_pad_batch_pareto_frontier( Y=self.Y, ref_point=_expand_ref_point( ref_point=self.ref_point, batch_shape=self.batch_shape ), ) if self.sort: # sort by first objective if len(self.batch_shape) > 0: pareto_Y = pareto_Y.gather( index=torch.argsort(pareto_Y[..., :1], dim=-2).expand( pareto_Y.shape ), dim=-2, ) else: pareto_Y = pareto_Y[torch.argsort(pareto_Y[:, 0])] if not hasattr(self, "_neg_pareto_Y") or not torch.equal( pareto_Y, self._neg_pareto_Y ): self.register_buffer("_neg_pareto_Y", pareto_Y) return True return False def partition_space(self) -> None: r"""Compute box decomposition.""" if self.num_outcomes == 2: try: self._partition_space_2d() except NotImplementedError: self._partition_space() else: self._partition_space() def _partition_space_2d(self) -> None: r"""Compute box decomposition for 2 objectives.""" raise NotImplementedError @abstractmethod def _partition_space(self): r"""Partition the non-dominated space into disjoint hypercells. This method supports an arbitrary number of outcomes, but is less efficient than `partition_space_2d` for the 2-outcome case. 
""" pass # pragma: no cover @abstractmethod def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Returns: A `2 x num_cells x num_outcomes`-dim tensor containing the lower and upper vertices bounding each hypercell. """ pass # pragma: no cover def _update_neg_Y(self, Y: Tensor) -> bool: r"""Update the set of outcomes. Returns: A boolean indicating if _neg_Y was initialized. """ # multiply by -1, since internally we minimize. try: self._neg_Y = torch.cat([self._neg_Y, -Y], dim=-2) return False except AttributeError: self.register_buffer("_neg_Y", -Y) return True def update(self, Y: Tensor) -> None: r"""Update non-dominated front and decomposition. By default, the partitioning is recomputed. Subclasses can override this functionality. Args: Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes. """ self._update_neg_Y(Y=Y) self.reset() def reset(self) -> None: r"""Reset non-dominated front and decomposition.""" self.batch_shape = self.Y.shape[:-2] self.num_outcomes = self.Y.shape[-1] if len(self.batch_shape) > 1: raise NotImplementedError( f"{type(self).__name__} only supports a single " f"batch dimension, but got {len(self.batch_shape)} " "batch dimensions." ) elif len(self.batch_shape) > 0 and self.num_outcomes > 2: raise NotImplementedError( f"{type(self).__name__} only supports a batched box " f"decompositions in the 2-objective setting." ) is_new_pareto = self._reset_pareto_Y() # Update decomposition if the Pareto front changed if is_new_pareto: self.partition_space() @abstractmethod def compute_hypervolume(self) -> Tensor: r"""Compute hypervolume that is dominated by the Pareto Froniter. Returns: A `(batch_shape)`-dim tensor containing the hypervolume dominated by each Pareto frontier. """ pass # pragma: no cover class FastPartitioning(BoxDecomposition, ABC): r"""A class for partitioning the (non-)dominated space into hyper-cells. Note: this assumes maximization. Internally, it multiplies outcomes by -1 and performs the decomposition under minimization. This class is abstract to support to two applications of Alg 1 from [Lacour17]_: 1) partitioning the space that is dominated by the Pareto frontier and 2) partitioning the space that is not dominated by the Pareto frontier. """ def __init__( self, ref_point: Tensor, Y: Optional[Tensor] = None, ) -> None: """Initialize FastPartitioning. Args: ref_point: A `m`-dim tensor containing the reference point. Y: A `(batch_shape) x n x m`-dim tensor """ super().__init__(ref_point=ref_point, Y=Y, sort=ref_point.shape[-1] == 2) def update(self, Y: Tensor) -> None: r"""Update non-dominated front and decomposition. Args: Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes. """ if self._update_neg_Y(Y=Y): self.reset() else: if self.num_outcomes == 2 or self._neg_pareto_Y.shape[-2] == 0: # If there are two objective, recompute the box decomposition # because the partitions can be computed analytically. # If the current pareto set has no points, recompute the box # decomposition. 
self.reset() else: # only include points that are better than the reference point better_than_ref = (Y > self.ref_point).all(dim=-1) Y = Y[better_than_ref] Y_all = torch.cat([self._neg_pareto_Y, -Y], dim=-2) pareto_mask = is_non_dominated(-Y_all) # determine the number of points in Y that are Pareto optimal num_new_pareto = pareto_mask[-Y.shape[-2] :].sum() self._neg_pareto_Y = Y_all[pareto_mask] if num_new_pareto > 0: # update local upper bounds for the minimization problem self._U, self._Z = update_local_upper_bounds_incremental( # this assumes minimization new_pareto_Y=self._neg_pareto_Y[-num_new_pareto:], U=self._U, Z=self._Z, ) # use the negative local upper bounds as the new pareto # frontier for the minimization problem and perform # box decomposition on dominated space. self._get_partitioning() @abstractmethod def _get_single_cell(self) -> None: r"""Set the partitioning to be a single cell in the case of no Pareto points. This method should set self.hypercell_bounds """ pass # pragma: no cover def partition_space(self) -> None: if self._neg_pareto_Y.shape[-2] == 0: self._get_single_cell() else: super().partition_space() def _partition_space(self): r"""Partition the non-dominated space into disjoint hypercells. This method supports an arbitrary number of outcomes, but is less efficient than `partition_space_2d` for the 2-outcome case. """ if len(self.batch_shape) > 0: # this could be triggered when m=2 outcomes and # BoxDecomposition._partition_space_2d is not overridden. raise NotImplementedError( "_partition_space does not support batch dimensions." ) # this assumes minimization # initialize local upper bounds self.register_buffer("_U", self._neg_ref_point.unsqueeze(-2).clone()) # initialize defining points to be the dummy points \hat{z} that are # defined in Sec 2.1 in [Lacour17]_. Note that in [Lacour17]_, outcomes # are assumed to be between [0,1], so they used 0 rather than -inf. self._Z = torch.zeros( 1, self.num_outcomes, self.num_outcomes, dtype=self.Y.dtype, device=self.Y.device, ) for j in range(self.ref_point.shape[-1]): # use ref point for maximization as the ideal point for minimization. self._Z[0, j] = float("-inf") self._Z[0, j, j] = self._U[0, j] # incrementally update local upper bounds and defining points # for each new Pareto point self._U, self._Z = update_local_upper_bounds_incremental( new_pareto_Y=self._neg_pareto_Y, U=self._U, Z=self._Z, ) self._get_partitioning() @abstractmethod def _get_partitioning(self) -> None: r"""Compute partitioning given local upper bounds for the minimization problem. This method should set self.hypercell_bounds """ pass # pragma: no cover def get_hypercell_bounds(self) -> Tensor: r"""Get the bounds of each hypercell in the decomposition. Returns: A `2 x (batch_shape) x num_cells x m`-dim tensor containing the lower and upper vertices bounding each hypercell. """ return self.hypercell_bounds
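For context, a hedged usage sketch with a concrete subclass. `NondominatedPartitioning` is assumed to be available from the sibling `non_dominated` module (it is the usual concrete implementation in BoTorch); the outcome values are illustrative.

import torch
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    NondominatedPartitioning,
)

# Two-objective maximization problem: reference point plus a few outcomes.
ref_point = torch.tensor([0.0, 0.0])
Y = torch.tensor([[1.0, 2.0], [2.0, 1.0], [0.5, 0.5]])

bd = NondominatedPartitioning(ref_point=ref_point, Y=Y)
print(bd.pareto_Y)                 # the two non-dominated points
print(bd.compute_hypervolume())    # 3.0 for these values
bounds = bd.get_hypercell_bounds()  # 2 x num_cells x 2 tensor of cell vertices
bd.update(Y=torch.tensor([[1.5, 1.5]]))  # incremental update with a new point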
# coding: utf-8 from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model class H262PresetConfiguration(Enum): XDCAM_HD_422 = "XDCAM_HD_422"
from multiprocessing import Process, Event from termcolor import colored from .helper import set_logger class BertHTTPProxy(Process): def __init__(self, args): super().__init__() self.args = args self.is_ready = Event() def create_flask_app(self): try: from flask import Flask, request,Response from flask_compress import Compress from flask_cors import CORS from flask_json import FlaskJSON, as_json, JsonError from bert_serving.client import ConcurrentBertClient except ImportError: raise ImportError('BertClient or Flask or its dependencies are not fully installed, ' 'they are required for serving HTTP requests.' 'Please use "pip install -U bert-serving-server[http]" to install it.') # support up to 10 concurrent HTTP requests bc = ConcurrentBertClient(max_concurrency=self.args.http_max_connect, port=self.args.port, port_out=self.args.port_out, output_fmt='list', ignore_all_checks=True) app = Flask(__name__) logger = set_logger(colored('PROXY', 'red')) @app.route('/status/server', methods=['GET']) @as_json def get_server_status(): return bc.server_status @app.route('/status/client', methods=['GET']) @as_json def get_client_status(): return bc.status @app.route('/encode', methods=['POST']) @as_json def encode_query(): data = request.form if request.form else request.json try: logger.info('new request from %s' % request.remote_addr) return {'id': data['id'], 'result': bc.encode(data['texts'], is_tokenized=bool( data['is_tokenized']) if 'is_tokenized' in data else False)} except Exception as e: logger.error('error when handling HTTP request', exc_info=True) raise JsonError(description=str(e), type=str(type(e).__name__)) @app.route('/invocations', methods=['POST']) @as_json def invocations(): """ a copy from encode_query to serve sagemarker :return: """ data = request.form if request.form else request.json try: logger.info('new request from %s' % request.remote_addr) return {'id': data['id'], 'result': bc.encode(data['texts'], is_tokenized=bool( data['is_tokenized']) if 'is_tokenized' in data else False)} except Exception as e: logger.error('error when handling HTTP request', exc_info=True) raise JsonError(description=str(e), type=str(type(e).__name__)) @app.route('/ping', methods=['GET']) def ping(): """Determine if the container is working and healthy. In this sample container, we declare it healthy if we can load the model successfully.""" health = bc is not None # You can insert a health check here status = 200 if health else 404 return Response(response='\n', status=status, mimetype='application/json') CORS(app, origins=self.args.cors) FlaskJSON(app) Compress().init_app(app) return app def run(self): app = self.create_flask_app() self.is_ready.set() app.run(port=self.args.http_port, threaded=True, host='0.0.0.0')
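A client-side sketch of calling the proxy above. The host and port are placeholders; the payload keys (`id`, `texts`, `is_tokenized`) mirror what `encode_query` reads from the request body.

import requests

# Assumes the proxy is running locally; 8125 is just an example value for http_port.
resp = requests.post(
    "http://localhost:8125/encode",
    json={"id": 123, "texts": ["hello world", "good day"], "is_tokenized": False},
)
print(resp.json())  # {"id": 123, "result": [[...], [...]]}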
""" semi-automation of google search for information """ import webbrowser as wb from time import sleep # from bs4 import BeautifulSoup # import requests foods = [ 'egg', 'avocado', 'spinach', 'peanut', 'cheese', 'brocoli', 'chicken', 'mayonnaise', 'salmon', 'tuna', 'tomato', 'onion', 'lemon', 'chorizo', 'pickle', 'meat', 'pork', 'lettuce' ] stats = ['carbs', 'proteins', 'fat', 'calories'] url = 'https://www.google.com/search?q={}+in+{}&ie=utf-8&oe=utf-8' with open('food.csv', 'w') as storage: for f in foods: storage.write('{}\n'.format(f)) sleep(5) for f in foods: for s in stats: wb.open_new_tab(url.format(s, f)) # TODO: try with requests - BeautifulSoup # response = requests.get(url.format(s, f)) # soup = BeautifulSoup(response, 'html.parser') # answer = soup.find_all('div', '_XWk an_fna') # print(answer) # print('{} contains {} of {}'.format(f, answer, s)) sleep(12)
# coding: utf-8 import re import six from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class DatabaseForCreation: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'name': 'str', 'character_set': 'str', 'comment': 'str' } attribute_map = { 'name': 'name', 'character_set': 'character_set', 'comment': 'comment' } def __init__(self, name=None, character_set=None, comment=None): """DatabaseForCreation - a model defined in huaweicloud sdk""" self._name = None self._character_set = None self._comment = None self.discriminator = None self.name = name self.character_set = character_set if comment is not None: self.comment = comment @property def name(self): """Gets the name of this DatabaseForCreation. 数据库名称。 数据库名称长度可在1~64个字符之间,由字母、数字、中划线、下划线或$组成,$累计总长度小于等于10个字符,(MySQL 8.0不可包含$)。 :return: The name of this DatabaseForCreation. :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this DatabaseForCreation. 数据库名称。 数据库名称长度可在1~64个字符之间,由字母、数字、中划线、下划线或$组成,$累计总长度小于等于10个字符,(MySQL 8.0不可包含$)。 :param name: The name of this DatabaseForCreation. :type: str """ self._name = name @property def character_set(self): """Gets the character_set of this DatabaseForCreation. 数据库使用的字符集,例如utf8、gbk、ascii等MySQL支持的字符集。 :return: The character_set of this DatabaseForCreation. :rtype: str """ return self._character_set @character_set.setter def character_set(self, character_set): """Sets the character_set of this DatabaseForCreation. 数据库使用的字符集,例如utf8、gbk、ascii等MySQL支持的字符集。 :param character_set: The character_set of this DatabaseForCreation. :type: str """ self._character_set = character_set @property def comment(self): """Gets the comment of this DatabaseForCreation. 数据库备注,最大长度512 :return: The comment of this DatabaseForCreation. :rtype: str """ return self._comment @comment.setter def comment(self, comment): """Sets the comment of this DatabaseForCreation. 数据库备注,最大长度512 :param comment: The comment of this DatabaseForCreation. :type: str """ self._comment = comment def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): """For `print`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DatabaseForCreation): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
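A small usage sketch of the model above; the values are illustrative.

db = DatabaseForCreation(name="my_db", character_set="utf8", comment="demo database")
print(db.to_dict())
# {'name': 'my_db', 'character_set': 'utf8', 'comment': 'demo database'}
print(db)  # JSON representation via to_str()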
from os import walk, sep, pardir from os.path import split, join, abspath, exists, isfile from glob import glob import re import random from sympy.core.compatibility import PY3 # System path separator (usually slash or backslash) to be # used with excluded files, e.g. # exclude = set([ # "%(sep)smpmath%(sep)s" % sepd, # ]) sepd = {"sep": sep} # path and sympy_path SYMPY_PATH = abspath(join(split(__file__)[0], pardir, pardir)) # go to sympy/ assert exists(SYMPY_PATH) TOP_PATH = abspath(join(SYMPY_PATH, pardir)) BIN_PATH = join(TOP_PATH, "bin") EXAMPLES_PATH = join(TOP_PATH, "examples") # Error messages message_space = "File contains trailing whitespace: %s, line %s." message_implicit = "File contains an implicit import: %s, line %s." message_tabs = "File contains tabs instead of spaces: %s, line %s." message_carriage = "File contains carriage returns at end of line: %s, line %s" message_str_raise = "File contains string exception: %s, line %s" message_gen_raise = "File contains generic exception: %s, line %s" message_old_raise = "File contains old-style raise statement: %s, line %s, \"%s\"" message_eof = "File does not end with a newline: %s, line %s" message_multi_eof = "File ends with more than 1 newline: %s, line %s" message_test_suite_def = "Function should start with 'test_' or '_': %s, line %s" implicit_test_re = re.compile(r'^\s*(>>> )?(\.\.\. )?from .* import .*\*') str_raise_re = re.compile( r'^\s*(>>> )?(\.\.\. )?raise(\s+(\'|\")|\s*(\(\s*)+(\'|\"))') gen_raise_re = re.compile( r'^\s*(>>> )?(\.\.\. )?raise(\s+Exception|\s*(\(\s*)+Exception)') old_raise_re = re.compile(r'^\s*(>>> )?(\.\.\. )?raise((\s*\(\s*)|\s+)\w+\s*,') test_suite_def_re = re.compile(r'^def\s+(?!(_|test))[^(]*\(\s*\)\s*:$') test_file_re = re.compile(r'.*test_.*\.py$') def tab_in_leading(s): """Returns True if there are tabs in the leading whitespace of a line, including the whitespace of docstring code samples.""" n = len(s) - len(s.lstrip()) if not s[n:n + 3] in ['...', '>>>']: check = s[:n] else: smore = s[n + 3:] check = s[:n] + smore[:len(smore) - len(smore.lstrip())] return not (check.expandtabs() == check) def check_directory_tree(base_path, file_check, exclusions=set(), pattern="*.py"): """ Checks all files in the directory tree (with base_path as starting point) with the file_check function provided, skipping files that contain any of the strings in the set provided by exclusions. """ if not base_path: return for root, dirs, files in walk(base_path): check_files(glob(join(root, pattern)), file_check, exclusions) def check_files(files, file_check, exclusions=set(), pattern=None): """ Checks all files with the file_check function provided, skipping files that contain any of the strings in the set provided by exclusions. 
""" if not files: return for fname in files: if not exists(fname) or not isfile(fname): continue if any(ex in fname for ex in exclusions): continue if pattern is None or re.match(pattern, fname): file_check(fname) def test_files(): """ This test tests all files in sympy and checks that: o no lines contains a trailing whitespace o no lines end with \r\n o no line uses tabs instead of spaces o that the file ends with a single newline o there are no general or string exceptions o there are no old style raise statements o name of arg-less test suite functions start with _ or test_ """ def test(fname): if PY3: with open(fname, "rt", encoding="utf8") as test_file: test_this_file(fname, test_file) else: with open(fname, "rt") as test_file: test_this_file(fname, test_file) def test_this_file(fname, test_file): line = None # to flag the case where there were no lines in file for idx, line in enumerate(test_file): if test_file_re.match(fname) and test_suite_def_re.match(line): assert False, message_test_suite_def % (fname, idx + 1) if line.endswith(" \n") or line.endswith("\t\n"): assert False, message_space % (fname, idx + 1) if line.endswith("\r\n"): assert False, message_carriage % (fname, idx + 1) if tab_in_leading(line): assert False, message_tabs % (fname, idx + 1) if str_raise_re.search(line): assert False, message_str_raise % (fname, idx + 1) if gen_raise_re.search(line): assert False, message_gen_raise % (fname, idx + 1) if (implicit_test_re.search(line) and not filter(lambda ex: ex in fname, import_exclude)): assert False, message_implicit % (fname, idx + 1) result = old_raise_re.search(line) if result is not None: assert False, message_old_raise % ( fname, idx + 1, result.group(2)) if line is not None: if line == '\n' and idx > 0: assert False, message_multi_eof % (fname, idx + 1) elif not line.endswith('\n'): # eof newline check assert False, message_eof % (fname, idx + 1) # Files to test at top level top_level_files = [join(TOP_PATH, file) for file in [ "build.py", "setup.py", "setupegg.py", ]] # Files to exclude from all tests exclude = set([ "%(sep)smpmath%(sep)s" % sepd, ]) # Files to exclude from the implicit import test import_exclude = set([ # glob imports are allowed in top-level __init__.py: "%(sep)ssympy%(sep)s__init__.py" % sepd, # these __init__.py should be fixed: # XXX: not really, they use useful import pattern (DRY) "%(sep)smechanics%(sep)s__init__.py" % sepd, "%(sep)squantum%(sep)s__init__.py" % sepd, "%(sep)spolys%(sep)s__init__.py" % sepd, "%(sep)spolys%(sep)sdomains%(sep)s__init__.py" % sepd, # interactive sympy executes ``from sympy import *``: "%(sep)sinteractive%(sep)ssession.py" % sepd, # isympy executes ``from sympy import *``: "%(sep)sbin%(sep)sisympy" % sepd, # these two are import timing tests: "%(sep)sbin%(sep)ssympy_time.py" % sepd, "%(sep)sbin%(sep)ssympy_time_cache.py" % sepd, # Taken from Python stdlib: "%(sep)sparsing%(sep)ssympy_tokenize.py" % sepd, # these two should be fixed: "%(sep)splotting%(sep)spygletplot%(sep)s" % sepd, "%(sep)splotting%(sep)stextplot.py" % sepd, ]) check_files(top_level_files, test) check_directory_tree(BIN_PATH, test, set(["~", ".pyc", ".sh"]), "*") check_directory_tree(SYMPY_PATH, test, exclude) check_directory_tree(EXAMPLES_PATH, test, exclude) def _with_space(c): # return c with a random amount of leading space return random.randint(0, 10)*' ' + c def test_raise_statement_regular_expression(): candidates_ok = [ "some text # raise Exception, 'text'", "raise ValueError('text') # raise Exception, 'text'", "raise 
ValueError('text')", "raise ValueError", "raise ValueError('text')", "raise ValueError('text') #,", # Talking about an exception in a docstring ''''"""This function will raise ValueError, except when it doesn't"""''', "raise (ValueError('text')", ] str_candidates_fail = [ "raise 'exception'", "raise 'Exception'", 'raise "exception"', 'raise "Exception"', "raise 'ValueError'", ] gen_candidates_fail = [ "raise Exception('text') # raise Exception, 'text'", "raise Exception('text')", "raise Exception", "raise Exception('text')", "raise Exception('text') #,", "raise Exception, 'text'", "raise Exception, 'text' # raise Exception('text')", "raise Exception, 'text' # raise Exception, 'text'", ">>> raise Exception, 'text'", ">>> raise Exception, 'text' # raise Exception('text')", ">>> raise Exception, 'text' # raise Exception, 'text'", ] old_candidates_fail = [ "raise Exception, 'text'", "raise Exception, 'text' # raise Exception('text')", "raise Exception, 'text' # raise Exception, 'text'", ">>> raise Exception, 'text'", ">>> raise Exception, 'text' # raise Exception('text')", ">>> raise Exception, 'text' # raise Exception, 'text'", "raise ValueError, 'text'", "raise ValueError, 'text' # raise Exception('text')", "raise ValueError, 'text' # raise Exception, 'text'", ">>> raise ValueError, 'text'", ">>> raise ValueError, 'text' # raise Exception('text')", ">>> raise ValueError, 'text' # raise Exception, 'text'", "raise(ValueError,", "raise (ValueError,", "raise( ValueError,", "raise ( ValueError,", "raise(ValueError ,", "raise (ValueError ,", "raise( ValueError ,", "raise ( ValueError ,", ] for c in candidates_ok: assert str_raise_re.search(_with_space(c)) is None, c assert gen_raise_re.search(_with_space(c)) is None, c assert old_raise_re.search(_with_space(c)) is None, c for c in str_candidates_fail: assert str_raise_re.search(_with_space(c)) is not None, c for c in gen_candidates_fail: assert gen_raise_re.search(_with_space(c)) is not None, c for c in old_candidates_fail: assert old_raise_re.search(_with_space(c)) is not None, c def test_implicit_imports_regular_expression(): candidates_ok = [ "from sympy import something", ">>> from sympy import something", "from sympy.somewhere import something", ">>> from sympy.somewhere import something", "import sympy", ">>> import sympy", "import sympy.something.something", "... import sympy", "... import sympy.something.something", "... from sympy import something", "... from sympy.somewhere import something", ">> from sympy import *", # To allow 'fake' docstrings "# from sympy import *", "some text # from sympy import *", ] candidates_fail = [ "from sympy import *", ">>> from sympy import *", "from sympy.somewhere import *", ">>> from sympy.somewhere import *", "... from sympy import *", "... from sympy.somewhere import *", ] for c in candidates_ok: assert implicit_test_re.search(_with_space(c)) is None, c for c in candidates_fail: assert implicit_test_re.search(_with_space(c)) is not None, c def test_test_suite_defs(): candidates_ok = [ " def foo():\n", "def foo(arg):\n", "def _foo():\n", "def test_foo():\n", ] candidates_fail = [ "def foo():\n", "def foo() :\n", "def foo( ):\n", "def foo():\n", ] for c in candidates_ok: assert test_suite_def_re.search(c) is None, c for c in candidates_fail: assert test_suite_def_re.search(c) is not None, c
#!/bin/env python # -*- coding: utf-8 -*- """ Copyright 2020-present Works Mobile Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ Generate token according to JWT protocol """ __all__ = ['create_tmp_token', 'generate_token'] import python_jwt as jwt import jwcrypto.jwk as jwk import datetime import requests import json from attendance_management_bot.constant import API_BO, HEROKU_SERVER_ID, \ PRIVATE_KEY_PATH def create_tmp_token(key_path, server_id): """ This function use JWT protocol to creates a temporary token for user authentication. Focus on the "Server Token (ID Registration Style)" section of the following documents. reference - https://developers.worksmobile.com/jp/document/1002002?lang=en """ with open(key_path, "rb") as _file: key = _file.read() private_key = jwk.JWK.from_pem(key) payload = {"iss": server_id} token = jwt.generate_jwt(payload, private_key, 'RS256', datetime.timedelta(minutes=5)) return token return None def generate_token(): """ Using JWT protocol to create token. Focus on the "Server Token (ID Registration Style)" section of the following documents. reference - https://developers.worksmobile.com/jp/document/1002002?lang=en """ tmp_token = create_tmp_token(PRIVATE_KEY_PATH, HEROKU_SERVER_ID) if tmp_token is None: raise Exception("generate tmp token failed.") headers = { "Content-Type": "application/x-www-form-urlencoded", "charset": "UTF-8" } url = API_BO["auth_url"] + tmp_token response = requests.post(url, headers=headers) if response.status_code != 200: raise Exception("generate token failed.") content = json.loads(response.content) token = content.get("access_token", None) if token is None: raise Exception("response token is None.") return token
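A hedged sketch of how the returned token would typically be used downstream; the endpoint and the exact header set are placeholders, not the Works Mobile API specification.

import requests

token = generate_token()
headers = {
    "Authorization": "Bearer " + token,  # assumption: bearer-style auth header
    "Content-Type": "application/json",
}
# requests.post(some_bot_api_endpoint, headers=headers, json={...})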
import json from django.core.urlresolvers import reverse from seaserv import seafile_api from seahub.test_utils import BaseTestCase from seahub.share.models import ExtraSharePermission class Shares(BaseTestCase): def setUp(self): self.repo_id = self.repo.id self.group_id = self.group.id self.user_name = self.user.username self.admin_name = self.admin.username self.para = '?repo_id=%s&path=/' % self.repo_id self.url = reverse('api-v2.1-admin-shares') self.tmp_user = self.create_user('tmp@email.com') self.tmp_user_email = self.tmp_user.username def tearDown(self): self.remove_repo() self.remove_user(self.tmp_user_email) def test_can_get_user_shared(self): self.share_repo_to_admin_with_rw_permission() self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=user') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'user' assert json_resp[0]['user_email'] == self.admin_name assert json_resp[0]['permission'] == 'rw' def test_can_get_user_shared_with_admin(self): self.share_repo_to_admin_with_admin_permission() self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=user') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'user' assert json_resp[0]['user_email'] == self.admin_name assert json_resp[0]['permission'] == 'rw' assert json_resp[0]['is_admin'] == True def test_can_get_group_shared(self): self.share_repo_to_group_with_rw_permission() self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=group') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'group' assert json_resp[0]['group_id'] == self.group_id assert json_resp[0]['permission'] == 'rw' def test_can_get_group_shared_with_admin(self): self.share_repo_to_group_with_admin_permission() self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=group') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'group' assert json_resp[0]['group_id'] == self.group_id assert json_resp[0]['permission'] == 'rw' assert json_resp[0]['is_admin'] == True def test_get_with_invalid_permission(self): self.login_as(self.user) resp = self.client.get(self.url + self.para + '&share_type=group') self.assertEqual(403, resp.status_code) def test_share_repo_to_user(self): self.login_as(self.admin) invalid_email = 'invalid@email.com' permission = 'r' data = { 'repo_id': self.repo_id, 'share_type': 'user', 'permission': permission, 'share_to': [invalid_email, self.tmp_user_email] } resp = self.client.post(self.url, data) self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['failed'][0]['user_email'] == invalid_email assert json_resp['success'][0]['user_email'] == self.tmp_user_email assert json_resp['success'][0]['permission'] == permission def test_share_repo_to_user_with_admin_permission(self): self.login_as(self.admin) invalid_email = 'invalid@email.com' permission = 'admin' data = { 'repo_id': self.repo_id, 'share_type': 'user', 
'permission': permission, 'share_to': [invalid_email, self.tmp_user_email] } resp = self.client.post(self.url, data) self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['failed'][0]['user_email'] == invalid_email assert json_resp['success'][0]['user_email'] == self.tmp_user_email assert json_resp['success'][0]['permission'] == 'rw' assert json_resp['success'][0]['is_admin'] == True def test_share_repo_to_group(self): self.login_as(self.admin) invalid_group_id = 'invalid_group_id' permission = 'r' data = { 'repo_id': self.repo_id, 'share_type': 'group', 'permission': permission, 'share_to': [invalid_group_id, self.group_id] } resp = self.client.post(self.url, data) self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['failed'][0]['group_id'] == invalid_group_id assert json_resp['success'][0]['group_id'] == self.group_id assert json_resp['success'][0]['permission'] == permission def test_share_repo_to_group_with_admin_permission(self): self.login_as(self.admin) invalid_group_id = 'invalid_group_id' permission = 'admin' data = { 'repo_id': self.repo_id, 'share_type': 'group', 'permission': permission, 'share_to': [invalid_group_id, self.group_id] } resp = self.client.post(self.url, data) self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['failed'][0]['group_id'] == invalid_group_id assert json_resp['success'][0]['group_id'] == self.group_id assert json_resp['success'][0]['permission'] == 'rw' assert json_resp['success'][0]['is_admin'] == True def test_share_repo_with_invalid_user_permission(self): self.login_as(self.user) invalid_group_id = 'invalid_group_id' permission = 'r' data = { 'repo_id': self.repo_id, 'share_type': 'group', 'permission': permission, 'share_to': [invalid_group_id, self.group_id] } resp = self.client.post(self.url, data) self.assertEqual(403, resp.status_code) def test_modify_repo_user_share_permission(self): # user share repo to tmp user init_permission = 'rw' seafile_api.share_repo(self.repo_id, self.user_name, self.tmp_user_email, init_permission) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) == init_permission self.login_as(self.admin) modified_perm = 'r' data = 'repo_id=%s&share_type=%s&permission=%s&share_to=%s' % \ (self.repo_id, 'user', modified_perm, self.tmp_user_email) resp = self.client.put(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) == modified_perm def test_modify_repo_user_share_permission_to_admin(self): # user share repo to tmp user init_permission = 'rw' seafile_api.share_repo(self.repo_id, self.user_name, self.tmp_user_email, init_permission) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) == init_permission self.login_as(self.admin) modified_perm = 'admin' data = 'repo_id=%s&share_type=%s&permission=%s&share_to=%s' % \ (self.repo_id, 'user', modified_perm, self.tmp_user_email) resp = self.client.put(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['permission'] == 'rw' assert json_resp['is_admin'] == True assert json_resp['user_email'] == self.tmp_user_email def test_modify_repo_group_share_permission(self): # user share repo to tmp user self.share_repo_to_group_with_rw_permission() shared_groups = 
seafile_api.list_repo_shared_group( self.user_name, self.repo_id) for e in shared_groups: if e.group_id == self.group_id: permission = e.perm break assert permission == 'rw' self.login_as(self.admin) modified_perm = 'r' data = 'repo_id=%s&share_type=%s&permission=%s&share_to=%s' % \ (self.repo_id, 'group', modified_perm, self.group_id) resp = self.client.put(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) shared_groups = seafile_api.list_repo_shared_group( self.user_name, self.repo_id) for e in shared_groups: if e.group_id == self.group_id: permission = e.perm break assert permission == modified_perm def test_modify_repo_group_share_permission_to_admin(self): # user share repo to tmp user self.share_repo_to_group_with_rw_permission() shared_groups = seafile_api.list_repo_shared_group( self.user_name, self.repo_id) for e in shared_groups: if e.group_id == self.group_id: permission = e.perm break assert permission == 'rw' self.login_as(self.admin) modified_perm = 'admin' data = 'repo_id=%s&share_type=%s&permission=%s&share_to=%s' % \ (self.repo_id, 'group', modified_perm, self.group_id) resp = self.client.put(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) json_resp = json.loads(resp.content) assert json_resp['permission'] == 'rw' assert json_resp['is_admin'] == True assert json_resp['group_id'] == self.group_id def test_modify_with_invalid_user_permission(self): self.login_as(self.user) resp = self.client.put(self.url, {}, 'application/x-www-form-urlencoded') self.assertEqual(403, resp.status_code) def test_delete_repo_user_share_permission(self): # user share repo to tmp user init_permission = 'rw' seafile_api.share_repo(self.repo_id, self.user_name, self.tmp_user_email, init_permission) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) == init_permission self.login_as(self.admin) data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'user', self.tmp_user_email) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) is None def test_delete_repo_user_share_admin_permission(self): # user share repo to tmp user init_permission = 'rw' seafile_api.share_repo(self.repo_id, self.user_name, self.tmp_user_email, init_permission) ExtraSharePermission.objects.create_share_permission(self.repo.id, self.tmp_user_email, 'admin') assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) == init_permission self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=user') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'user' assert json_resp[0]['user_email'] == self.tmp_user_email assert json_resp[0]['permission'] == 'rw' assert json_resp[0]['is_admin'] == True data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'user', self.tmp_user_email) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) assert seafile_api.check_permission_by_path(self.repo_id, \ '/', self.tmp_user_email) is None resp = self.client.get(self.url + self.para + '&share_type=user') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert not json_resp 
def test_delete_repo_group_share_permission(self): self.share_repo_to_group_with_rw_permission() shared_groups = seafile_api.list_repo_shared_group( self.user_name, self.repo_id) for e in shared_groups: if e.group_id == self.group_id: permission = e.perm break assert permission == 'rw' self.login_as(self.admin) data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'group', self.group_id) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) def test_delete_repo_group_share_admin_permission(self): self.share_repo_to_group_with_admin_permission() shared_groups = seafile_api.list_repo_shared_group( self.user_name, self.repo_id) for e in shared_groups: if e.group_id == self.group_id: permission = e.perm break assert permission == 'rw' self.login_as(self.admin) resp = self.client.get(self.url + self.para + '&share_type=group') json_resp = json.loads(resp.content) self.assertEqual(200, resp.status_code) assert json_resp[0]['repo_id'] == self.repo_id assert json_resp[0]['path'] == '/' assert json_resp[0]['share_type'] == 'group' assert json_resp[0]['group_id'] == self.group_id assert json_resp[0]['permission'] == 'rw' assert json_resp[0]['is_admin'] == True data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'group', self.group_id) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(200, resp.status_code) def test_delete_with_invalid_user_permission(self): self.login_as(self.user) resp = self.client.delete(self.url, {}, 'application/x-www-form-urlencoded') self.assertEqual(403, resp.status_code) def test_delete_with_unshared_group(self): self.login_as(self.admin) data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'group', self.group_id) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(404, resp.status_code) def test_delete_with_unshared_user(self): self.login_as(self.admin) data = 'repo_id=%s&share_type=%s&share_to=%s' % \ (self.repo_id, 'user', self.tmp_user_email) resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded') self.assertEqual(404, resp.status_code)
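# ---------------------------------------------------------------------------
# Sketch only (not part of the test suite): the PUT/DELETE bodies above are
# built with "%" string formatting. An equivalent, hypothetical helper using
# urllib.parse.urlencode would escape values automatically and shows the
# request shape the admin-shares API expects.
from urllib.parse import urlencode


def _share_body(repo_id, share_type, share_to, permission=None):
    # Build the form-encoded body for the api-v2.1-admin-shares endpoint.
    params = {'repo_id': repo_id, 'share_type': share_type, 'share_to': share_to}
    if permission is not None:
        params['permission'] = permission
    return urlencode(params)

# e.g. _share_body('repo-id', 'user', 'tmp@email.com', 'r')
# -> 'repo_id=repo-id&share_type=user&share_to=tmp%40email.com&permission=r'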
from demopy.pyglet.preferences import load_user_pref from demopy.pyglet.resource import load_resource g_caption = 'Tank 2021' g_user_pref = load_user_pref() g_resource = load_resource()
import setuptools

with open("README.md", "r") as file_header:
    long_description = file_header.read()

setuptools.setup(
    name="new-template-USERNAME",
    version="",
    author="",
    author_email="",
    description="",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
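# ---------------------------------------------------------------------------
# Optional check, sketch only (assumes the third-party 'trove-classifiers'
# package is installed): Trove classifiers are an exact-match vocabulary, so a
# guard like this catches typos such as "OS independent" vs "OS Independent"
# before uploading. It is not part of the template itself.
def _check_classifiers(classifiers):
    from trove_classifiers import classifiers as known_classifiers
    unknown = [c for c in classifiers if c not in known_classifiers]
    if unknown:
        raise SystemExit("Unknown Trove classifiers: {}".format(unknown))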
from dash import dcc import dash_bootstrap_components as dbc from dash import html from constants import * files_location = dcc.Upload( id="upload", children=[ 'Drag and Drop or ', html.A('Select a File') ], multiple=False, style={ 'width': '100%', 'height': '60px', 'lineHeight': '60px', 'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '5px', 'textAlign': 'center' } ) def create_files_list(files): acc_files = [] for i, file in enumerate(files): acc_files.append( dbc.AccordionItem( title=file ) ) accordion = html.Div( dbc.Accordion( acc_files, start_collapsed=True, ), ) return accordion def create_list_radio(files, id_name): radio_options = [] for i, file in enumerate(files): radio_options.append( {"label": file, "value": file} ) radio = dbc.RadioItems( options=radio_options, value=files[0], id=id_name ) return radio def create_input_box(name, id_1, ph, value): return dbc.Row( [ dbc.Col(html.Div(name)), dbc.Col(dbc.Input(id=id_1, placeholder=ph, value=value)), dbc.Row() ] ) def create_drop_down(name, op_list, id_1, default_value=0): return dbc.Row( [ dbc.Col(html.Div(name)), dbc.Col( dcc.Dropdown( id=id_1, options=[dict(label=x, value=x) for x in op_list], value=op_list[default_value] ) ), dbc.Row() ] ) def create_checkbox(name, op_list, id_1, default_value=0, switch_flag=False): return dbc.Row( [ dbc.Col(dcc.Markdown(f""" ** {name} ** """)), # dbc.Col(html.Div(name)), dbc.Col( dbc.Checklist( id=id_1, options=[dict(value=x) for x in op_list], value=op_list[default_value], switch=switch_flag ) ), dbc.Row() ] ) file_details = html.Div( children=[ html.H6("File contents"), html.Div( id="metadata", children=[], style={"height": "25vh", "maxHeight": "25vh", "overflow": "scroll"} ), html.H6("Data variables"), html.Div( id="variable_list", children=[], style={"height": "20vh", "maxHeight": "20vh", "overflow": "scroll"}, ), html.H6("Variable contents"), html.Div( id="variable_content", children=[], style={"height": "24vh", "maxHeight": "24vh", "overflow": "scroll"}, ), ], id="File_contents", style={"marginTop": "2px", "marginBottom": "2px"} ) right_panel = html.Div( [ create_input_box("Sampling Frequency---->", "fs", "50000", 50000), create_input_box("NFFT--------->", "nfft", "2048", 2048), ] ) filter_panel = html.Div( [ create_checkbox("Filter:", [0], "filter_apply", 0, True), create_drop_down("Window type", FILTERS, "filter_type", 11), create_input_box("Cut_off_1", "fc_1", "0", 0), create_input_box("Cut_off_2", "fc_2", "500", 500), dcc.Graph(id="graph_3", style={"height": "29vh"}), dcc.Graph(id="graph_4", style={"height": "29vh"}) ], )
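# ---------------------------------------------------------------------------
# Assembly sketch (not part of the original module): a minimal way to wire the
# components above into a running app, assuming the real application defines
# its callbacks elsewhere. dash.Dash, dbc.themes.BOOTSTRAP and app.run_server
# are standard Dash / dash-bootstrap-components APIs.
if __name__ == "__main__":
    import dash

    app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
    app.layout = html.Div(
        dbc.Row(
            [
                dbc.Col([files_location, file_details], width=4),
                dbc.Col([right_panel, filter_panel], width=8),
            ]
        )
    )
    app.run_server(debug=True)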
from collections import OrderedDict import gym import logging import re import tree # pip install dm_tree from typing import Dict, List, Optional, Tuple, Type, TYPE_CHECKING, Union from ray.util.debug import log_once from ray.rllib.models.tf.tf_action_dist import TFActionDistribution from ray.rllib.models.modelv2 import ModelV2 from ray.rllib.policy.dynamic_tf_policy import TFMultiGPUTowerStack from ray.rllib.policy.policy import Policy from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.policy.tf_policy import TFPolicy from ray.rllib.policy.view_requirement import ViewRequirement from ray.rllib.models.catalog import ModelCatalog from ray.rllib.utils import force_list from ray.rllib.utils.annotations import ( DeveloperAPI, OverrideToImplementCustomLogic, OverrideToImplementCustomLogic_CallToSuperRecommended, is_overridden, override, ) from ray.rllib.utils.debug import summarize from ray.rllib.utils.framework import try_import_tf from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY from ray.rllib.utils.spaces.space_utils import get_dummy_batch_for_space from ray.rllib.utils.tf_utils import get_placeholder from ray.rllib.utils.typing import ( LocalOptimizer, ModelGradients, TensorType, TrainerConfigDict, ) if TYPE_CHECKING: from ray.rllib.evaluation import Episode tf1, tf, tfv = try_import_tf() logger = logging.getLogger(__name__) @DeveloperAPI class DynamicTFPolicyV2(TFPolicy): """A TFPolicy that auto-defines placeholders dynamically at runtime. This class is intended to be used and extended by sub-classing. """ @DeveloperAPI def __init__( self, obs_space: gym.spaces.Space, action_space: gym.spaces.Space, config: TrainerConfigDict, *, existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None, existing_model: Optional[ModelV2] = None, ): self.observation_space = obs_space self.action_space = action_space config = dict(self.get_default_config(), **config) self.config = config self.framework = "tf" self._seq_lens = None self._is_tower = existing_inputs is not None self.validate_spaces(obs_space, action_space, config) self.dist_class = self._init_dist_class() # Setup self.model. if existing_model and isinstance(existing_model, list): self.model = existing_model[0] # TODO: (sven) hack, but works for `target_[q_]?model`. for i in range(1, len(existing_model)): setattr(self, existing_model[i][0], existing_model[i][1]) else: self.model = self.make_model() # Auto-update model's inference view requirements, if recurrent. self._update_model_view_requirements_from_init_state() self._init_state_inputs(existing_inputs) self._init_view_requirements() timestep, explore = self._init_input_dict_and_dummy_batch(existing_inputs) ( sampled_action, sampled_action_logp, dist_inputs, self._policy_extra_action_fetches, ) = self._init_action_fetches(timestep, explore) # Phase 1 init. 
sess = tf1.get_default_session() or tf1.Session( config=tf1.ConfigProto(**self.config["tf_session_args"]) ) batch_divisibility_req = self.get_batch_divisibility_req() prev_action_input = ( self._input_dict[SampleBatch.PREV_ACTIONS] if SampleBatch.PREV_ACTIONS in self._input_dict.accessed_keys else None ) prev_reward_input = ( self._input_dict[SampleBatch.PREV_REWARDS] if SampleBatch.PREV_REWARDS in self._input_dict.accessed_keys else None ) super().__init__( observation_space=obs_space, action_space=action_space, config=config, sess=sess, obs_input=self._input_dict[SampleBatch.OBS], action_input=self._input_dict[SampleBatch.ACTIONS], sampled_action=sampled_action, sampled_action_logp=sampled_action_logp, dist_inputs=dist_inputs, dist_class=self.dist_class, loss=None, # dynamically initialized on run loss_inputs=[], model=self.model, state_inputs=self._state_inputs, state_outputs=self._state_out, prev_action_input=prev_action_input, prev_reward_input=prev_reward_input, seq_lens=self._seq_lens, max_seq_len=config["model"]["max_seq_len"], batch_divisibility_req=batch_divisibility_req, explore=explore, timestep=timestep, ) @DeveloperAPI @staticmethod def enable_eager_execution_if_necessary(): # This is static graph TF policy. # Simply do nothing. pass @DeveloperAPI @OverrideToImplementCustomLogic def get_default_config(self) -> TrainerConfigDict: return {} @DeveloperAPI @OverrideToImplementCustomLogic def validate_spaces( self, obs_space: gym.spaces.Space, action_space: gym.spaces.Space, config: TrainerConfigDict, ): return {} @DeveloperAPI @OverrideToImplementCustomLogic @override(Policy) def loss( self, model: Union[ModelV2, "tf.keras.Model"], dist_class: Type[TFActionDistribution], train_batch: SampleBatch, ) -> Union[TensorType, List[TensorType]]: """Constructs loss computation graph for this TF1 policy. Args: model: The Model to calculate the loss for. dist_class: The action distr. class. train_batch: The training data. Returns: A single loss tensor or a list of loss tensors. """ raise NotImplementedError @DeveloperAPI @OverrideToImplementCustomLogic def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: """Stats function. Returns a dict of statistics. Args: train_batch: The SampleBatch (already) used for training. Returns: The stats dict. """ return {} @DeveloperAPI @OverrideToImplementCustomLogic def grad_stats_fn( self, train_batch: SampleBatch, grads: ModelGradients ) -> Dict[str, TensorType]: """Gradient stats function. Returns a dict of statistics. Args: train_batch: The SampleBatch (already) used for training. Returns: The stats dict. """ return {} @DeveloperAPI @OverrideToImplementCustomLogic def make_model(self) -> ModelV2: """Build underlying model for this Policy. Returns: The Model for the Policy to use. """ # Default ModelV2 model. _, logit_dim = ModelCatalog.get_action_dist( self.action_space, self.config["model"] ) return ModelCatalog.get_model_v2( obs_space=self.observation_space, action_space=self.action_space, num_outputs=logit_dim, model_config=self.config["model"], framework="tf", ) @DeveloperAPI @OverrideToImplementCustomLogic def compute_gradients_fn( self, optimizer: LocalOptimizer, loss: TensorType ) -> ModelGradients: """Gradients computing function (from loss tensor, using local optimizer). Args: policy: The Policy object that generated the loss tensor and that holds the given local optimizer. optimizer: The tf (local) optimizer object to calculate the gradients with. loss: The loss tensor for which gradients should be calculated. 
Returns: ModelGradients: List of the possibly clipped gradients- and variable tuples. """ return None @DeveloperAPI @OverrideToImplementCustomLogic def apply_gradients_fn( self, policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", grads: ModelGradients, ) -> "tf.Operation": """Gradients computing function (from loss tensor, using local optimizer). Args: policy: The Policy object that generated the loss tensor and that holds the given local optimizer. optimizer: The tf (local) optimizer object to calculate the gradients with. grads: The gradient tensor to be applied. Returns: "tf.Operation": TF operation that applies supplied gradients. """ return None @DeveloperAPI @OverrideToImplementCustomLogic def action_sampler_fn( self, model: ModelV2, *, obs_batch: TensorType, state_batches: TensorType, **kwargs, ) -> Tuple[TensorType, TensorType, TensorType, List[TensorType]]: """Custom function for sampling new actions given policy. Args: model: Underlying model. obs_batch: Observation tensor batch. state_batches: Action sampling state batch. Returns: Sampled action Log-likelihood Action distribution inputs Updated state """ return None, None, None, None @DeveloperAPI @OverrideToImplementCustomLogic def action_distribution_fn( self, model: ModelV2, *, obs_batch: TensorType, state_batches: TensorType, **kwargs, ) -> Tuple[TensorType, type, List[TensorType]]: """Action distribution function for this Policy. Args: model: Underlying model. obs_batch: Observation tensor batch. state_batches: Action sampling state batch. Returns: Distribution input. ActionDistribution class. State outs. """ return None, None, None @DeveloperAPI @OverrideToImplementCustomLogic def get_batch_divisibility_req(self) -> int: """Get batch divisibility request. Returns: Size N. A sample batch must be of size K*N. """ # By default, any sized batch is ok, so simply return 1. return 1 @override(TFPolicy) @DeveloperAPI @OverrideToImplementCustomLogic_CallToSuperRecommended def extra_action_out_fn(self) -> Dict[str, TensorType]: """Extra values to fetch and return from compute_actions(). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. """ extra_action_fetches = super().extra_action_out_fn() extra_action_fetches.update(self._policy_extra_action_fetches) return extra_action_fetches @DeveloperAPI @OverrideToImplementCustomLogic_CallToSuperRecommended def extra_learn_fetches_fn(self) -> Dict[str, TensorType]: """Extra stats to be reported after gradient computation. Returns: Dict[str, TensorType]: An extra fetch-dict. """ return {} @override(TFPolicy) def extra_compute_grad_fetches(self): return dict({LEARNER_STATS_KEY: {}}, **self.extra_learn_fetches_fn()) @override(Policy) @OverrideToImplementCustomLogic_CallToSuperRecommended def postprocess_trajectory( self, sample_batch: SampleBatch, other_agent_batches: Optional[SampleBatch] = None, episode: Optional["Episode"] = None, ): """Post process trajectory in the format of a SampleBatch. Args: sample_batch: sample_batch: batch of experiences for the policy, which will contain at most one episode trajectory. other_agent_batches: In a multi-agent env, this contains a mapping of agent ids to (policy, agent_batch) tuples containing the policy and experiences of the other agents. episode: An optional multi-agent episode object to provide access to all of the internal episode state, which may be useful for model-based or multi-agent algorithms. Returns: The postprocessed sample batch. 
""" return Policy.postprocess_trajectory(self, sample_batch) @override(TFPolicy) @OverrideToImplementCustomLogic def optimizer( self, ) -> Union["tf.keras.optimizers.Optimizer", List["tf.keras.optimizers.Optimizer"]]: """TF optimizer to use for policy optimization. Returns: A local optimizer or a list of local optimizers to use for this Policy's Model. """ return super().optimizer() def _init_dist_class(self): if is_overridden(self.action_sampler_fn) or is_overridden( self.action_distribution_fn ): if not is_overridden(self.make_model): raise ValueError( "`make_model` is required if `action_sampler_fn` OR " "`action_distribution_fn` is given" ) else: dist_class, _ = ModelCatalog.get_action_dist( self.action_space, self.config["model"] ) return dist_class def _init_view_requirements(self): # If ViewRequirements are explicitly specified. if getattr(self, "view_requirements", None): return # Use default settings. # Add NEXT_OBS, STATE_IN_0.., and others. self.view_requirements = self._get_default_view_requirements() # Combine view_requirements for Model and Policy. # TODO(jungong) : models will not carry view_requirements once they # are migrated to be organic Keras models. self.view_requirements.update(self.model.view_requirements) # Disable env-info placeholder. if SampleBatch.INFOS in self.view_requirements: self.view_requirements[SampleBatch.INFOS].used_for_training = False def _init_state_inputs(self, existing_inputs: Dict[str, "tf1.placeholder"]): """Initialize input placeholders. Args: existing_inputs: existing placeholders. """ if existing_inputs: self._state_inputs = [ v for k, v in existing_inputs.items() if k.startswith("state_in_") ] # Placeholder for RNN time-chunk valid lengths. if self._state_inputs: self._seq_lens = existing_inputs[SampleBatch.SEQ_LENS] # Create new input placeholders. else: self._state_inputs = [ get_placeholder( space=vr.space, time_axis=not isinstance(vr.shift, int), name=k, ) for k, vr in self.model.view_requirements.items() if k.startswith("state_in_") ] # Placeholder for RNN time-chunk valid lengths. if self._state_inputs: self._seq_lens = tf1.placeholder( dtype=tf.int32, shape=[None], name="seq_lens" ) def _init_input_dict_and_dummy_batch( self, existing_inputs: Dict[str, "tf1.placeholder"] ) -> Tuple[Union[int, TensorType], Union[bool, TensorType]]: """Initialized input_dict and dummy_batch data. Args: existing_inputs: When copying a policy, this specifies an existing dict of placeholders to use instead of defining new ones. Returns: timestep: training timestep. explore: whether this policy should explore. """ # Setup standard placeholders. if self._is_tower: assert existing_inputs is not None timestep = existing_inputs["timestep"] explore = False ( self._input_dict, self._dummy_batch, ) = self._create_input_dict_and_dummy_batch( self.view_requirements, existing_inputs ) else: # Placeholder for (sampling steps) timestep (int). timestep = tf1.placeholder_with_default( tf.zeros((), dtype=tf.int64), (), name="timestep" ) # Placeholder for `is_exploring` flag. explore = tf1.placeholder_with_default(True, (), name="is_exploring") ( self._input_dict, self._dummy_batch, ) = self._create_input_dict_and_dummy_batch(self.view_requirements, {}) # Placeholder for `is_training` flag. self._input_dict.set_training(self._get_is_training_placeholder()) return timestep, explore def _create_input_dict_and_dummy_batch(self, view_requirements, existing_inputs): """Creates input_dict and dummy_batch for loss initialization. 
Used for managing the Policy's input placeholders and for loss initialization. Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays. Args: view_requirements: The view requirements dict. existing_inputs (Dict[str, tf.placeholder]): A dict of already existing placeholders. Returns: Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The input_dict/dummy_batch tuple. """ input_dict = {} for view_col, view_req in view_requirements.items(): # Point state_in to the already existing self._state_inputs. mo = re.match("state_in_(\d+)", view_col) if mo is not None: input_dict[view_col] = self._state_inputs[int(mo.group(1))] # State-outs (no placeholders needed). elif view_col.startswith("state_out_"): continue # Skip action dist inputs placeholder (do later). elif view_col == SampleBatch.ACTION_DIST_INPUTS: continue # This is a tower: Input placeholders already exist. elif view_col in existing_inputs: input_dict[view_col] = existing_inputs[view_col] # All others. else: time_axis = not isinstance(view_req.shift, int) if view_req.used_for_training: # Create a +time-axis placeholder if the shift is not an # int (range or list of ints). # Do not flatten actions if action flattening disabled. if self.config.get("_disable_action_flattening") and view_col in [ SampleBatch.ACTIONS, SampleBatch.PREV_ACTIONS, ]: flatten = False # Do not flatten observations if no preprocessor API used. elif ( view_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS] and self.config["_disable_preprocessor_api"] ): flatten = False # Flatten everything else. else: flatten = True input_dict[view_col] = get_placeholder( space=view_req.space, name=view_col, time_axis=time_axis, flatten=flatten, ) dummy_batch = self._get_dummy_batch_from_view_requirements(batch_size=32) return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch def _init_action_fetches( self, timestep: Union[int, TensorType], explore: Union[bool, TensorType] ) -> Tuple[TensorType, TensorType, TensorType, type, Dict[str, TensorType]]: """Create action related fields for base Policy and loss initialization.""" # Multi-GPU towers do not need any action computing/exploration # graphs. sampled_action = None sampled_action_logp = None dist_inputs = None extra_action_fetches = {} self._state_out = None if not self._is_tower: # Create the Exploration object to use for this Policy. self.exploration = self._create_exploration() # Fully customized action generation (e.g., custom policy). if is_overridden(self.action_sampler_fn): ( sampled_action, sampled_action_logp, dist_inputs, self._state_out, ) = self.action_sampler_fn( self.model, obs_batch=self._input_dict[SampleBatch.CUR_OBS], state_batches=self._state_inputs, seq_lens=self._seq_lens, prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS), prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS), explore=explore, is_training=self._input_dict.is_training, ) # Distribution generation is customized, e.g., DQN, DDPG. else: if is_overridden(self.action_distribution_fn): # Try new action_distribution_fn signature, supporting # state_batches and seq_lens. in_dict = self._input_dict ( dist_inputs, self.dist_class, self._state_out, ) = self.action_distribution_fn( self.model, input_dict=in_dict, state_batches=self._state_inputs, seq_lens=self._seq_lens, explore=explore, timestep=timestep, is_training=in_dict.is_training, ) # Default distribution generation behavior: # Pass through model. E.g., PG, PPO. 
else: if isinstance(self.model, tf.keras.Model): dist_inputs, self._state_out, extra_action_fetches = self.model( self._input_dict ) else: dist_inputs, self._state_out = self.model(self._input_dict) action_dist = self.dist_class(dist_inputs, self.model) # Using exploration to get final action (e.g. via sampling). ( sampled_action, sampled_action_logp, ) = self.exploration.get_exploration_action( action_distribution=action_dist, timestep=timestep, explore=explore ) if dist_inputs is not None: extra_action_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs if sampled_action_logp is not None: extra_action_fetches[SampleBatch.ACTION_LOGP] = sampled_action_logp extra_action_fetches[SampleBatch.ACTION_PROB] = tf.exp( tf.cast(sampled_action_logp, tf.float32) ) return ( sampled_action, sampled_action_logp, dist_inputs, extra_action_fetches, ) def _init_optimizers(self): # Create the optimizer/exploration optimizer here. Some initialization # steps (e.g. exploration postprocessing) may need this. optimizers = force_list(self.optimizer()) if getattr(self, "exploration", None): optimizers = self.exploration.get_exploration_optimizer(optimizers) # No optimizers produced -> Return. if not optimizers: return # The list of local (tf) optimizers (one per loss term). self._optimizers = optimizers # Backward compatibility. self._optimizer = optimizers[0] def maybe_initialize_optimizer_and_loss(self): # We don't need to initialize loss calculation for MultiGPUTowerStack. if self._is_tower: return # Loss initialization and model/postprocessing test calls. self._init_optimizers() self._initialize_loss_from_dummy_batch(auto_remove_unneeded_view_reqs=True) # Create MultiGPUTowerStacks, if we have at least one actual # GPU or >1 CPUs (fake GPUs). if len(self.devices) > 1 or any("gpu" in d for d in self.devices): # Per-GPU graph copies created here must share vars with the # policy. Therefore, `reuse` is set to tf1.AUTO_REUSE because # Adam nodes are created after all of the device copies are # created. with tf1.variable_scope("", reuse=tf1.AUTO_REUSE): self.multi_gpu_tower_stacks = [ TFMultiGPUTowerStack(policy=self) for _ in range(self.config.get("num_multi_gpu_tower_stacks", 1)) ] # Initialize again after loss and tower init. self.get_session().run(tf1.global_variables_initializer()) @override(Policy) def _initialize_loss_from_dummy_batch( self, auto_remove_unneeded_view_reqs: bool = True ) -> None: # Test calls depend on variable init, so initialize model first. self.get_session().run(tf1.global_variables_initializer()) # Fields that have not been accessed are not needed for action # computations -> Tag them as `used_for_compute_actions=False`. 
for key, view_req in self.view_requirements.items(): if ( not key.startswith("state_in_") and key not in self._input_dict.accessed_keys ): view_req.used_for_compute_actions = False for key, value in self.extra_action_out_fn().items(): self._dummy_batch[key] = get_dummy_batch_for_space( gym.spaces.Box( -1.0, 1.0, shape=value.shape.as_list()[1:], dtype=value.dtype.name ), batch_size=len(self._dummy_batch), ) self._input_dict[key] = get_placeholder(value=value, name=key) if key not in self.view_requirements: logger.info("Adding extra-action-fetch `{}` to view-reqs.".format(key)) self.view_requirements[key] = ViewRequirement( space=gym.spaces.Box( -1.0, 1.0, shape=value.shape[1:], dtype=value.dtype.name ), used_for_compute_actions=False, ) dummy_batch = self._dummy_batch logger.info("Testing `postprocess_trajectory` w/ dummy batch.") self.exploration.postprocess_trajectory(self, dummy_batch, self.get_session()) _ = self.postprocess_trajectory(dummy_batch) # Add new columns automatically to (loss) input_dict. for key in dummy_batch.added_keys: if key not in self._input_dict: self._input_dict[key] = get_placeholder( value=dummy_batch[key], name=key ) if key not in self.view_requirements: self.view_requirements[key] = ViewRequirement( space=gym.spaces.Box( -1.0, 1.0, shape=dummy_batch[key].shape[1:], dtype=dummy_batch[key].dtype, ), used_for_compute_actions=False, ) train_batch = SampleBatch( dict(self._input_dict, **self._loss_input_dict), _is_training=True, ) if self._state_inputs: train_batch[SampleBatch.SEQ_LENS] = self._seq_lens self._loss_input_dict.update( {SampleBatch.SEQ_LENS: train_batch[SampleBatch.SEQ_LENS]} ) self._loss_input_dict.update({k: v for k, v in train_batch.items()}) if log_once("loss_init"): logger.debug( "Initializing loss function with dummy input:\n\n{}\n".format( summarize(train_batch) ) ) losses = self._do_loss_init(train_batch) all_accessed_keys = ( train_batch.accessed_keys | dummy_batch.accessed_keys | dummy_batch.added_keys | set(self.model.view_requirements.keys()) ) TFPolicy._initialize_loss( self, losses, [(k, v) for k, v in train_batch.items() if k in all_accessed_keys] + ( [(SampleBatch.SEQ_LENS, train_batch[SampleBatch.SEQ_LENS])] if SampleBatch.SEQ_LENS in train_batch else [] ), ) if "is_training" in self._loss_input_dict: del self._loss_input_dict["is_training"] # Call the grads stats fn. # TODO: (sven) rename to simply stats_fn to match eager and torch. self._stats_fetches.update(self.grad_stats_fn(train_batch, self._grads)) # Add new columns automatically to view-reqs. if auto_remove_unneeded_view_reqs: # Add those needed for postprocessing and training. all_accessed_keys = train_batch.accessed_keys | dummy_batch.accessed_keys # Tag those only needed for post-processing (with some exceptions). for key in dummy_batch.accessed_keys: if ( key not in train_batch.accessed_keys and key not in self.model.view_requirements and key not in [ SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX, SampleBatch.UNROLL_ID, SampleBatch.DONES, SampleBatch.REWARDS, SampleBatch.INFOS, SampleBatch.OBS_EMBEDS, ] ): if key in self.view_requirements: self.view_requirements[key].used_for_training = False if key in self._loss_input_dict: del self._loss_input_dict[key] # Remove those not needed at all (leave those that are needed # by Sampler to properly execute sample collection). # Also always leave DONES, REWARDS, and INFOS, no matter what. 
for key in list(self.view_requirements.keys()): if ( key not in all_accessed_keys and key not in [ SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX, SampleBatch.UNROLL_ID, SampleBatch.DONES, SampleBatch.REWARDS, SampleBatch.INFOS, ] and key not in self.model.view_requirements ): # If user deleted this key manually in postprocessing # fn, warn about it and do not remove from # view-requirements. if key in dummy_batch.deleted_keys: logger.warning( "SampleBatch key '{}' was deleted manually in " "postprocessing function! RLlib will " "automatically remove non-used items from the " "data stream. Remove the `del` from your " "postprocessing function.".format(key) ) # If we are not writing output to disk, safe to erase # this key to save space in the sample batch. elif self.config["output"] is None: del self.view_requirements[key] if key in self._loss_input_dict: del self._loss_input_dict[key] # Add those data_cols (again) that are missing and have # dependencies by view_cols. for key in list(self.view_requirements.keys()): vr = self.view_requirements[key] if ( vr.data_col is not None and vr.data_col not in self.view_requirements ): used_for_training = vr.data_col in train_batch.accessed_keys self.view_requirements[vr.data_col] = ViewRequirement( space=vr.space, used_for_training=used_for_training ) self._loss_input_dict_no_rnn = { k: v for k, v in self._loss_input_dict.items() if (v not in self._state_inputs and v != self._seq_lens) } def _do_loss_init(self, train_batch: SampleBatch): losses = self.loss(self.model, self.dist_class, train_batch) losses = force_list(losses) self._stats_fetches.update(self.stats_fn(train_batch)) # Override the update ops to be those of the model. self._update_ops = [] if not isinstance(self.model, tf.keras.Model): self._update_ops = self.model.update_ops() return losses @override(TFPolicy) @DeveloperAPI def copy(self, existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> TFPolicy: """Creates a copy of self using existing input placeholders.""" flat_loss_inputs = tree.flatten(self._loss_input_dict) flat_loss_inputs_no_rnn = tree.flatten(self._loss_input_dict_no_rnn) # Note that there might be RNN state inputs at the end of the list if len(flat_loss_inputs) != len(existing_inputs): raise ValueError( "Tensor list mismatch", self._loss_input_dict, self._state_inputs, existing_inputs, ) for i, v in enumerate(flat_loss_inputs_no_rnn): if v.shape.as_list() != existing_inputs[i].shape.as_list(): raise ValueError( "Tensor shape mismatch", i, v.shape, existing_inputs[i].shape ) # By convention, the loss inputs are followed by state inputs and then # the seq len tensor. rnn_inputs = [] for i in range(len(self._state_inputs)): rnn_inputs.append( ( "state_in_{}".format(i), existing_inputs[len(flat_loss_inputs_no_rnn) + i], ) ) if rnn_inputs: rnn_inputs.append((SampleBatch.SEQ_LENS, existing_inputs[-1])) existing_inputs_unflattened = tree.unflatten_as( self._loss_input_dict_no_rnn, existing_inputs[: len(flat_loss_inputs_no_rnn)], ) input_dict = OrderedDict( [("is_exploring", self._is_exploring), ("timestep", self._timestep)] + [ (k, existing_inputs_unflattened[k]) for i, k in enumerate(self._loss_input_dict_no_rnn.keys()) ] + rnn_inputs ) instance = self.__class__( self.observation_space, self.action_space, self.config, existing_inputs=input_dict, existing_model=[ self.model, # Deprecated: Target models should all reside under # `policy.target_model` now. 
("target_q_model", getattr(self, "target_q_model", None)), ("target_model", getattr(self, "target_model", None)), ], ) instance._loss_input_dict = input_dict losses = instance._do_loss_init(SampleBatch(input_dict)) loss_inputs = [ (k, existing_inputs_unflattened[k]) for i, k in enumerate(self._loss_input_dict_no_rnn.keys()) ] TFPolicy._initialize_loss(instance, losses, loss_inputs) instance._stats_fetches.update( instance.grad_stats_fn(input_dict, instance._grads) ) return instance @override(Policy) @DeveloperAPI def get_initial_state(self) -> List[TensorType]: if self.model: return self.model.get_initial_state() else: return [] @override(Policy) @DeveloperAPI def load_batch_into_buffer( self, batch: SampleBatch, buffer_index: int = 0, ) -> int: # Set the is_training flag of the batch. batch.set_training(True) # Shortcut for 1 CPU only: Store batch in # `self._loaded_single_cpu_batch`. if len(self.devices) == 1 and self.devices[0] == "/cpu:0": assert buffer_index == 0 self._loaded_single_cpu_batch = batch return len(batch) input_dict = self._get_loss_inputs_dict(batch, shuffle=False) data_keys = tree.flatten(self._loss_input_dict_no_rnn) if self._state_inputs: state_keys = self._state_inputs + [self._seq_lens] else: state_keys = [] inputs = [input_dict[k] for k in data_keys] state_inputs = [input_dict[k] for k in state_keys] return self.multi_gpu_tower_stacks[buffer_index].load_data( sess=self.get_session(), inputs=inputs, state_inputs=state_inputs, ) @override(Policy) @DeveloperAPI def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int: # Shortcut for 1 CPU only: Batch should already be stored in # `self._loaded_single_cpu_batch`. if len(self.devices) == 1 and self.devices[0] == "/cpu:0": assert buffer_index == 0 return ( len(self._loaded_single_cpu_batch) if self._loaded_single_cpu_batch is not None else 0 ) return self.multi_gpu_tower_stacks[buffer_index].num_tuples_loaded @override(Policy) @DeveloperAPI def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): # Shortcut for 1 CPU only: Batch should already be stored in # `self._loaded_single_cpu_batch`. if len(self.devices) == 1 and self.devices[0] == "/cpu:0": assert buffer_index == 0 if self._loaded_single_cpu_batch is None: raise ValueError( "Must call Policy.load_batch_into_buffer() before " "Policy.learn_on_loaded_batch()!" ) # Get the correct slice of the already loaded batch to use, # based on offset and batch size. batch_size = self.config.get( "sgd_minibatch_size", self.config["train_batch_size"] ) if batch_size >= len(self._loaded_single_cpu_batch): sliced_batch = self._loaded_single_cpu_batch else: sliced_batch = self._loaded_single_cpu_batch.slice( start=offset, end=offset + batch_size ) return self.learn_on_batch(sliced_batch) return self.multi_gpu_tower_stacks[buffer_index].optimize( self.get_session(), offset ) @override(TFPolicy) def gradients(self, optimizer, loss): optimizers = force_list(optimizer) losses = force_list(loss) if is_overridden(self.compute_gradients_fn): # New API: Allow more than one optimizer -> Return a list of # lists of gradients. if self.config["_tf_policy_handles_more_than_one_loss"]: return self.compute_gradients_fn(optimizers, losses) # Old API: Return a single List of gradients. else: return self.compute_gradients_fn(optimizers[0], losses[0]) else: return super().gradients(self, optimizers, losses)
import os
import tkinter as tk
import tkinter.scrolledtext as tkscrolled
import tkinter.filedialog as tkfd

from src.utilities.matdis.prime_op import is_prime
from src.cryptography import elgamal


class ResultPageFrame(tk.Frame):
    def __init__(self, master, title, process_time, plaindir, cipherdir):
        tk.Frame.__init__(self, master)

        self.title = tk.Label(self, text=title, font='none 12 bold')
        self.title.grid(column=0, columnspan=2, pady=20, sticky=tk.W+tk.E)

        # Processing time
        self.time_labels = []
        self.time_labels.append(tk.Label(master=self, text='Waktu proses: '))
        self.time_labels.append(tk.Label(master=self, text='{} detik'.format(process_time)))
        self.time_labels[0].grid(pady=10, sticky=tk.W)
        self.time_labels[1].grid(row=self.time_labels[0].grid_info()['row'], pady=10, column=1)

        # Input contents + size
        plainfile_size = os.path.getsize(plaindir)
        self.plain_size_labels = []
        self.plain_size_labels.append(tk.Label(master=self, text='Ukuran file masukan: '))
        self.plain_size_labels.append(tk.Label(master=self, text='{} bytes'.format(plainfile_size)))
        self.plain_size_labels[0].grid(pady=10, sticky=tk.W)
        self.plain_size_labels[1].grid(row=self.plain_size_labels[0].grid_info()['row'], pady=10, column=1)

        tk.Label(self, text='Isi input:').grid(sticky=tk.W)
        with open(plaindir, 'rb') as fp:
            # Use a separate widget for the input file so it is not
            # overwritten by the output widget created below.
            self.plain_text = tkscrolled.ScrolledText(self, height=10)
            self.plain_text.insert(1.0, fp.read())
            self.plain_text.grid(columnspan=2)

        # Output contents + size
        cipherfile_size = os.path.getsize(cipherdir)
        self.cipher_size_labels = []
        self.cipher_size_labels.append(tk.Label(master=self, text='Ukuran file keluaran: '))
        self.cipher_size_labels.append(tk.Label(master=self, text='{} bytes'.format(cipherfile_size)))
        self.cipher_size_labels[0].grid(pady=10, sticky=tk.W)
        self.cipher_size_labels[1].grid(row=self.cipher_size_labels[0].grid_info()['row'], pady=10, column=1)

        tk.Label(self, text='Hasil keluaran:').grid(sticky=tk.W)
        with open(cipherdir, 'rb') as fp:
            self.cipher_text = tkscrolled.ScrolledText(self, height=10)
            self.cipher_text.insert(1.0, fp.read())
            self.cipher_text.grid(columnspan=2)

        return_button = tk.Button(self, text='Kembali',
                                  command=lambda: master.open_main_menu())
        return_button.grid(column=1, pady=50, sticky=tk.W)
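# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the application): ResultPageFrame
# expects its master to provide an open_main_menu() method, so in the real
# program the master is the main window class rather than a bare Tk root. The
# stub and file paths below are placeholders that only show how the frame is
# constructed and packed; the files must exist for getsize()/open() to work.
if __name__ == "__main__":
    class _DemoRoot(tk.Tk):
        def open_main_menu(self):
            # Placeholder: the real application switches back to its menu.
            self.destroy()

    root = _DemoRoot()
    frame = ResultPageFrame(root, 'Encryption Result', 0.42,
                            plaindir='plain.txt', cipherdir='cipher.txt')
    frame.pack(fill=tk.BOTH, expand=True)
    root.mainloop()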
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import unittest import base64 from azext_k8s_configuration.providers.SourceControlConfigurationProvider import get_protected_settings from azure.cli.core.azclierror import InvalidArgumentValueError, MutuallyExclusiveArgumentError from azext_k8s_configuration.validators import ( validate_configuration_name, validate_known_hosts, validate_operator_instance_name, validate_operator_namespace, validate_private_key, validate_url_with_params, ) from Crypto.PublicKey import DSA class TestValidateKeyTypes(unittest.TestCase): def test_bad_private_key(self): private_key_encoded = base64.b64encode("this is not a valid private key".encode('utf-8')).decode('utf-8') err = "Error! --ssh-private-key provided in invalid format" with self.assertRaises(InvalidArgumentValueError) as cm: validate_private_key(private_key_encoded) self.assertEqual(str(cm.exception), err) def test_rsa_private_key(self): rsa_key = "LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUJsd0FBQUFkemMyZ3RjbgpOaEFBQUFBd0VBQVFBQUFZRUF1bVA5M09qRHdjdlEyZHZhRlJNNWYrMEhVSnFvOFJnbmdwaGN3NFZidnd1TVNoQTZFc2FyCjFsam1CNUNnT1NGNHJqNDIvcmdxMW1hWndoSUgvckdPSElNa0lIcjFrZmNKMnBrR3ZhK1NxVm4wWUhzMjBpUW02ay92ZXQKdXdVQ2J1QjlxSU5zL2h2b0ppQ21JMUVpVWZ4VGoxRFJCUG15OXR3Qm52bW5FS1kxZ2NhT2YrS2Y1aGhCc09pd00yZnBRTwp0aTlIcHVzM1JhNXpFeElWbjJzVitpRjVvV3ZZM1JQTTlKNXFPMXRObUtOWll6TjgzbDYxMlBzRmR1Vm1QM2NUUlJtK2pzCjdzZW5jY0U0RitzU0hQMlJpMk5DU0JvZ2RJOFR5VTlzeTM3Szl3bFJ5NGZkWWI1K1o3YUZjMjhTNDdDWlo5dTRFVXdWUEYKbjU4dTUzajU0empwdXNpei9ZWmx3MG5NeEQ5SXI0aHlJZ2s0NlUzVmdHR0NPUytZVTVZT2JURGhPRG5udk5VRkg2NVhCagpEM3l6WVJuRDA3b2swQ1JUR3RCOWMzTjBFNDBjUnlPeVpEQ0l5a0FPdHZXYnBUZzdnaXA2UDc4K2pLVlFnanFwRTVQdi9ICnl1dlB6cUJoUkpWcG5VR1dvWnFlcWJhd2N5RWZwdHFLaTNtWUdVMHBBQUFGa0U5cUs3SlBhaXV5QUFBQUIzTnphQzF5YzIKRUFBQUdCQUxwai9kem93OEhMME5uYjJoVVRPWC90QjFDYXFQRVlKNEtZWE1PRlc3OExqRW9RT2hMR3E5Wlk1Z2VRb0RraAplSzQrTnY2NEt0Wm1tY0lTQi82eGpoeURKQ0I2OVpIM0NkcVpCcjJ2a3FsWjlHQjdOdElrSnVwUDczcmJzRkFtN2dmYWlECmJQNGI2Q1lncGlOUklsSDhVNDlRMFFUNXN2YmNBWjc1cHhDbU5ZSEdqbi9pbitZWVFiRG9zRE5uNlVEcll2UjZick4wV3UKY3hNU0ZaOXJGZm9oZWFGcjJOMFR6UFNlYWp0YlRaaWpXV016Zk41ZXRkajdCWGJsWmo5M0UwVVp2bzdPN0hwM0hCT0JmcgpFaHo5a1l0alFrZ2FJSFNQRThsUGJNdCt5dmNKVWN1SDNXRytmbWUyaFhOdkV1T3dtV2ZidUJGTUZUeForZkx1ZDQrZU00CjZicklzLzJHWmNOSnpNUS9TSytJY2lJSk9PbE4xWUJoZ2prdm1GT1dEbTB3NFRnNTU3elZCUit1VndZdzk4czJFWnc5TzYKSk5Ba1V4clFmWE56ZEJPTkhFY2pzbVF3aU1wQURyYjFtNlU0TzRJcWVqKy9Qb3lsVUlJNnFST1Q3L3g4cnJ6ODZnWVVTVgphWjFCbHFHYW5xbTJzSE1oSDZiYWlvdDVtQmxOS1FBQUFBTUJBQUVBQUFHQkFMaElmSXFacUZKSFRXcllyN24rays4alR3ClFtcGJvWmc1YmZSWGdhdGljaEo4ZGlXOGlNblFFRVRBcFd0OU5FZ0tqbDRrSGRuSnoyUERkZzFIN0ExaHppbkNsdzZMTTAKYUkyMGxyR2NrWWpXNDRNd3ozYmRQNHlURTllSXRiM0pmN1pNSGpqek4rSy96bWN0eWdMeXFZSzVXYTljM1JnMXdIRWFNNAplakUvNDg4M25WUmJvSFJDcjFCVi8wQVVFTTZhNisrRHpVZW9WdWdWL3RsV3RVMlJuQlZ4eCtJS0FVSDZRTHJFU2JkUkRoCkVGUEFhRWtEb3crd3dDcFpqTXBhMHdRZXBDSkhwWkJLN1pBU25EU3R3Y2RKRE4yeHZzdVNOOGg0bkN0MlZWd0xRenJKeVAKU2VjcWM3M1hIc3E3VWx6ZU5veHlTVW9KZ2JjNTZoRzhWYS9ITlhsOUtkdkFlWUVzS1l1OW5NRUprVSt3VHo1KzUvM2wwVQpxSkErb0pTVTducjYydlVKQnljbXg0SFdBcjJ6QkR2QnFBUWMzRG9LWHczeVM1Z0c5Zkc0c25OUUkxOHVRSjdOSjdndHZHClpKRU56bTNJMmFTMzl5dndWZnFIMXpXVERxU2VNeWhYeWFnTkFEcGtCVEJIMVJQR2NtTFplclFmWWx1djV
VUmFNTXdRQUEKQU1BdE9oNHFwUUhidm5tQ1RVakx4dXRrWnRaRlhNa0hmSTk5NS9Nd2RvWVY1eWRKV0pUVGsyKzB1QVBIcTZEejk2b3dWbQpjUkF2WDBDOVU5d3ZRMkpnR0Y1MDZzcmgzZkVpUzM2d1ArOFd0RjZ6ODd0enJwQnpQVHIxOGRONURCOEx5L3dXRk5BVTdqClBUbXM0dHlUY1VsRXR3eEt4TXJTNC9ROUZwMWozL3JNdnNZdGVaSVgycmN4YUhkWWJDVGJtTUpZS3lVTWVXTk56NXpub1EKcFcyd2NDSmpJc1MvS1F2WmR4cHZwNWd0RXE1WlEva3FvLzJVRWd1NHhwdDNWeUNma0FBQURCQVBOSHVEU1R0ZEpJWjdzcwpaQkVwcUE4TE54b1dMQ2RURnlpRERiUnpYOWVPTldkRFQ3NklaRE9HejczNXJhZUFSM2FiY0FhaUM0dDQwTFJTNGEyN29sCm9wK1dSak9wcjVNYUtOUnk4MCt6VWw3WUlSMjErKzVnMFVnNkRnQlBEdmFJSHFSTnRsZ2gyVXdTL0cva1lOaUlEY0JiS1EKOUcvdTI4ekRIRUtNL21YYS8wYnFtSm16ZUYvY1BLdHdScFE3clFoRnAwUkdFcnZtc0l4dDl6K0ZZZUdncjFBYUVTV0ZlTApmUmZsa0lnOVBWOEl0b09GN25qK2VtMkxkNTNCS1hSUUFBQU1FQXhDTFBueHFFVEsyMW5QOXFxQVYzMEZUUkhGNW9kRHg4ClpiYnZIbjgwdEgxQjYwZjRtTGJFRm56REZFR0NwS2Rwb3dyUXR6WUhnQzNBaGNJUE9BbXFXaDg0UEFPbisreHhFanNaQkwKRWhVWmNFUndkYTMzTnJDNTVEMzZxbDBMZEsrSGRuZUFzVGZhazh0bWVlOTJWb0RxdWovNGFSMjBmUTBJUFVzMU8rWHNRNQpGWVFYQzZndExHZGRzRVFoSDF6MTh6RGtWa1UwdEhlZkJaL2pFZXBiOEZScXoxR1hpT0hGK2xBZVE2b3crS0xlcWtCcXQ4CkZxMHhGdG90SlF4VnFWQUFBQUYycHZhVzV1YVhOQVJFVlRTMVJQVUMxUVRWVkdVRFpOQVFJRAotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K" validate_private_key(rsa_key) def test_dsa_private_key(self): key = DSA.generate(2048) private_key_encoded = base64.b64encode(key.export_key()).decode('utf-8') validate_private_key(private_key_encoded) def test_ecdsa_private_key(self): ecdsa_key = "LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFhQUFBQUJObFkyUnpZUwoxemFHRXlMVzVwYzNSd01qVTJBQUFBQ0c1cGMzUndNalUyQUFBQVFRUjBRc1BjWmJKeWZPaXE2a1M1d0VaeE5DbmR2YVJHCm1ETEUvVVBjakpDTDZQTVIyZmdPS2NnWlhzTEZkTUFzSnExS2d6TmNDN0ZXNGE0L0wrYTFWWUxDQUFBQXNIZ1RqTFY0RTQKeTFBQUFBRTJWalpITmhMWE5vWVRJdGJtbHpkSEF5TlRZQUFBQUlibWx6ZEhBeU5UWUFBQUJCQkhSQ3c5eGxzbko4NktycQpSTG5BUm5FMEtkMjlwRWFZTXNUOVE5eU1rSXZvOHhIWitBNHB5Qmxld3NWMHdDd21yVXFETTF3THNWYmhyajh2NXJWVmdzCklBQUFBZ0h1U3laU0NUZzJZbVNpOG9aY2c0cnVpODh0T1NUSm1aRVhkR09hdExySHNBQUFBWGFtOXBibTVwYzBCRVJWTkwKVkU5UUxWQk5WVVpRTmswQgotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K" validate_private_key(ecdsa_key) def test_ed25519_private_key(self): ed25519_key = "LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFNd0FBQUF0emMyZ3RaVwpReU5UVXhPUUFBQUNCNjF0RzkrNGFmOTZsWGoyUStjWjJMT2JpV1liMlRtWVR6N3NSV0JDM1hVZ0FBQUtCRzFWRWZSdFZSCkh3QUFBQXR6YzJndFpXUXlOVFV4T1FBQUFDQjYxdEc5KzRhZjk2bFhqMlErY1oyTE9iaVdZYjJUbVlUejdzUldCQzNYVWcKQUFBRURRTStLcCtOSWpJVUhSUklqRFE5VDZ0U0V0SG9Ic0w1QjlwbHpCNlZ2MnluclcwYjM3aHAvM3FWZVBaRDV4bllzNQp1SlpodlpPWmhQUHV4RllFTGRkU0FBQUFGMnB2YVc1dWFYTkFSRVZUUzFSUFVDMVFUVlZHVURaTkFRSURCQVVHCi0tLS0tRU5EIE9QRU5TU0ggUFJJVkFURSBLRVktLS0tLQo=" validate_private_key(ed25519_key) class TestValidateK8sNaming(unittest.TestCase): def test_long_operator_namespace(self): operator_namespace = "thisisaverylongnamethatistoolongtobeused" namespace = OperatorNamespace(operator_namespace) err = 'Error! Invalid --operator-namespace' with self.assertRaises(InvalidArgumentValueError) as cm: validate_operator_namespace(namespace) self.assertEqual(str(cm.exception), err) def test_long_operator_instance_name(self): operator_instance_name = "thisisaverylongnamethatistoolongtobeused" namespace = OperatorInstanceName(operator_instance_name) err = 'Error! 
Invalid --operator-instance-name' with self.assertRaises(InvalidArgumentValueError) as cm: validate_operator_instance_name(namespace) self.assertEqual(str(cm.exception), err) def test_caps_operator_namespace(self): operator_namespace = 'Myoperatornamespace' namespace = OperatorNamespace(operator_namespace) err = 'Error! Invalid --operator-namespace' with self.assertRaises(InvalidArgumentValueError) as cm: validate_operator_namespace(namespace) self.assertEqual(str(cm.exception), err) def test_caps_operator_instance_name(self): operator_instance_name = 'Myoperatorname' namespace = OperatorInstanceName(operator_instance_name) err = 'Error! Invalid --operator-instance-name' with self.assertRaises(InvalidArgumentValueError) as cm: validate_operator_instance_name(namespace) self.assertEqual(str(cm.exception), err) def test_long_config_name(self): config_name = "thisisaverylongnamethatistoolongtobeusedthisisaverylongnamethatistoolongtobeused" err = 'Error! Invalid --name' namespace = ConfigurationName(config_name) with self.assertRaises(InvalidArgumentValueError) as cm: validate_configuration_name(namespace) self.assertEqual(str(cm.exception), err) def test_valid_config_name(self): config_name = "this-is-a-valid-config" namespace = ConfigurationName(config_name) validate_configuration_name(namespace) def test_caps_config_name(self): config_name = "ThisIsaCapsConfigName" err = 'Error! Invalid --name' namespace = ConfigurationName(config_name) with self.assertRaises(InvalidArgumentValueError) as cm: validate_configuration_name(namespace) self.assertEqual(str(cm.exception), err) def test_dot_config_name(self): config_name = "a234567890b234567890c234567890d234567890e234567890f234567890.23" err = 'Error! Invalid --name' namespace = ConfigurationName(config_name) with self.assertRaises(InvalidArgumentValueError) as cm: validate_configuration_name(namespace) self.assertEqual(str(cm.exception), err) def test_end_hyphen_config_name(self): config_name = "a234567890b234567890c234567890d234567890e234567890f23456789023-" err = 'Error! Invalid --name' namespace = ConfigurationName(config_name) with self.assertRaises(InvalidArgumentValueError) as cm: validate_configuration_name(namespace) self.assertEqual(str(cm.exception), err) class TestValidateURLWithParams(unittest.TestCase): def test_ssh_private_key_with_ssh_url(self): validate_url_with_params('git@github.com:jonathan-innis/helm-operator-get-started-private.git', True, False, False, False, False, False) def test_ssh_known_hosts_with_ssh_url(self): validate_url_with_params('git@github.com:jonathan-innis/helm-operator-get-started-private.git', False, False, True, False, False, False) def test_https_auth_with_https_url(self): validate_url_with_params('https://github.com/jonathan-innis/helm-operator-get-started-private.git', False, False, False, False, True, True) def test_ssh_private_key_with_https_url(self): err = 'Error! An --ssh-private-key cannot be used with an http(s) url' with self.assertRaises(MutuallyExclusiveArgumentError) as cm: validate_url_with_params('https://github.com/jonathan-innis/helm-operator-get-started-private.git', True, False, False, False, False, False) self.assertEqual(str(cm.exception), err) def test_ssh_known_hosts_with_https_url(self): err = 'Error! 
--ssh-known-hosts cannot be used with an http(s) url' with self.assertRaises(MutuallyExclusiveArgumentError) as cm: validate_url_with_params('https://github.com/jonathan-innis/helm-operator-get-started-private.git', False, False, True, False, False, False) self.assertEqual(str(cm.exception), err) def test_https_auth_with_ssh_url(self): err = 'Error! https auth (--https-user and --https-key) cannot be used with a non-http(s) url' with self.assertRaises(MutuallyExclusiveArgumentError) as cm: validate_url_with_params('git@github.com:jonathan-innis/helm-operator-get-started-private.git', False, False, False, False, True, True) self.assertEqual(str(cm.exception), err) class TestValidateKnownHosts(unittest.TestCase): def test_valid_known_hosts(self): known_hosts_raw = "ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H" known_hosts_encoded = base64.b64encode(known_hosts_raw.encode('utf-8')).decode('utf-8') validate_known_hosts(known_hosts_encoded) def test_valid_known_hosts_with_comment(self): known_hosts_raw = "ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H ThisIsAValidComment" known_hosts_encoded = base64.b64encode(known_hosts_raw.encode('utf-8')).decode('utf-8') validate_known_hosts(known_hosts_encoded) def test_valid_known_hosts_with_comment_own_line(self): known_hosts_raw = "#this is a comment on its own line\nssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H" known_hosts_encoded = base64.b64encode(known_hosts_raw.encode('utf-8')).decode('utf-8') validate_known_hosts(known_hosts_encoded) def test_invalid_known_hosts(self): known_hosts_raw = "thisisabadknownhostsfilethatisaninvalidformat" known_hosts_encoded = base64.b64encode(known_hosts_raw.encode('utf-8')).decode('utf-8') err = 'Error! ssh known_hosts provided in wrong format' with self.assertRaises(InvalidArgumentValueError) as cm: validate_known_hosts(known_hosts_encoded) self.assertEqual(str(cm.exception), err) class OperatorNamespace: def __init__(self, operator_namespace): self.operator_namespace = operator_namespace class OperatorInstanceName: def __init__(self, operator_instance_name): self.operator_instance_name = operator_instance_name class ConfigurationName: def __init__(self, name): self.name = name
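# --- Editor's sketch (assumption; not part of the original test module) ---
# The naming tests above imply that configuration/operator names must be valid
# Kubernetes (RFC 1123) labels: lowercase alphanumerics and '-', at most 63
# characters, starting and ending with an alphanumeric. A minimal validator
# consistent with those cases might look like the sketch below; the real
# validate_configuration_name lives elsewhere, and the import path and exact
# error message are assumptions inferred from the tests.
import re
from azure.cli.core.azclierror import InvalidArgumentValueError  # assumed import path


_K8S_LABEL_RE = re.compile(r'^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?$')


def validate_configuration_name_sketch(namespace):
    # Rejects names longer than 63 chars, uppercase letters, dots, and
    # leading/trailing hyphens, matching the failing cases in the tests above.
    if not _K8S_LABEL_RE.match(namespace.name):
        raise InvalidArgumentValueError('Error! Invalid --name')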
import matplotlib.pyplot as plt
import numpy as np


def load_planar_dataset():
    np.random.seed(2)
    m = 400  # number of examples
    N = int(m / 2)  # number of points per class
    D = 2  # dimensionality
    X = np.zeros((m, D))  # data matrix where each row is a single example
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)
    a = 4  # maximum ray of the flower

    for j in range(2):
        ix = range(N * j, N * (j + 1))
        t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2  # theta
        r = a * np.sin(4 * t) + np.random.randn(N) * 0.2  # radius
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[ix] = j

    return X, Y


def plot_decision_boundary(model, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
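# --- Editor's usage sketch (assumption; scikit-learn is not a dependency of the
# helpers above and is used here only for illustration) ---
# load_planar_dataset returns a 2-D "flower" dataset, and plot_decision_boundary
# expects a callable that maps an (m, 2) array of grid points to predicted labels.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegressionCV

    X, Y = load_planar_dataset()          # X: (400, 2), Y: (400, 1)
    clf = LogisticRegressionCV()
    clf.fit(X, Y.ravel())

    # Wrap the classifier so it accepts the (m, 2) grid built inside the plotter.
    plot_decision_boundary(lambda x: clf.predict(x), X, Y.ravel())
    plt.title('Logistic regression decision boundary')
    plt.show()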
# -*- coding: utf-8 -* from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import utool as ut import ubelt as ub import functools # NOQA from six import next from six.moves import zip, range def safe_vstack(tup, default_shape=(0,), default_dtype=np.float): """ stacks a tuple even if it is empty """ try: return np.vstack(tup) except ValueError: return np.empty(default_shape, dtype=default_dtype) def pad_vstack(arrs, fill_value=0): """ Stacks values and pads arrays with different lengths with zeros """ total = max(map(len, arrs)) padded = [np.hstack([a, np.full(total - len(a), fill_value)]) for a in arrs] return np.vstack(padded) def safe_cat(tup, axis=0, default_shape=(0,), default_dtype=np.float): """ stacks a tuple even if it is empty Also deals with numpy bug where cat fails if an element in sequence is empty Example: >>> # DISABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> import vtool_ibeis as vt >>> # test1 >>> tup = [] >>> ut.assert_eq(vt.safe_cat(tup, axis=0).shape, (0,)) >>> # test2 >>> tup = (np.array([[1, 2, 3]]), np.array([[]])) >>> s = vt.safe_cat(tup, axis=0) >>> print(ub.hzcat(['s = ', ub.repr2(s)]) >>> ut.assert_eq(s.shape, (1, 3)) >>> # test3 >>> tup = (np.array([[1, 2, 3]]), np.array([[3, 4, 5]])) >>> s = vt.safe_cat(tup, axis=1) >>> print(ub.hzcat(['s = ', ub.repr2(s)]) >>> ut.assert_eq(s.shape, (1, 6)) >>> # test3 >>> tup = (np.array(1), np.array(2), np.array(3)) >>> s = vt.safe_cat(tup, axis=1) >>> print(ub.hzcat(['s = ', ub.repr2(s)]) >>> ut.assert_eq(s.shape, (1, 6)) """ if tup is None or len(tup) == 0: stack = np.empty(default_shape, dtype=default_dtype) else: try: stack = np.concatenate(tup, axis=axis) except ValueError as ex1: try: # Ensure everything is at least a 1d array tup_ = [np.atleast_1d(np.asarray(a)) for a in tup] # remove empty parts tup_ = [a for a in tup_ if a.size > 0] stack = np.concatenate(tup_, axis=axis) except ValueError: # if axis == 0: # stack = np.hstack(tup) # elif axis == 1: # stack = np.vstack(tup) # elif axis == 3: # stack = np.dstack(tup) # else: raise ex1 return stack # try: # return np.concatenate(tup, axis=axis) # except ValueError: def median_abs_dev(arr_list, **kwargs): """ References: https://en.wikipedia.org/wiki/Median_absolute_deviation """ return np.median(np.abs(arr_list - np.median(arr_list, **kwargs)), **kwargs) def argsort_groups(scores_list, reverse=False, rng=np.random, randomize_levels=True): """ Sorts each group normally, but randomizes order of level values. 
TODO: move to vtool_ibeis Args: scores_list (list): reverse (bool): (default = True) rng (module): random number generator(default = numpy.random) CommandLine: python -m ibeis.init.filter_annots --exec-argsort_groups Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> scores_list = [ >>> np.array([np.nan, np.nan], dtype=np.float32), >>> np.array([np.nan, 2], dtype=np.float32), >>> np.array([4, 1, 1], dtype=np.float32), >>> np.array([7, 3, 3, 0, 9, 7, 5, 8], dtype=np.float32), >>> np.array([2, 4], dtype=np.float32), >>> np.array([np.nan, 4, np.nan, 8, np.nan, 9], dtype=np.float32), >>> ] >>> reverse = True >>> rng = np.random.RandomState(0) >>> idxs_list = argsort_groups(scores_list, reverse, rng) >>> result = 'idxs_list = %s' % (ut.repr4(idxs_list, with_dtype=False),) >>> print(result) """ scores_list_ = [np.array(scores, copy=True).astype(np.float) for scores in scores_list] breakers_list = [rng.rand(len(scores)) for scores in scores_list_] # replace nan with -inf, or inf randomize order between equal values replval = -np.inf if reverse else np.inf # Ensure that nans are ordered last for scores in scores_list_: scores[np.isnan(scores)] = replval # The last column is sorted by first with lexsort scorebreaker_list = [np.array((breakers, scores)) for scores, breakers in zip(scores_list_, breakers_list)] if reverse: idxs_list = [np.lexsort(scorebreaker)[::-1] for scorebreaker in scorebreaker_list] else: idxs_list = [np.lexsort(scorebreaker) for scorebreaker in scorebreaker_list] return idxs_list def check_sift_validity(sift_uint8, lbl=None, verbose=ut.NOT_QUIET): """ checks if a SIFT descriptor is valid """ if lbl is None: lbl = ut.get_varname_from_stack(sift_uint8, N=1) print('[checksift] Checking valididty of %d SIFT descriptors. lbl=%s' % ( sift_uint8.shape[0], lbl)) is_correct_shape = len(sift_uint8.shape) == 2 and sift_uint8.shape[1] == 128 is_correct_dtype = sift_uint8.dtype == np.uint8 if not is_correct_shape: print('[checksift] * incorrect shape = %r' % (sift_uint8.shape,)) elif verbose: print('[checksift] * correct shape = %r' % (sift_uint8.shape,)) if not is_correct_dtype: print('[checksift] * incorrect dtype = %r' % (sift_uint8.dtype,)) elif verbose: print('[checksift] * correct dtype = %r' % (sift_uint8.dtype,)) num_sifts = sift_uint8.shape[0] sift_float01 = sift_uint8 / 512.0 # Check L2 norm sift_norm = np.linalg.norm(sift_float01, axis=1) is_normal = np.isclose(sift_norm, 1.0, atol=.04) bad_locs_norm = np.where(np.logical_not(is_normal))[0] if len(bad_locs_norm) > 0: print('[checksift] * bad norm = %4d/%d' % (len(bad_locs_norm), num_sifts)) else: print('[checksift] * correctly normalized') # Check less than thresh=.2 # This check actually is not valid because the SIFT descriptors is # normalized after it is thresholded #bad_locs_thresh = np.where((sift_float01 > .2).sum(axis=1))[0] #print('[checksift] * bad thresh = %4d/%d' % (len(bad_locs_thresh), num_sifts)) #if len(bad_locs_thresh) > 0: # above_thresh = sift_float01[(sift_float01 > .2)] # print('[checksift] * components under thresh = %d' % (sift_float01 <= 2).sum()) # print('[checksift] * components above thresh stats = ' + # ut.get_stats_str(above_thresh, precision=2)) isok = len(bad_locs_norm) == 0 and is_correct_shape and is_correct_dtype if not isok: print('[checksift] ERROR. 
SIFT CHECK FAILED') return isok def get_crop_slices(isfill): fill_colxs = [np.where(row)[0] for row in isfill] fill_rowxs = [np.where(col)[0] for col in isfill.T] nRows, nCols = isfill.shape[0:2] filled_columns = intersect1d_reduce(fill_colxs) filled_rows = intersect1d_reduce(fill_rowxs) consec_rows_list = ut.group_consecutives(filled_rows) consec_cols_list = ut.group_consecutives(filled_columns) def get_consec_endpoint(consec_index_list, endpoint): """ consec_index_list = consec_cols_list endpoint = 0 """ for consec_index in consec_index_list: if np.any(np.array(consec_index) == endpoint): return consec_index def get_min_consec_endpoint(consec_rows_list, endpoint): consec_index = get_consec_endpoint(consec_rows_list, endpoint) if consec_index is None: return endpoint return max(consec_index) def get_max_consec_endpoint(consec_rows_list, endpoint): consec_index = get_consec_endpoint(consec_rows_list, endpoint) if consec_index is None: return endpoint + 1 return min(consec_index) consec_rows_top = get_min_consec_endpoint(consec_rows_list, 0) consec_rows_bottom = get_max_consec_endpoint(consec_rows_list, nRows - 1) remove_cols_left = get_min_consec_endpoint(consec_cols_list, 0) remove_cols_right = get_max_consec_endpoint(consec_cols_list, nCols - 1) rowslice = slice(consec_rows_top, consec_rows_bottom) colslice = slice(remove_cols_left, remove_cols_right) return rowslice, colslice def get_undirected_edge_ids(directed_edges): r""" Args: directed_edges (ndarray[ndims=2]): Returns: list: edgeid_list CommandLine: python -m vtool_ibeis.other --exec-get_undirected_edge_ids Example: >>> # DISABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]]) >>> edgeid_list = get_undirected_edge_ids(directed_edges) >>> result = ('edgeid_list = %s' % (ub.repr2(edgeid_list),)) >>> print(result) edgeid_list = [0 0 1 2 3 1 1] """ #import vtool_ibeis as vt undirected_edges = to_undirected_edges(directed_edges) edgeid_list = compute_unique_data_ids(undirected_edges) return edgeid_list def to_undirected_edges(directed_edges, upper=False): assert len(directed_edges.shape) == 2 and directed_edges.shape[1] == 2 #flipped = qaid_arr < daid_arr if upper: flipped = directed_edges.T[0] > directed_edges.T[1] else: flipped = directed_edges.T[0] < directed_edges.T[1] # standardize edge order edges_dupl = directed_edges.copy() edges_dupl[flipped, 0:2] = edges_dupl[flipped, 0:2][:, ::-1] undirected_edges = edges_dupl return undirected_edges def find_best_undirected_edge_indexes(directed_edges, score_arr=None): r""" Args: directed_edges (ndarray[ndims=2]): score_arr (ndarray): Returns: list: unique_edge_xs CommandLine: python -m vtool_ibeis.other --test-find_best_undirected_edge_indexes Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]]) >>> score_arr = np.array([1, 1, 1, 1, 1, 1, 2]) >>> unique_edge_xs = find_best_undirected_edge_indexes(directed_edges, score_arr) >>> result = str(unique_edge_xs) >>> print(result) [0 3 4 6] Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]]) >>> score_arr = None >>> unique_edge_xs = find_best_undirected_edge_indexes(directed_edges, score_arr) >>> result = str(unique_edge_xs) >>> print(result) [0 2 3 4] """ import vtool_ibeis as vt #assert len(directed_edges.shape) == 2 and directed_edges.shape[1] 
== 2 ##flipped = qaid_arr < daid_arr #flipped = directed_edges.T[0] < directed_edges.T[1] ## standardize edge order #edges_dupl = directed_edges.copy() #edges_dupl[flipped, 0:2] = edges_dupl[flipped, 0:2][:, ::-1] #edgeid_list = vt.compute_unique_data_ids(edges_dupl) edgeid_list = get_undirected_edge_ids(directed_edges) unique_edgeids, groupxs = vt.group_indices(edgeid_list) # if there is more than one edge in a group take the one with the highest score if score_arr is None: unique_edge_xs_list = [groupx[0] for groupx in groupxs] else: assert len(score_arr) == len(directed_edges) score_groups = vt.apply_grouping(score_arr, groupxs) score_argmaxs = [score_group.argmax() for score_group in score_groups] unique_edge_xs_list = [ groupx[argmax] for groupx, argmax in zip(groupxs, score_argmaxs) ] unique_edge_xs = np.array(sorted(unique_edge_xs_list), dtype=np.int32) return unique_edge_xs def argsort_records(arrays, reverse=False): r""" Sorts arrays that form records. Same as lexsort(arrays[::-1]) --- ie. rows are reversed. Args: arrays (ndarray): array of records reverse (bool): (default = False) Returns: ndarray: sortx - sorted indicies CommandLine: python -m vtool_ibeis.other --exec-argsort_records Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arrays = np.array([ >>> [1, 1, 1, 2, 2, 2, 3, 4, 5], >>> [2, 0, 2, 6, 4, 3, 2, 5, 6], >>> [1, 1, 0, 2, 3, 4, 5, 6, 7], >>> ],) >>> reverse = False >>> sortx = argsort_records(arrays, reverse) >>> result = ('sortx = %s' % (str(sortx),)) >>> print('lxsrt = %s' % (np.lexsort(arrays[::-1]),)) >>> print(result) sortx = [1 2 0 5 4 3 6 7 8] """ sorting_records = np.rec.fromarrays(arrays) sort_stride = (-reverse * 2) + 1 sortx = sorting_records.argsort()[::sort_stride] return sortx def unique_rows(arr, directed=True): """ Order or columns does not matter if directed = False """ if directed: idx_list = compute_unique_data_ids(arr) else: idx_list = get_undirected_edge_ids(arr) _, unique_rowx = np.unique(idx_list, return_index=True) unique_arr = arr.take(unique_rowx, axis=0) return unique_arr def compute_ndarray_unique_rowids_unsafe(arr): """ arr = np.random.randint(2, size=(10000, 10)) vt.compute_unique_data_ids_(list(map(tuple, arr))) len(vt.compute_unique_data_ids_(list(map(tuple, arr)))) len(np.unique(vt.compute_unique_data_ids_(list(map(tuple, arr))))) %timeit vt.compute_unique_data_ids_(list(map(tuple, arr))) %timeit compute_ndarray_unique_rowids_unsafe(arr) """ # no checks performed void_dtype = np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])) #assert arr.flags['C_CONTIGUOUS'] arr_void_view = arr.view(void_dtype) unique, rowids = np.unique(arr_void_view, return_inverse=True) return rowids #np.ascontiguousarray(arr).data == arr.data #assert arr.data == arr_void_view.data def nonunique_row_flags(arr): import vtool_ibeis as vt unique_rowx = unique_row_indexes(arr) unique_flags = vt.index_to_boolmask(unique_rowx, len(arr)) nonunique_flags = np.logical_not(unique_flags) return nonunique_flags def nonunique_row_indexes(arr): """ rows that are not unique (does not include the first instance of each pattern) Args: arr (ndarray): 2d array Returns: ndarray: nonunique_rowx SeeAlso: unique_row_indexes nonunique_row_flags CommandLine: python -m vtool_ibeis.other --test-unique_row_indexes Example: >>> # DISABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0], [.534, .432], [.534, .432], [1, 0], [0, 1]]) >>> nonunique_rowx = unique_row_indexes(arr) >>> result = 
('nonunique_rowx = %s' % (ub.repr2(nonunique_rowx),)) >>> print(result) nonunique_rowx = np.array([4, 6, 7, 8], dtype=np.int64) """ nonunique_flags = nonunique_row_flags(arr) nonunique_rowx = np.where(nonunique_flags)[0] return nonunique_rowx def compute_unique_data_ids(data): """ This is actually faster than compute_unique_integer_data_ids it seems CommandLine: python -m vtool_ibeis.other --test-compute_unique_data_ids Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> data = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0], [.534, .432], [.534, .432], [1, 0], [0, 1]]) >>> dataid_list = compute_unique_data_ids(data) >>> result = 'dataid_list = ' + ub.repr2(dataid_list, with_dtype=True) >>> print(result) dataid_list = np.array([0, 1, 2, 3, 0, 4, 4, 2, 1], dtype=np.int32) """ # construct a unique id for every edge hashable_rows = [tuple(row_.tolist()) for row_ in data] dataid_list = np.array(compute_unique_data_ids_(hashable_rows), dtype=np.int32) return dataid_list def compute_unique_data_ids_(hashable_rows, iddict_=None): if iddict_ is None: iddict_ = {} for row in hashable_rows: if row not in iddict_: iddict_[row] = len(iddict_) dataid_list = ut.dict_take(iddict_, hashable_rows) return dataid_list def compute_unique_arr_dataids(arr): """ specialized version for speed when arr is an ndarray """ iddict_ = {} hashable_rows = list(map(tuple, arr.tolist())) for row in hashable_rows: if row not in iddict_: iddict_[row] = len(iddict_) dataid_list = np.array([iddict_[row] for row in hashable_rows]) return dataid_list def compute_unique_integer_data_ids(data): r""" This is actually slower than compute_unique_data_ids it seems Example: >>> # DISABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> # build test data >>> data = np.array([[0, 0], [0, 1], [1, 1], [0, 0], [0, 0], [0, 1], [1, 1], [0, 0], [9, 0]]) >>> data = np.random.randint(1000, size=(1000, 2)) >>> # execute function >>> result1 = compute_unique_data_ids(data) >>> result2 = compute_unique_integer_data_ids(data) >>> # verify results >>> print(result) %timeit compute_unique_data_ids(data) %timeit compute_unique_integer_data_ids(data) """ # construct a unique id for every edge ncols = data.shape[1] # get the number of decimal places to shift exp_step = np.ceil(np.log10(data.max())) offsets = [int(10 ** (ix * exp_step)) for ix in reversed(range(0, ncols))] dataid_list = np.array([ sum([ item * offset for item, offset in zip(row, offsets) ]) for row in data]) return dataid_list def trytake(list_, index_list): return None if list_ is None else list_take_(list_, index_list) def list_take_(list_, index_list): if isinstance(list_, np.ndarray): return list_.take(index_list, axis=0) else: return list(ub.take(list_, index_list)) def compress2(arr, flag_list, axis=None, out=None): """ Wrapper around numpy compress that makes the signature more similar to take """ return np.compress(flag_list, arr, axis=axis, out=out) def take2(arr, index_list, axis=None, out=None): """ Wrapper around numpy compress that makes the signature more similar to take """ return np.take(arr, index_list, axis=axis, out=out) def list_compress_(list_, flag_list): if isinstance(list_, np.ndarray): return list_.compress(flag_list, axis=0) else: return list(ub.compress(list_, flag_list)) def index_partition(item_list, part1_items): """ returns two lists. The first are the indecies of items in item_list that are in part1_items. the second is the indices in item_list that are not in part1_items. 
items in part1_items that are not in item_list are ignored Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> item_list = ['dist', 'fg', 'distinctiveness'] >>> part1_items = ['fg', 'distinctiveness'] >>> part1_indexes, part2_indexes = index_partition(item_list, part1_items) >>> ut.assert_eq(part1_indexes.tolist(), [1, 2]) >>> ut.assert_eq(part2_indexes.tolist(), [0]) """ part1_indexes_ = [ item_list.index(item) for item in part1_items if item in item_list ] part1_indexes = np.array(part1_indexes_) part2_indexes = np.setdiff1d(np.arange(len(item_list)), part1_indexes) # FIXME: use dtype np.int_ part1_indexes = part1_indexes.astype(np.int32) part2_indexes = part2_indexes.astype(np.int32) return part1_indexes, part2_indexes # def partition_Nones(item_list): # """ # Example: # >>> # ENABLE_DOCTEST # >>> from vtool_ibeis.other import * # NOQA # >>> item_list = ['foo', None, None, 'bar'] # >>> part1_indexes, part2_indexes = partition_Nones(item_list) # """ # # part1_indexes_ = ut.list_where(item_list) # part1_indexes_ = [index for index, item in enumerate(item_list) if item is not None] # part1_indexes = np.array(part1_indexes_) # part2_indexes = np.setdiff1d(np.arange(len(item_list)), part1_indexes) # return part1_indexes, part2_indexes def rebuild_partition(part1_vals, part2_vals, part1_indexes, part2_indexes): r""" Inverts work done by index_partition Args: part1_vals (list): part2_vals (list): part1_indexes (dict): part2_indexes (dict): CommandLine: python -m vtool_ibeis.other --test-rebuild_partition Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> item_list = ['dist', 'fg', 'distinctiveness'] >>> part1_items = ['fg', 'distinctiveness'] >>> part1_indexes, part2_indexes = index_partition(item_list, part1_items) >>> part1_vals = ut.take(item_list, part1_indexes) >>> part2_vals = ut.take(item_list, part2_indexes) >>> val_list = rebuild_partition(part1_vals, part2_vals, part1_indexes, part2_indexes) >>> assert val_list == item_list, 'incorrect inversin' >>> print(val_list) """ val_list = [None] * (len(part1_indexes) + len(part2_indexes)) for idx, val in zip(part1_indexes, part1_vals): val_list[idx] = val for idx, val in zip(part2_indexes, part2_vals): val_list[idx] = val return val_list def weighted_average_scoring(fsv, weight_filtxs, nonweight_filtxs): r""" does \frac{\sum_i w^f_i * w^d_i * r_i}{\sum_i w^f_i, w^d_i} to get a weighed average of ratio scores If we normalize the weight part to add to 1 then we can get per-feature scores. References: http://en.wikipedia.org/wiki/Weighted_arithmetic_mean Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> fsv = np.array([ ... [ 0.82992172, 1.56136119, 0.66465378], ... [ 0.8000412 , 2.14719748, 1. ], ... [ 0.80848503, 2.6816361 , 1. ], ... [ 0.86761665, 2.70189977, 1. ], ... 
[ 0.8004055 , 1.58753884, 0.92178345],]) >>> weight_filtxs = np.array([1, 2], dtype=np.int32) >>> nonweight_filtxs = np.array([0], dtype=np.int32) >>> new_fs = weighted_average_scoring(fsv, weight_filtxs, nonweight_filtxs) >>> result = new_fs >>> print(result) """ weight_fs = fsv.T.take(weight_filtxs, axis=0).T.prod(axis=1) nonweight_fs = fsv.T.take(nonweight_filtxs, axis=0).T.prod(axis=1) weight_fs_norm01 = weight_fs / weight_fs.sum() #weight_fs_norm01[np.isnan(weight_fs_norm01)] = 0.0 # If weights are nan, fill them with zeros weight_fs_norm01 = np.nan_to_num(weight_fs_norm01) new_fs = np.multiply(nonweight_fs, weight_fs_norm01) return new_fs def assert_zipcompress(arr_list, flags_list, axis=None): num_flags = [len(flags) for flags in flags_list] if axis is None: num_arrs = [arr.size for arr in arr_list] else: num_arrs = [arr.shape[axis] for arr in arr_list] assert num_flags == num_arrs, 'not able to zipcompress' def zipcompress_safe(arr_list, flags_list, axis=None): arr_list = list(arr_list) flags_list = list(flags_list) assert_zipcompress(arr_list, flags_list, axis=axis) return zipcompress(arr_list, flags_list, axis) def zipcompress(arr_list, flags_list, axis=None): return [np.compress(flags, arr, axis=axis) for arr, flags in zip(arr_list, flags_list)] def ziptake(arr_list, indices_list, axis=None): return [arr.take(indices, axis=axis) for arr, indices in zip(arr_list, indices_list)] def zipcat(arr1_list, arr2_list, axis=None): r""" Args: arr1_list (list): arr2_list (list): axis (None): (default = None) Returns: list: CommandLine: python -m vtool_ibeis.other --exec-zipcat --show Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr1_list = [np.array([0, 0, 0]), np.array([0, 0, 0, 0])] >>> arr2_list = [np.array([1, 1, 1]), np.array([1, 1, 1, 1])] >>> axis = None >>> arr3_list = zipcat(arr1_list, arr2_list, axis) >>> arr3_list0 = zipcat(arr1_list, arr2_list, axis=0) >>> arr3_list1 = zipcat(arr1_list, arr2_list, axis=1) >>> arr3_list2 = zipcat(arr1_list, arr2_list, axis=2) >>> print('arr3_list = %s' % (ut.repr3(arr3_list),)) >>> print('arr3_list0 = %s' % (ut.repr3(arr3_list0),)) >>> print('arr3_list2 = %s' % (ut.repr3(arr3_list2),)) """ import vtool_ibeis as vt assert len(arr1_list) == len(arr2_list), 'lists must correspond' if axis is None: arr1_iter = arr1_list arr2_iter = arr2_list else: arr1_iter = [vt.atleast_nd(arr1, axis + 1) for arr1 in arr1_list] arr2_iter = [vt.atleast_nd(arr2, axis + 1) for arr2 in arr2_list] arrs_iter = list(zip(arr1_iter, arr2_iter)) arr3_list = [np.concatenate(arrs, axis=axis) for arrs in arrs_iter] return arr3_list def atleast_nd(arr, n, tofront=False): r""" View inputs as arrays with at least n dimensions. TODO: Commit to numpy Args: arr (array_like): One array-like object. Non-array inputs are converted to arrays. Arrays that already have n or more dimensions are preserved. n (int): tofront (bool): if True new dims are added to the front of the array CommandLine: python -m vtool_ibeis.other --exec-atleast_nd --show Returns: ndarray : An array with ``a.ndim >= n``. Copies are avoided where possible, and views with three or more dimensions are returned. For example, a 1-D array of shape ``(N,)`` becomes a view of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view of shape ``(M, N, 1)``. 
See Also: atleast_1d, atleast_2d, atleast_3d Example0: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> n = 2 >>> arr = np.array([1, 1, 1]) >>> arr_ = atleast_nd(arr, n) >>> result = ub.repr2(arr_.tolist()) >>> print(result) Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> n = 4 >>> arr1 = [1, 1, 1] >>> arr2 = np.array(0) >>> arr3 = np.array([[[[[1]]]]]) >>> arr1_ = atleast_nd(arr1, n) >>> arr2_ = atleast_nd(arr2, n) >>> arr3_ = atleast_nd(arr3, n) >>> result1 = ub.repr2(arr1_.tolist()) >>> result2 = ub.repr2(arr2_.tolist()) >>> result3 = ub.repr2(arr3_.tolist()) >>> result = '\n'.join([result1, result2, result3]) >>> print(result) """ arr_ = np.asanyarray(arr) ndims = len(arr_.shape) if n is not None and ndims < n: # append the required number of dimensions to the end if tofront: expander = (None,) * (n - ndims) + (Ellipsis,) else: expander = (Ellipsis,) + (None,) * (n - ndims) arr_ = arr_[expander] return arr_ def ensure_shape(arr, dimshape): """ Ensures that an array takes a certain shape. The total size of the array must not change. Args: arr (ndarray): array to change the shape of dimshape (tuple): desired shape (Nones can be used to broadcast dimensions) Returns: ndarray: arr_ - the input array, which has been modified inplace. CommandLine: python -m vtool_ibeis.other ensure_shape Doctest: >>> from vtool_ibeis.other import * # NOQA >>> arr = np.zeros((7, 7)) >>> dimshape = (None, None, 3) >>> arr2 = ensure_shape(np.array([[1, 2]]), (None, 2)) >>> assert arr2.shape == (1, 2) >>> arr3 = ensure_shape(np.array([]), (None, 2)) >>> assert arr3.shape == (0, 2) """ if isinstance(dimshape, tuple): n = len(dimshape) else: n = dimshape dimshape = None arr_ = atleast_nd(arr, n) if dimshape is not None: newshape = tuple([ d1 if d2 is None else d2 for d1, d2 in zip(arr_.shape, dimshape)]) arr_.shape = newshape return arr_ def significant_shape(arr): """ find the shape without trailing 1's """ sig_dim = 0 for i, dim in enumerate(arr.shape, start=1): if dim != 1: sig_dim = i sig_shape = arr.shape[0:sig_dim] return sig_shape def atleast_shape(arr, dimshape): """ Ensures that an array takes a certain shape. The total size of the array must not change. Args: arr (ndarray): array to change the shape of dimshape (tuple): desired shape (Nones can be used to broadcast dimensions) Returns: ndarray: arr_ - the input array, which has been modified inplace. 
CommandLine: python -m vtool_ibeis.other ensure_shape Doctest: >>> from vtool_ibeis.other import * # NOQA >>> arr = np.zeros((7, 7)) >>> assert atleast_shape(arr, (1, 1, 3,)).shape == (7, 7, 3) >>> assert atleast_shape(arr, (1, 1, 2, 4,)).shape == (7, 7, 2, 4) >>> assert atleast_shape(arr, (1, 1,)).shape == (7, 7,) >>> assert atleast_shape(arr, (1, 1, 1)).shape == (7, 7, 1) >>> assert atleast_shape(np.zeros(()), (1,)).shape == (1,) >>> assert atleast_shape(np.zeros(()), tuple()).shape == tuple() >>> assert atleast_shape(np.zeros(()), (1, 2, 3,)).shape == (1, 2, 3) >>> ut.assert_raises(ValueError, atleast_shape, arr, (2, 2)) >>> assert atleast_shape(np.zeros((7, 7, 3)), (1, 1, 3)).shape == (7, 7, 3) >>> ut.assert_raises(ValueError, atleast_shape, np.zeros((7, 7, 3)), (1, 1, 4)) """ n = len(dimshape) sig_shape = significant_shape(arr) if n < len(sig_shape): raise ValueError( 'len(dimshape)={} must be >= than ' 'len(significant_shape(arr)={})'.format(n, sig_shape)) arr_ = atleast_nd(arr, n) for d1, d2 in zip(arr_.shape, dimshape): if d2 > 1 and d1 != 1 and d1 != d2: raise ValueError('cannot broadcast {} to {}'.format( arr_.shape, dimshape )) reps = tuple(1 if d2 is None or (d1 == d2) else d2 for d1, d2 in zip(arr_.shape, dimshape)) arr_ = np.tile(arr_, reps) return arr_ def atleast_3channels(arr, copy=True): r""" Ensures that there are 3 channels in the image Args: arr (ndarray[N, M, ...]): the image copy (bool): Always copies if True, if False, then copies only when the size of the array must change. Returns: ndarray: with shape (N, M, C), where C in {3, 4} CommandLine: python -m vtool_ibeis.other atleast_3channels Doctest: >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt >>> assert atleast_3channels(np.zeros((10, 10))).shape[-1] == 3 >>> assert atleast_3channels(np.zeros((10, 10, 1))).shape[-1] == 3 >>> assert atleast_3channels(np.zeros((10, 10, 3))).shape[-1] == 3 >>> assert atleast_3channels(np.zeros((10, 10, 4))).shape[-1] == 4 """ # atleast_shape(arr, (None, None, 3)) ndims = len(arr.shape) if ndims == 2: res = np.tile(arr[:, :, None], 3) return res elif ndims == 3: h, w, c = arr.shape if c == 1: res = np.tile(arr, 3) elif c in [3, 4]: res = arr.copy() if copy else arr else: raise ValueError('Cannot handle ndims={}'.format(ndims)) else: raise ValueError('Cannot handle arr.shape={}'.format(arr.shape)) return res def iter_reduce_ufunc(ufunc, arr_iter, out=None): """ constant memory iteration and reduction applys ufunc from left to right over the input arrays Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr_list = [ ... np.array([0, 1, 2, 3, 8, 9]), ... np.array([4, 1, 2, 3, 4, 5]), ... np.array([0, 5, 2, 3, 4, 5]), ... np.array([1, 1, 6, 3, 4, 5]), ... np.array([0, 1, 2, 7, 4, 5]) ... ] >>> memory = np.array([9, 9, 9, 9, 9, 9]) >>> gen_memory = memory.copy() >>> def arr_gen(arr_list, gen_memory): ... for arr in arr_list: ... gen_memory[:] = arr ... 
yield gen_memory >>> print('memory = %r' % (memory,)) >>> print('gen_memory = %r' % (gen_memory,)) >>> ufunc = np.maximum >>> res1 = iter_reduce_ufunc(ufunc, iter(arr_list), out=None) >>> res2 = iter_reduce_ufunc(ufunc, iter(arr_list), out=memory) >>> res3 = iter_reduce_ufunc(ufunc, arr_gen(arr_list, gen_memory), out=memory) >>> print('res1 = %r' % (res1,)) >>> print('res2 = %r' % (res2,)) >>> print('res3 = %r' % (res3,)) >>> print('memory = %r' % (memory,)) >>> print('gen_memory = %r' % (gen_memory,)) >>> assert np.all(res1 == res2) >>> assert np.all(res2 == res3) """ # Get first item in iterator try: initial = next(arr_iter) except StopIteration: return None # Populate the outvariable if specified otherwise make a copy of the first # item to be the output memory if out is not None: out[:] = initial else: out = initial.copy() # Iterate and reduce for arr in arr_iter: ufunc(out, arr, out=out) return out def clipnorm(arr, min_, max_, out=None): """ normalizes arr to the range 0 to 1 using min_ and max_ as clipping bounds """ if max_ == 1 and min_ == 0: if out is not None: out[:] = arr else: out = arr.copy() return out out_args = tuple() if out is None else (out,) arr_ = np.subtract(arr, min_, *out_args) arr_ = np.divide(arr_, max_ - min_, *out_args) arr_ = np.clip(arr_, 0.0, 1.0, *out_args) return arr_ def intersect1d_reduce(arr_list, assume_unique=False): arr_iter = iter(arr_list) out = next(arr_iter) for arr in arr_iter: out = np.intersect1d(out, arr, assume_unique=assume_unique) return out def componentwise_dot(arr1, arr2): """ a dot product is a componentwise multiplication of two vector and then a sum. Args: arr1 (ndarray) arr2 (ndarray): Returns: ndarray: cosangle Example: >>> # DISABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> np.random.seed(0) >>> arr1 = np.random.rand(3, 128) >>> arr1 = arr1 / np.linalg.norm(arr1, axis=1)[:, None] >>> arr2 = arr1 >>> cosangle = componentwise_dot(arr1, arr2) >>> result = str(cosangle) >>> print(result) [ 1. 1. 1.] 
""" cosangle = np.multiply(arr1, arr2).sum(axis=-1).T return cosangle def intersect2d_indices(A, B): r""" Args: A (ndarray[ndims=2]): B (ndarray[ndims=2]): Returns: tuple: (ax_list, bx_list) CommandLine: python -m vtool_ibeis.other --test-intersect2d_indices Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> # build test data >>> A = np.array([[ 158, 171], [ 542, 297], [ 955, 1113], [ 255, 1254], [ 976, 1255], [ 170, 1265]]) >>> B = np.array([[ 117, 211], [ 158, 171], [ 255, 1254], [ 309, 328], [ 447, 1148], [ 750, 357], [ 976, 1255]]) >>> # execute function >>> (ax_list, bx_list) = intersect2d_indices(A, B) >>> # verify results >>> result = str((ax_list, bx_list)) >>> print(result) """ flag_list1, flag_list2 = intersect2d_flags(A, B) ax_list = np.flatnonzero(flag_list1) bx_list = np.flatnonzero(flag_list2) return ax_list, bx_list def intersect2d_flags(A, B): r""" Checks intersection of rows of A against rows of B Args: A (ndarray[ndims=2]): B (ndarray[ndims=2]): Returns: tuple: (flag_list1, flag_list2) CommandLine: python -m vtool_ibeis.other --test-intersect2d_flags SeeAlso: np.in1d - the one dimensional version Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> A = np.array([[609, 307], [ 95, 344], [ 1, 690]]) >>> B = np.array([[ 422, 1148], [ 422, 968], [ 481, 1148], [ 750, 1132], [ 759, 159]]) >>> (flag_list1, flag_list2) = intersect2d_flags(A, B) >>> result = str((flag_list1, flag_list2)) >>> print(result) """ A_, B_, C_ = intersect2d_structured_numpy(A, B) flag_list1 = flag_intersection(A_, C_) flag_list2 = flag_intersection(B_, C_) return flag_list1, flag_list2 def flag_intersection(arr1, arr2): r""" Flags the rows in `arr1` that contain items in `arr2` Returns: ndarray: flags where len(flags) == len(arr1) Example0: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr1 = np.array([0, 1, 2, 3, 4, 5]) >>> arr2 = np.array([2, 6, 4]) >>> flags = flag_intersection(arr1, arr2) >>> assert len(flags) == len(arr1) >>> result = ('flags = %s' % (ub.repr2(flags),)) >>> print(result) Example1: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> import vtool_ibeis as vt >>> arr1 = np.array([[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5]]) >>> arr2 = np.array([[0, 2], [0, 6], [0, 4], [3, 0]]) >>> arr1, arr2 = vt.structure_rows(arr1, arr2) >>> flags = flag_intersection(arr1, arr2) >>> assert len(flags) == len(arr1) >>> result = ('flags = %s' % (ub.repr2(flags),)) >>> print(result) Example2: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr1 = np.array([0, 1, 2, 3, 4, 5]) >>> arr2 = np.array([]) >>> flags = flag_intersection(arr1, arr2) >>> assert len(flags) == len(arr1) >>> flags = flag_intersection(np.array([]), np.array([2, 6, 4])) >>> assert len(flags) == 0 Timeit: >>> setup = ut.codeblock( >>> r''' import vtool_ibeis as vt import numpy as np rng = np.random.RandomState(0) arr1 = rng.randint(0, 100, 100000).reshape(-1, 2) arr2 = rng.randint(0, 100, 1000).reshape(-1, 2) arr1_, arr2_ = vt.structure_rows(arr1, arr2) ''') >>> stmt_list = ut.codeblock( >>> ''' np.array([row in arr2_ for row in arr1_]) np.logical_or.reduce([arr1_ == row_ for row_ in arr2_]).ravel() vt.iter_reduce_ufunc(np.logical_or, (arr1_ == row_ for row_ in arr2_)).ravel() ''').split('\n') >>> out = ut.timeit_compare(stmt_list, setup=setup, iterations=3) """ import vtool_ibeis as vt if arr1.size == 0 or arr2.size == 0: flags = np.full(arr1.shape[0], False, dtype=np.bool) #return np.empty((0,), dtype=np.bool) else: # flags = 
np.logical_or.reduce([arr1 == row for row in arr2]).T[0] flags = vt.iter_reduce_ufunc(np.logical_or, (arr1 == row_ for row_ in arr2)).ravel() return flags def structure_rows(*arrs): r""" CommandLine: python -m vtool_ibeis.other structure_rows SeeAlso: unstructure_rows Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arr1 = np.array([[609, 307], [ 95, 344], [ 1, 690]]) >>> arr2 = np.array([[ 422, 1148], [ 422, 968], [ 481, 1148], [ 750, 1132], [ 759, 159]]) >>> arrs = (arr1, arr2) >>> structured_arrs = structure_rows(*arrs) >>> unstructured_arrs = unstructure_rows(*structured_arrs) >>> assert np.all(unstructured_arrs[0] == arrs[0]) >>> assert np.all(unstructured_arrs[1] == arrs[1]) >>> union_ = np.union1d(*structured_arrs) >>> union, = unstructure_rows(union_) >>> assert len(union.shape) == 2 """ arr0 = arrs[0] ncols = arr0.shape[1] dtype = {'names': ['f%d' % (i,) for i in range(ncols)], 'formats': ncols * [arr0.dtype]} for arr in arrs: assert len(arr.shape) == 2, 'arrays must be 2d' assert arr.dtype == arr0.dtype, 'arrays must share the same dtype' assert arr.shape[1] == ncols, 'arrays must share column shape' structured_arrs = [] for arr in arrs: arr_ = np.ascontiguousarray(arr).view(dtype) structured_arrs.append(arr_) return structured_arrs def unstructure_rows(*structured_arrs): r""" SeeAlso: structure_rows """ # TODO: assert arr.dtype.fields are all the same type unstructured_arrs = [arr.view(list(arr.dtype.fields.values())[0][0]) for arr in structured_arrs] unstructured_arrs = [] for arr_ in structured_arrs: dtype = list(arr_.dtype.fields.values())[0][0] arr = arr_.view(dtype).reshape(-1, 2) unstructured_arrs.append(arr) return unstructured_arrs def intersect2d_structured_numpy(arr1, arr2, assume_unique=False): """ Args: arr1: unstructured 2d array arr2: unstructured 2d array Returns: A_, B_, C_ - structured versions of arr1, and arr2, and their structured intersection References: http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array http://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays """ ncols = arr1.shape[1] assert arr1.dtype == arr2.dtype, ( 'arr1 and arr2 must have the same dtypes.' 'arr1.dtype=%r, arr2.dtype=%r' % (arr1.dtype, arr2.dtype)) # [('f%d' % i, arr1.dtype) for i in range(ncols)] #dtype = np.dtype([('f%d' % i, arr1.dtype) for i in range(ncols)]) #dtype = {'names': ['f{}'.format(i) for i in range(ncols)], # 'formats': ncols * [arr1.dtype]} dtype = {'names': ['f%d' % (i,) for i in range(ncols)], 'formats': ncols * [arr1.dtype]} #try: A_ = np.ascontiguousarray(arr1).view(dtype) B_ = np.ascontiguousarray(arr2).view(dtype) C_ = np.intersect1d(A_, B_, assume_unique=assume_unique) #C = np.intersect1d(arr1.view(dtype), # arr2.view(dtype), # assume_unique=assume_unique) #except ValueError: # C = np.intersect1d(A.copy().view(dtype), # B.copy().view(dtype), # assume_unique=assume_unique) return A_, B_, C_ def intersect2d_numpy(A, B, assume_unique=False, return_indices=False): """ References:: http://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays/8317155#8317155 Args: A (ndarray[ndims=2]): B (ndarray[ndims=2]): assume_unique (bool): Returns: ndarray[ndims=2]: C CommandLine: python -m vtool_ibeis.other --test-intersect2d_numpy Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> # build test data >>> A = np.array([[ 0, 78, 85, 283, 396, 400, 403, 412, 535, 552], ... 
[152, 98, 32, 260, 387, 285, 22, 103, 55, 261]]).T >>> B = np.array([[403, 85, 412, 85, 815, 463, 613, 552], ... [ 22, 32, 103, 116, 188, 199, 217, 254]]).T >>> assume_unique = False >>> # execute function >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True) >>> # verify results >>> result = str((C.T, Ax, Bx)) >>> print(result) (array([[ 85, 403, 412], [ 32, 22, 103]]), array([2, 6, 7]), array([0, 1, 2])) Example2: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> A = np.array([[1, 2, 3], [1, 1, 1]]) >>> B = np.array([[1, 2, 3], [1, 2, 14]]) >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True) >>> result = str((C, Ax, Bx)) >>> print(result) (array([[1, 2, 3]]), array([0]), array([0])) """ nrows, ncols = A.shape A_, B_, C_ = intersect2d_structured_numpy(A, B, assume_unique) # This last bit is optional if you're okay with "C" being a structured array... C = C_.view(A.dtype).reshape(-1, ncols) if return_indices: ax_list = np.flatnonzero(flag_intersection(A_, C_)) bx_list = np.flatnonzero(flag_intersection(B_, C_)) return C, ax_list, bx_list else: return C def nearest_point(x, y, pts, mode='random'): """ finds the nearest point(s) in pts to (x, y) """ dists = (pts.T[0] - x) ** 2 + (pts.T[1] - y) ** 2 fx = dists.argmin() mindist = dists[fx] other_fx = np.where(mindist == dists)[0] if len(other_fx) > 0: if mode == 'random': np.random.shuffle(other_fx) fx = other_fx[0] if mode == 'all': fx = other_fx if mode == 'first': fx = fx return fx, mindist def get_uncovered_mask(covered_array, covering_array): r""" Args: covered_array (ndarray): covering_array (ndarray): Returns: ndarray: flags CommandLine: python -m vtool_ibeis.other --test-get_uncovered_mask Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> covered_array = [1, 2, 3, 4, 5] >>> covering_array = [2, 4, 5] >>> flags = get_uncovered_mask(covered_array, covering_array) >>> result = str(flags) >>> print(result) [ True False True False False] Example2: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> covered_array = [1, 2, 3, 4, 5] >>> covering_array = [] >>> flags = get_uncovered_mask(covered_array, covering_array) >>> result = str(flags) >>> print(result) [ True True True True True] Example3: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> covered_array = np.array([ ... [1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... 
], dtype=np.int32) >>> covering_array = [2, 4, 5] >>> flags = get_uncovered_mask(covered_array, covering_array) >>> result = ub.repr2(flags, with_dtype=True) >>> print(result) np.array([[ True, False, True], [False, False, True], [ True, True, True]], dtype=bool) Ignore:: covering_array = [1, 2, 3, 4, 5, 6, 7] %timeit get_uncovered_mask(covered_array, covering_array) 100000 loops, best of 3: 18.6 µs per loop %timeit get_uncovered_mask2(covered_array, covering_array) 100000 loops, best of 3: 16.9 µs per loop """ import vtool_ibeis as vt if len(covering_array) == 0: return np.ones(np.shape(covered_array), dtype=np.bool) else: flags_iter = (np.not_equal(covered_array, item) for item in covering_array) mask_array = vt.iter_reduce_ufunc(np.logical_and, flags_iter) return mask_array #if len(covering_array) == 0: # return np.ones(np.shape(covered_array), dtype=np.bool) #else: # flags_list = (np.not_equal(covered_array, item) for item in covering_array) # mask_array = and_lists(*flags_list) # return mask_array #def get_uncovered_mask2(covered_array, covering_array): # if len(covering_array) == 0: # return np.ones(np.shape(covered_array), dtype=np.bool) # else: # flags_iter = (np.not_equal(covered_array, item) for item in covering_array) # mask_array = vt.iter_reduce_ufunc(np.logical_and, flags_iter) # return mask_array def get_covered_mask(covered_array, covering_array): return ~get_uncovered_mask(covered_array, covering_array) def mult_lists(*args): return np.multiply.reduce(args) def or_lists(*args): """ Like np.logical_and, but can take more than 2 arguments SeeAlso: and_lists """ flags = np.logical_or.reduce(args) return flags def and_lists(*args): """ Like np.logical_and, but can take more than 2 arguments CommandLine: python -m vtool_ibeis.other --test-and_lists SeeAlso: or_lists Example1: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arg1 = np.array([1, 1, 1, 1,]) >>> arg2 = np.array([1, 1, 0, 1,]) >>> arg3 = np.array([0, 1, 0, 1,]) >>> args = (arg1, arg2, arg3) >>> flags = and_lists(*args) >>> result = str(flags) >>> print(result) [False True False True] Example2: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> size = 10000 >>> rng = np.random.RandomState(0) >>> arg1 = rng.randint(2, size=size) >>> arg2 = rng.randint(2, size=size) >>> arg3 = rng.randint(2, size=size) >>> args = (arg1, arg2, arg3) >>> flags = and_lists(*args) >>> # ensure equal division >>> segments = 5 >>> validx = np.where(flags)[0] >>> endx = int(segments * (validx.size // (segments))) >>> parts = np.split(validx[:endx], segments) >>> result = str(list(map(np.sum, parts))) >>> print(result) [243734, 714397, 1204989, 1729375, 2235191] %timeit reduce(np.logical_and, args) %timeit np.logical_and.reduce(args) # wins with more data """ return np.logical_and.reduce(args) def rowwise_operation(arr1, arr2, op): """ DEPRICATE THIS IS POSSIBLE WITH STRICTLY BROADCASTING AND USING np.newaxis DEPRICATE, numpy has better ways of doing this. Is the rowwise name correct? Should it be colwise? performs an operation between an (N x A x B ... 
x Z) array with an (N x 1) array """ # FIXME: not sure this is the correct terminology assert arr1.shape[0] == arr2.shape[0] broadcast_dimensions = arr1.shape[1:] # need padding for tileshape = tuple(list(broadcast_dimensions) + [1]) arr2_ = np.rollaxis(np.tile(arr2, tileshape), -1) rowwise_result = op(arr1, arr2_) return rowwise_result def colwise_operation(arr1, arr2, op): arr1T = arr1.T arr2T = arr2.T rowwise_result = rowwise_operation(arr1T, arr2T, op) colwise_result = rowwise_result.T return colwise_result def compare_matrix_columns(matrix, columns, comp_op=np.equal, logic_op=np.logical_or): """ REPLACE WITH: qfx2_invalid = logic_op.reduce([comp_op([:, None], qfx2_normnid) for col1 in qfx2_topnid.T]) """ # FIXME: Generalize #row_matrix = matrix.T #row_list = columns.T return compare_matrix_to_rows(matrix.T, columns.T, comp_op=comp_op, logic_op=logic_op).T def compare_matrix_to_rows(row_matrix, row_list, comp_op=np.equal, logic_op=np.logical_or): """ Compares each row in row_list to each row in row matrix using comp_op Both must have the same number of columns. Performs logic_op on the results of each individual row SeeAlso: ibeis.algo.hots.nn_weights.mark_name_valid_normalizers compop = np.equal logic_op = np.logical_or """ row_result_list = [np.array([comp_op(matrow, row) for matrow in row_matrix]) for row in row_list] output = row_result_list[0] for row_result in row_result_list[1:]: logic_op(output, row_result, out=output) #output = logic_op(output, row_result) return output def norm01(array, dim=None): """ normalizes a numpy array from 0 to 1 based in its extent Args: array (ndarray): dim (int): Returns: ndarray: Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> array = np.array([ 22, 1, 3, 2, 10, 42, ]) >>> dim = None >>> array_norm = norm01(array, dim) >>> result = ub.repr2(array_norm, precision=3) >>> print(result) """ if not ut.is_float(array): array = array.astype(np.float32) array_max = array.max(dim) array_min = array.min(dim) array_exnt = np.subtract(array_max, array_min) array_norm = np.divide(np.subtract(array, array_min), array_exnt) return array_norm def weighted_geometic_mean_unnormalized(data, weights): import vtool_ibeis as vt terms = [x ** w for x, w in zip(data, weights)] termprod = vt.iter_reduce_ufunc(np.multiply, iter(terms)) return termprod def weighted_geometic_mean(data, weights): r""" Args: data (list of ndarrays): weights (ndarray): Returns: ndarray: gmean_ CommandLine: python -m vtool_ibeis.other --test-weighted_geometic_mean References: https://en.wikipedia.org/wiki/Weighted_geometric_mean SeeAlso: scipy.stats.mstats.gmean Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> data = [.9, .5] >>> weights = np.array([1.0, .5]) >>> gmean_ = weighted_geometic_mean(data, weights) >>> result = ('gmean_ = %.3f' % (gmean_,)) >>> print(result) gmean_ = 0.740 Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> rng = np.random.RandomState(0) >>> img1 = rng.rand(4, 4) >>> img2 = rng.rand(4, 4) >>> data = [img1, img2] >>> weights = np.array([.5, .5]) >>> gmean_ = weighted_geometic_mean(data, weights) >>> result = ub.hzcat(['gmean_ = ', ub.repr2(gmean_, precision=2, with_dtype=True)]) >>> print(result) Ignore: res1 = ((img1 ** .5 * img2 ** .5)) ** 1 res2 = np.sqrt(img1 * img2) """ import vtool_ibeis as vt terms = [np.asarray(x ** w) for x, w in zip(data, weights)] termprod = vt.iter_reduce_ufunc(np.multiply, iter(terms)) exponent = 1 / np.sum(weights) gmean_ = termprod ** exponent return gmean_ 
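# --- Editor's note (added example, not in the original module) ---
# weighted_geometic_mean computes gmean = (prod_i x_i ** w_i) ** (1 / sum_i w_i).
# The doctest value above (gmean_ = 0.740) can be checked by hand:
#     (0.9 ** 1.0 * 0.5 ** 0.5) ** (1 / 1.5) ~= 0.740
def _weighted_geometric_mean_check():
    data = [0.9, 0.5]
    weights = np.array([1.0, 0.5])
    expected = (0.9 ** 1.0 * 0.5 ** 0.5) ** (1.0 / weights.sum())
    assert abs(weighted_geometic_mean(data, weights) - expected) < 1e-8
    return expected  # ~0.740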
def grab_webcam_image(): """ References: http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html CommandLine: python -m vtool_ibeis.other --test-grab_webcam_image --show Example: >>> # SCRIPT >>> from vtool_ibeis.other import * # NOQA >>> import vtool_ibeis as vt >>> img = grab_webcam_image() >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt >>> pt.imshow(img) >>> vt.imwrite('webcap.jpg', img) >>> ut.show_if_requested() """ import cv2 cap = cv2.VideoCapture(0) # Capture frame-by-frame ret, img = cap.read() # When everything done, release the capture cap.release() return img #def xor_swap(arr1, arr2, inplace=True): # if not inplace: # arr1 = arr1.copy() # arr2 = arr2.copy() # np.bitwise_xor(arr1, arr2, out=arr1) # np.bitwise_xor(arr1, arr2, out=arr2) # np.bitwise_xor(arr1, arr2, out=arr1) # return arr1, arr2 def find_first_true_indices(flags_list): """ TODO: move to vtool_ibeis returns a list of indexes where the index is the first True position in the corresponding sublist or None if it does not exist in other words: for each row finds the smallest True column number or None Args: flags_list (list): list of lists of booleans CommandLine: python -m utool.util_list --test-find_first_true_indices Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> # build test data >>> flags_list = [[True, False, True], ... [False, False, False], ... [False, True, True], ... [False, False, True]] >>> # execute function >>> index_list = find_first_true_indices(flags_list) >>> # verify results >>> result = str(index_list) >>> print(result) [0, None, 1, 2] """ def tryget_fisrt_true(flags): index_list = np.where(flags)[0] index = None if len(index_list) == 0 else index_list[0] return index index_list = [tryget_fisrt_true(flags) for flags in flags_list] return index_list def find_k_true_indicies(flags_list, k): r""" Uses output of either this function or find_first_true_indices to find the next index of true flags Args: flags_list (list): list of lists of booleans CommandLine: python -m utool.util_list --test-find_next_true_indices Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> flags_list = [[False, False, True], ... [False, False, False], ... [False, True, True], ... [True, True, True]] >>> k = 2 >>> indices = find_k_true_indicies(flags_list, k) >>> result = str(indices) >>> print(result) [array([2]), None, array([1, 2]), array([0, 1])] """ if False: import vtool_ibeis as vt flags_list = np.array(flags_list) rowxs, colxs = np.where(flags_list) first_k_groupxs = [groupx[0:k] for groupx in vt.group_indices(rowxs)[1]] chosen_xs = np.hstack(first_k_groupxs) flat_xs = np.ravel_multi_index((rowxs.take(chosen_xs), colxs.take(chosen_xs)), flags_list.shape) flat_xs def tryget_k_true(flags): index_list = np.where(flags)[0] index = None if len(index_list) == 0 else index_list[0:k] return index index_list = [tryget_k_true(flags) for flags in flags_list] return index_list def find_next_true_indices(flags_list, offset_list): r""" Uses output of either this function or find_first_true_indices to find the next index of true flags Args: flags_list (list): list of lists of booleans CommandLine: python -m utool.util_list --test-find_next_true_indices Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> # build test data >>> flags_list = [[True, False, True], ... [False, False, False], ... [False, True, True], ... 
[False, False, True]] >>> offset_list = find_first_true_indices(flags_list) >>> # execute function >>> index_list = find_next_true_indices(flags_list, offset_list) >>> # verify results >>> result = str(index_list) >>> print(result) [2, None, 2, None] """ def tryget_next_true(flags, offset_): offset = offset_ + 1 relative_flags = flags[offset:] rel_index_list = np.where(relative_flags)[0] index = None if len(rel_index_list) == 0 else rel_index_list[0] + offset return index index_list = [None if offset is None else tryget_next_true(flags, offset) for flags, offset in zip(flags_list, offset_list)] return index_list def ensure_rng(seed=None): """ Returns a numpy random number generator given a seed. """ if seed is None: rng = np.random elif isinstance(seed, np.random.RandomState): rng = seed else: rng = np.random.RandomState(seed) return rng def safe_extreme(arr, op, fill=np.nan, finite=False, nans=True): """ Applies an exterme operation to an 1d array (typically max/min) but ensures a value is always returned even in operations without identities. The default identity must be specified using the `fill` argument. Args: arr (ndarray): 1d array to take extreme of op (func): vectorized operation like np.max to apply to array fill (float): return type if arr has no elements (default = nan) finite (bool): if True ignores non-finite values (default = False) nans (bool): if False ignores nans (default = True) """ if arr is None: extreme = fill else: arr = np.asarray(arr) if finite: arr = arr.compress(np.isfinite(arr)) if not nans: arr = arr.compress(np.logical_not(np.isnan(arr))) if len(arr) == 0: extreme = fill else: extreme = op(arr) return extreme def safe_argmax(arr, fill=np.nan, finite=False, nans=True): """ Doctest: >>> from vtool_ibeis.other import * >>> assert safe_argmax([np.nan, np.nan], nans=False) == 0 >>> assert safe_argmax([-100, np.nan], nans=False) == 0 >>> assert safe_argmax([np.nan, -100], nans=False) == 1 >>> assert safe_argmax([-100, 0], nans=False) == 1 >>> assert np.isnan(safe_argmax([])) """ if len(arr) == 0: return fill extreme = safe_max(arr, fill=fill, finite=finite, nans=nans) if np.isnan(extreme): arg_extreme = np.where(np.isnan(arr))[0][0] else: arg_extreme = np.where(arr == extreme)[0][0] return arg_extreme def safe_max(arr, fill=np.nan, finite=False, nans=True): r""" Args: arr (ndarray): 1d array to take max of fill (float): return type if arr has no elements (default = nan) finite (bool): if True ignores non-finite values (default = False) nans (bool): if False ignores nans (default = True) CommandLine: python -m vtool_ibeis.other safe_max --show Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arrs = [[], [np.nan], [-np.inf, np.nan, np.inf], [np.inf], [np.inf, 1], [0, 1]] >>> arrs = [np.array(arr) for arr in arrs] >>> fill = np.nan >>> results1 = [safe_max(arr, fill, finite=False, nans=True) for arr in arrs] >>> results2 = [safe_max(arr, fill, finite=True, nans=True) for arr in arrs] >>> results3 = [safe_max(arr, fill, finite=True, nans=False) for arr in arrs] >>> results4 = [safe_max(arr, fill, finite=False, nans=False) for arr in arrs] >>> results = [results1, results2, results3, results4] >>> result = ('results = %s' % (ub.repr2(results, nl=1),)) >>> print(result) results = [ [nan, nan, nan, inf, inf, 1], [nan, nan, nan, nan, 1.0, 1], [nan, nan, nan, nan, 1.0, 1], [nan, nan, inf, inf, inf, 1], ] """ return safe_extreme(arr, np.max, fill, finite, nans) def safe_min(arr, fill=np.nan, finite=False, nans=True): """ Example: >>> # 
ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> arrs = [[], [np.nan], [-np.inf, np.nan, np.inf], [np.inf], [np.inf, 1], [0, 1]] >>> arrs = [np.array(arr) for arr in arrs] >>> fill = np.nan >>> results1 = [safe_min(arr, fill, finite=False, nans=True) for arr in arrs] >>> results2 = [safe_min(arr, fill, finite=True, nans=True) for arr in arrs] >>> results3 = [safe_min(arr, fill, finite=True, nans=False) for arr in arrs] >>> results4 = [safe_min(arr, fill, finite=False, nans=False) for arr in arrs] >>> results = [results1, results2, results3, results4] >>> result = ('results = %s' % (ub.repr2(results, nl=1),)) >>> print(result) results = [ [nan, nan, nan, inf, 1.0, 0], [nan, nan, nan, nan, 1.0, 0], [nan, nan, nan, nan, 1.0, 0], [nan, nan, -inf, inf, 1.0, 0], ] """ return safe_extreme(arr, np.min, fill, finite, nans) def safe_div(a, b): return None if a is None or b is None else a / b def multigroup_lookup_naive(lazydict, keys_list, subkeys_list, custom_func): r""" Slow version of multigroup_lookup. Makes a call to custom_func for each item in zip(keys_list, subkeys_list). SeeAlso: vt.multigroup_lookup """ data_lists = [] for keys, subkeys in zip(keys_list, subkeys_list): subvals_list = [ custom_func(lazydict, key, [subkey])[0] for key, subkey in zip(keys, subkeys) ] data_lists.append(subvals_list) return data_lists def multigroup_lookup(lazydict, keys_list, subkeys_list, custom_func): r""" Efficiently calls custom_func for each item in zip(keys_list, subkeys_list) by grouping subkeys to minimize the number of calls to custom_func. We are given multiple lists of keys, and subvals. The goal is to group the subvals by keys and apply the subval lookups (a call to a function) to the key only once and at the same time. Args: lazydict (dict of utool.LazyDict): keys_list (list): subkeys_list (list): custom_func (func): must have signature custom_func(lazydict, key, subkeys) SeeAlso: vt.multigroup_lookup_naive - unoptomized version, but simple to read Example: >>> # SLOW_DOCTEST >>> # xdoctest: +SKIP >>> from vtool_ibeis.other import * # NOQA >>> import vtool_ibeis as vt >>> fpath_list = [ut.grab_test_imgpath(key) for key in ut.util_grabdata.get_valid_test_imgkeys()] >>> lazydict = {count: vt.testdata_annot_metadata(fpath) for count, fpath in enumerate(fpath_list)} >>> aids_list = np.array([(3, 2), (0, 2), (1, 2), (2, 3)]) >>> fms = np.array([[2, 5], [2, 3], [2, 1], [3, 4]]) >>> keys_list = aids_list.T >>> subkeys_list = fms.T >>> def custom_func(lazydict, key, subkeys): >>> annot = lazydict[key] >>> kpts = annot['kpts'] >>> rchip = annot['rchip'] >>> kpts_m = kpts.take(subkeys, axis=0) >>> warped_patches = vt.get_warped_patches(rchip, kpts_m)[0] >>> return warped_patches >>> data_lists1 = multigroup_lookup(lazydict, keys_list, subkeys_list, custom_func) >>> data_lists2 = multigroup_lookup_naive(lazydict, keys_list, subkeys_list, custom_func) >>> vt.sver_c_wrapper.asserteq(data_lists1, data_lists2) Example: >>> keys_list = [np.array([]), np.array([]), np.array([])] >>> subkeys_list = [np.array([]), np.array([]), np.array([])] """ import vtool_ibeis as vt # Group the keys in each multi-list individually multi_groups = [vt.group_indices(keys) for keys in keys_list] # Combine keys across multi-lists usings a dict_stack dict_list = [dict(zip(k, v)) for k, v in multi_groups] nested_order = ut.dict_stack2(dict_list, default=[]) # Use keys and values for explicit ordering group_key_list = list(nested_order.keys()) if len(group_key_list) == 0: return multigroup_lookup_naive(lazydict, keys_list, 
subkeys_list, custom_func) group_subxs_list = list(nested_order.values()) # Extract unique and flat subkeys. # Maintain an information to invert back into multi-list form group_uf_subkeys_list = [] group_invx_list = [] group_cumsum_list = [] for key, subxs in zip(group_key_list, group_subxs_list): # Group subkeys for each key subkey_group = vt.ziptake(subkeys_list, subxs, axis=0) flat_subkeys, group_cumsum = ut.invertible_flatten2(subkey_group) unique_subkeys, invx = np.unique(flat_subkeys, return_inverse=True) # Append info group_uf_subkeys_list.append(unique_subkeys) group_invx_list.append(invx) group_cumsum_list.append(group_cumsum) # Apply custom function (lookup) to unique each key and its flat subkeys group_subvals_list = [ custom_func(lazydict, key, subkeys) for key, subkeys in zip(group_key_list, group_uf_subkeys_list) ] # Efficiently invert values back into input shape # First invert the subkey groupings multi_subvals_list = [[] for _ in range(len(multi_groups))] _iter = zip(group_key_list, group_subvals_list, group_cumsum_list, group_invx_list) for key, subvals, group_cumsum, invx in _iter: nonunique_subvals = list(ub.take(subvals, invx)) unflat_subvals_list = ut.unflatten2(nonunique_subvals, group_cumsum) for subvals_list, unflat_subvals in zip(multi_subvals_list, unflat_subvals_list): subvals_list.append(unflat_subvals) # Then invert the key groupings data_lists = [] multi_groupxs_list = list(zip(*group_subxs_list)) for subvals_list, groupxs in zip(multi_subvals_list, multi_groupxs_list): datas = vt.invert_apply_grouping(subvals_list, groupxs) data_lists.append(datas) return data_lists def asserteq(output1, output2, thresh=1E-8, nestpath=None, level=0, lbl1=None, lbl2=None, output_lbl=None, verbose=True, iswarning=False): """ recursive equality checks asserts that output1 and output2 are close to equal. """ failed = False if lbl1 is None: lbl1 = ut.get_varname_from_stack(output1, N=1) if lbl2 is None: lbl2 = ut.get_varname_from_stack(output2, N=1) # Setup if nestpath is None: # record the path through the nested structure as testing goes on nestpath = [] # print out these variables in all error cases common_keys = ['lbl1', 'lbl2', 'level', 'nestpath'] # CHECK: types try: assert type(output1) == type(output2), 'types are not equal' except AssertionError as ex: print(type(output1)) print(type(output2)) ut.printex(ex, 'FAILED TYPE CHECKS', keys=common_keys + [(type, 'output1'), (type, 'output2')], iswarning=iswarning) failed = True if not iswarning: raise # CHECK: length if hasattr(output1, '__len__'): try: assert len(output1) == len(output2), 'lens are not equal' except AssertionError as ex: keys = common_keys + [(len, 'output1'), (len, 'output2'), ] ut.printex(ex, 'FAILED LEN CHECKS. ', keys=keys) raise # CHECK: ndarrays if isinstance(output1, np.ndarray): ndarray_keys = ['output1.shape', 'output2.shape'] # CHECK: ndarray shape try: assert output1.shape == output2.shape, 'ndarray shapes are unequal' except AssertionError as ex: keys = common_keys + ndarray_keys ut.printex(ex, 'FAILED NUMPY SHAPE CHECKS.', keys=keys, iswarning=iswarning) failed = True if not iswarning: raise # CHECK: ndarray equality try: passed, error = ut.almost_eq(output1, output2, thresh, ret_error=True) assert np.all(passed), 'ndarrays are unequal.' 
except AssertionError as ex: # Statistics on value difference and value difference # above the thresholds diff_stats = ut.get_stats(error) # NOQA error_stats = ut.get_stats(error[error >= thresh]) # NOQA keys = common_keys + ndarray_keys + [ (len, 'output1'), (len, 'output2'), ('diff_stats'), ('error_stats'), ('thresh'), ] PRINT_VAL_SAMPLE = True if PRINT_VAL_SAMPLE: keys += ['output1', 'output2'] ut.printex(ex, 'FAILED NUMPY CHECKS.', keys=keys, iswarning=iswarning) failed = True if not iswarning: raise # CHECK: list/tuple items elif isinstance(output1, (tuple, list)): for count, (item1, item2) in enumerate(zip(output1, output2)): # recursive call try: asserteq( item1, item2, lbl1=lbl2, lbl2=lbl1, thresh=thresh, nestpath=nestpath + [count], level=level + 1) except AssertionError as ex: ut.printex(ex, 'recursive call failed', keys=common_keys + ['item1', 'item2', 'count'], iswarning=iswarning) failed = True if not iswarning: raise # CHECK: scalars else: try: assert output1 == output2, 'output1 != output2' except AssertionError as ex: print('nestpath= %r' % (nestpath,)) ut.printex(ex, 'FAILED SCALAR CHECK.', keys=common_keys + ['output1', 'output2'], iswarning=iswarning) failed = True if not iswarning: raise if verbose and level == 0: if not failed: print('PASSED %s == %s' % (lbl1, lbl2)) else: print('WARNING %s != %s' % (lbl1, lbl2)) def compare_implementations(func1, func2, args, show_output=False, lbl1='', lbl2='', output_lbl=None): """ tests two different implementations of the same function """ print('+ --- BEGIN COMPARE IMPLEMENTATIONS ---') func1_name = ut.get_funcname(func1) func2_name = ut.get_funcname(func2) print('func1_name = %r' % (func1_name,)) print('func2_name = %r' % (func2_name,)) # test both versions with ub.Timer('time func1=' + func1_name) as t1: output1 = func1(*args) with ub.Timer('time func2=' + func2_name) as t2: output2 = func2(*args) if t2.ellapsed == 0: t2.ellapsed = 1e9 print('speedup = %r' % (t1.ellapsed / t2.ellapsed)) try: asserteq(output1, output2, lbl1=lbl1, lbl2=lbl2, output_lbl=output_lbl) print('implementations are in agreement :) ') except AssertionError as ex: # prints out a nested list corresponding to nested structure ut.printex(ex, 'IMPLEMENTATIONS DO NOT AGREE', keys=[ ('func1_name'), ('func2_name'), ] ) raise finally: depth_profile1 = ut.depth_profile(output1) depth_profile2 = ut.depth_profile(output2) type_profile1 = ut.list_type_profile(output1) type_profile2 = ut.list_type_profile(output2) print('depth_profile1 = ' + ub.repr2(depth_profile1)) print('depth_profile2 = ' + ub.repr2(depth_profile2)) print('type_profile1 = ' + (type_profile1)) print('type_profile2 = ' + (type_profile2)) print('L ___ END COMPARE IMPLEMENTATIONS ___') return output1 def greedy_setcover(universe, subsets, weights=None): """ Copied implmentation of greedy set cover from stack overflow. Needs work. 
References: http://stackoverflow.com/questions/7942312/of-greedy-set-cover-faster Example: >>> # SLOW_DOCTEST >>> # xdoctest: +SKIP >>> from vtool_ibeis.other import * # NOQA >>> import vtool_ibeis as vt >>> universe = set([1,2,3,4]) >>> subsets = [set([1,2]), set([1]), set([1,2,3]), set([1]), set([3,4]), >>> set([4]), set([1,2]), set([3,4]), set([1,2,3,4])] >>> weights = [1, 1, 2, 2, 2, 3, 3, 4, 4] >>> chosen, costs = greedy_setcover(universe, subsets, weights) >>> print('Cover: %r' % (chosen,)) >>> print('Total Cost: %r=sum(%r)' % (sum(costs), costs)) """ #unchosen = subsets.copy() uncovered = universe chosen = [] costs = [] def findMin(subsets, uncovered, weights): minCost = np.inf minElement = -1 for i, s in enumerate(subsets): num_isect = len(s.intersection(uncovered)) try: cost = weights[i] / num_isect if cost < minCost: minCost = cost minElement = i except ZeroDivisionError: pass return subsets[minElement], weights[minElement] while len(uncovered) != 0: S_i, cost = findMin(subsets, uncovered, weights) chosen.append(S_i) uncovered = uncovered.difference(S_i) costs.append(cost) return chosen, costs def find_elbow_point(curve): """ Finds the on the curve point furthest from the line defined by the endpoints of the curve. Args: curve (ndarray): a monotonic curve Returns: int: tradeoff_idx - this is an elbow point in the curve References: http://stackoverflow.com/questions/2018178/trade-off-point-on-curve CommandLine: python -m vtool_ibeis.other find_elbow_point --show Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> curve = np.exp(np.linspace(0, 10, 100)) >>> tradeoff_idx = find_elbow_point(curve) >>> result = ('tradeoff_idx = %s' % (ub.repr2(tradeoff_idx),)) >>> print(result) >>> assert tradeoff_idx == 76 >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt >>> import vtool_ibeis as vt >>> point = [tradeoff_idx, curve[tradeoff_idx]] >>> segment = np.array([[0, len(curve) - 1], [curve[0], curve[-1]]]) >>> e1, e2 = segment.T >>> dist_point = vt.closest_point_on_line_segment(point, e1, e2) >>> dist_line = np.array([dist_point, point]).T >>> pt.plot(curve, 'r', label='curve') >>> pt.plot(point[0], point[1], 'go', markersize=10, label='tradeoff point') >>> pt.plot(dist_line[0], dist_line[1], '-xb') >>> pt.plot(segment[0], segment[1], '-xb') >>> pt.legend() >>> ut.show_if_requested() """ num_points = len(curve) all_coords = np.vstack((np.arange(num_points), curve)).T np.array([np.arange(num_points), curve]) first_point = all_coords[0] line_vec = all_coords[-1] - all_coords[0] line_vec_norm = line_vec / np.sqrt(np.sum(line_vec ** 2)) vec_from_first = all_coords - first_point tiled_line_vec_norm = np.tile(line_vec_norm, (num_points, 1)) scalar_product = np.sum(vec_from_first * tiled_line_vec_norm, axis=1) vec_from_first_parallel = np.outer(scalar_product, line_vec_norm) vec_to_line = vec_from_first - vec_from_first_parallel dist_to_line = np.sqrt(np.sum(vec_to_line ** 2, axis=1)) tradeoff_idx = np.argmax(dist_to_line) return tradeoff_idx def zstar_value(conf_level=.95): """ References: http://stackoverflow.com/questions/28242593/correct-way-to-obtain-confidence-interval-with-scipy """ import scipy.stats as spstats #distribution = #spstats.t.interval(.95, df=(ss - 1))[1] #spstats.norm.interval(.95, df=1)[1] zstar = spstats.norm.interval(conf_level)[1] #zstar = spstats.norm.ppf(spstats.norm.cdf(0) + (conf_level / 2)) return zstar def calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level=.95): """ Determines a error bars of sample 
References: https://www.qualtrics.com/blog/determining-sample-size/ http://www.surveysystem.com/sscalc.htm https://en.wikipedia.org/wiki/Sample_size_determination http://www.surveysystem.com/sample-size-formula.htm http://courses.wcupa.edu/rbove/Berenson/10th%20ed%20CD-ROM%20topics/section8_7.pdf https://en.wikipedia.org/wiki/Standard_normal_table https://www.unc.edu/~rls/s151-2010/class23.pdf """ #zValC_lookup = {.95: 3.8416, .99: 6.6564,} # We sampled ss from a population of pop and got num_positive true cases. ss = sample_size # Calculate at this confidence level zval = zstar_value(conf_level) # Calculate our plus/minus error in positive percentage pos_frac = (num_positive / ss) pf = (pop - ss) / (pop - 1) err_frac = zval * np.sqrt((pos_frac) * (1 - pos_frac) * pf / ss) lines = [] lines.append('population_size = %r' % (pop,)) lines.append('sample_size = %r' % (ss,)) lines.append('num_positive = %r' % (num_positive,)) lines.append('positive rate is %.2f%% ± %.2f%% @ %r confidence' % ( 100 * pos_frac, 100 * err_frac, conf_level)) lines.append('positive num is %d ± %d @ %r confidence' % ( int(np.round(pop * pos_frac)), int(np.round(pop * err_frac)), conf_level)) print(ut.msgblock('Calculate Sample Error Margin', '\n'.join(lines))) def calc_sample_from_error_bars(err_frac, pop, conf_level=.95, prior=.5): """ Determines a reasonable sample size to achieve desired error bars. import sympy p, n, N, z = sympy.symbols('prior, ss, pop, zval') me = sympy.symbols('err_frac') expr = (z * sympy.sqrt((p * (1 - p) / n) * ((N - n) / (N - 1)))) equation = sympy.Eq(me, expr) nexpr = sympy.solve(equation, [n])[0] nexpr = sympy.simplify(nexpr) import autopep8 print(autopep8.fix_lines(['ss = ' + str(nexpr)], autopep8._get_options({}, False))) ss = -pop * prior* (zval**2) *(prior - 1) / ((err_frac ** 2) * pop - (err_frac**2) - prior * (zval**2) * (prior - 1)) ss = pop * prior * zval ** 2 * (prior - 1) / (-err_frac ** 2 * pop + err_frac ** 2 + prior * zval ** 2 * (prior - 1)) """ # How much confidence ydo you want (in fraction of positive results) #zVal_lookup = {.95: 1.96, .99: 2.58,} zval = zstar_value(conf_level) std = .5 zval * std * (1 - std) / err_frac #margin_error = err_frac #margin_error = zval * np.sqrt(prior * (1 - prior) / ss) #margin_error_small = zval * np.sqrt((prior * (1 - prior) / ss) * ((pop - ss) / (pop - 1))) #prior = .5 # initial uncertainty # Used for large samples #ss_large = (prior * (1 - prior)) / ((margin_error / zval) ** 2) # Used for small samples ss_numer = pop * prior * zval ** 2 * (1 - prior) ss_denom = (err_frac ** 2 * pop + err_frac ** 2 + prior * zval ** 2 * (1 - prior)) ss_small = ss_numer / ss_denom #ss_ = ((zval ** 2) * 0.25) / (err_frac ** 2) #ss = int(np.ceil(ss_ / (1 + ((ss_ - 1) / pop)))) ss = int(np.ceil(ss_small)) lines = [] lines.append('population_size = %r' % (pop,)) lines.append('positive_prior = %r' % (prior,)) lines.append('Desired confidence = %.2f' % (conf_level,)) lines.append('Desired error rate is %.2f%%' % (err_frac * 100)) lines.append('Desired number of errors is %d' % (int(round(err_frac * pop)))) lines.append('Need sample sample size of %r to achive requirements' % (ss,)) print(ut.msgblock('Calculate Required Sample Size', '\n'.join(lines))) def inbounds(num, low, high, eq=False): r""" Args: num (scalar or ndarray): low (scalar or ndarray): high (scalar or ndarray): eq (bool): Returns: scalar or ndarray: is_inbounds CommandLine: xdoctest -m ~/code/vtool_ibeis/vtool_ibeis/other.py inbounds Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import 
* # NOQA >>> import utool as ut >>> num = np.array([[ 0. , 0.431, 0.279], ... [ 0.204, 0.352, 0.08 ], ... [ 0.107, 0.325, 0.179]]) >>> low = .1 >>> high = .4 >>> eq = False >>> is_inbounds = inbounds(num, low, high, eq) >>> result = ub.repr2(is_inbounds, with_dtype=True) >>> print(result) """ import operator as op less = op.le if eq else op.lt greater = op.ge if eq else op.gt and_ = np.logical_and if isinstance(num, np.ndarray) else op.and_ is_inbounds = and_(greater(num, low), less(num, high)) return is_inbounds def fromiter_nd(iter_, shape, dtype): """ Like np.fromiter but handles iterators that generated n-dimensional arrays. Slightly faster than np.array. maybe commit to numpy? Args: iter_ (iter): an iterable that generates homogenous ndarrays shape (tuple): the expected output shape dtype (dtype): the numpy datatype of the generated ndarrays Note: The iterable must yeild a numpy array. It cannot yeild a Python list. CommandLine: python -m vtool_ibeis.other fromiter_nd --show Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> dtype = np.float >>> total = 11 >>> rng = np.random.RandomState(0) >>> iter_ = (rng.rand(5, 7, 3) for _ in range(total)) >>> shape = (total, 5, 7, 3) >>> result = fromiter_nd(iter_, shape, dtype) >>> assert result.shape == shape Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.other import * # NOQA >>> dtype = np.int >>> qfxs = np.array([1, 2, 3]) >>> dfxs = np.array([4, 5, 6]) >>> iter_ = (np.array(x) for x in ut.product(qfxs, dfxs)) >>> total = len(qfxs) * len(dfxs) >>> shape = (total, 2) >>> result = fromiter_nd(iter_, shape, dtype) >>> assert result.shape == shape """ num_rows = shape[0] chunksize = np.prod(shape[1:]) itemsize = np.dtype(dtype).itemsize # Create dtype that makes an entire ndarray appear as a single item chunk_dtype = np.dtype((np.void, itemsize * chunksize)) arr = np.fromiter(iter_, count=num_rows, dtype=chunk_dtype) # Convert back to original dtype and shape arr = arr.view(dtype) arr.shape = shape return arr def make_video2(images, outdir): import vtool_ibeis as vt from os.path import join n = str(int(np.ceil(np.log10(len(images))))) fmt = 'frame_%0' + n + 'd.png' ub.ensuredir(outdir) for count, img in enumerate(images): fname = join(outdir, fmt % (count)) vt.imwrite(fname, img) def make_video(images, outvid=None, fps=5, size=None, is_color=True, format='XVID'): """ Create a video from a list of images. References: http://www.xavierdupre.fr/blog/2016-03-30_nojs.html http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html @param outvid output video @param images list of images to use in the video @param fps frame per second @param size size of each frame @param is_color color @param format see http://www.fourcc.org/codecs.php The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/. By default, the video will have the size of the first image. It will resize every image to this size before adding them to the video. 
""" # format = 'MJPG' # format = 'FMP4' import cv2 fourcc = cv2.VideoWriter_fourcc(*str(format)) vid = None for img in images: if vid is None: if size is None: size = img.shape[1], img.shape[0] vid = cv2.VideoWriter(outvid, fourcc, float(fps), size, is_color) if size[0] != img.shape[1] and size[1] != img.shape[0]: img = cv2.resize(img, size) vid.write(img) vid.release() return vid def take_col_per_row(arr, colx_list): """ takes a column from each row Ignore: num_rows = 1000 num_cols = 4 arr = np.arange(10 * 4).reshape(10, 4) colx_list = (np.random.rand(10) * 4).astype(np.int) %timeit np.array([row[cx] for (row, cx) in zip(arr, colx_list)]) %timeit arr.ravel().take(np.ravel_multi_index((np.arange(len(colx_list)), colx_list), arr.shape)) %timeit arr.ravel().take(colx_list + np.arange(arr.shape[0]) * arr.shape[1]) """ # out = np.array([row[cx] for (row, cx) in zip(arr, colx_list)]) multix_list = np.ravel_multi_index((np.arange(len(colx_list)), colx_list), arr.shape) out = arr.ravel().take(multix_list) return out if __name__ == '__main__': """ CommandLine: xdoctest -m vtool_ibeis.other """ import xdoctest xdoctest.doctest_module(__file__)
from Simulador import Simulador import math import pandas as pd # d = pd.read_pickle('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/simulacoes_chance_30%.pkl') # d.to_csv(r'C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/simulacoes_chance_30%.txt', sep=' ', index=False) # d = pd.read_pickle('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/simulacoes_chance_100%.pkl') # d.to_csv(r'C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/simulacoes_chance_100%.txt', sep=' ', index=False) # print(d.head()) nome_simulacao = "simulacoes_chance_100%" n_simulacoes = 10 tamanho_matriz = 10 chance_infeccao = 1 chance_infeccao_sintomatico = 0.2 chance_morte = 0.02 atualizacoes_cura = 10 inserir_infectados_aleatorios = False import numpy as np import random dados_simulacoes = pd.DataFrame(dtype=np.int) for i in range(n_simulacoes): sim = Simulador( tamanho_matriz, chance_infeccao, chance_infeccao_sintomatico, chance_morte, atualizacoes_cura, inserir_infectados_aleatorios) sim.executar_simulacao() dados_simulacoes = dados_simulacoes.append(sim.dict_resumo, ignore_index = True) dados_simulacoes = dados_simulacoes[["pop_inicial", "tipo1_inicial", "tipo2_inicial", "n/2_100%_infectados", "tipo1_n/2", "tipo2_n/2", "curados_n/2", "mortos_n/2", "n/2+1_100%_infectados", "tipo1_n/2+1", "tipo2_n/2+1", "curados_n/2+1", "mortos_n/2+1", "n_atualizacoes_100%_infectados", "tipo1_n", "tipo2_n", "curados_n", "mortos_n", "numero_total_atualizacoes", "sadios_final", "curados_final", "mortos_final"]].astype(int) dados_simulacoes.to_csv('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/'+ nome_simulacao + '.txt', sep=' ', index=False) nome_simulacao = "simulacoes_chance_30%" chance_infeccao = 0.3 dados_simulacoes = pd.DataFrame(dtype=np.int) for i in range(n_simulacoes): sim = Simulador( tamanho_matriz, chance_infeccao, chance_infeccao_sintomatico, chance_morte, atualizacoes_cura, inserir_infectados_aleatorios) sim.executar_simulacao() dados_simulacoes = dados_simulacoes.append(sim.dict_resumo, ignore_index = True) dados_simulacoes = dados_simulacoes[["pop_inicial", "tipo1_inicial", "tipo2_inicial", "n/2_100%_infectados", "tipo1_n/2", "tipo2_n/2", "curados_n/2", "mortos_n/2", "n/2+1_100%_infectados", "tipo1_n/2+1", "tipo2_n/2+1", "curados_n/2+1", "mortos_n/2+1", "n_atualizacoes_100%_infectados", "tipo1_n", "tipo2_n", "curados_n", "mortos_n", "numero_total_atualizacoes", "sadios_final", "curados_final", "mortos_final"]].astype(int) dados_simulacoes.to_csv('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/'+ nome_simulacao + '.txt', sep=' ', index=False) print(dados_simulacoes)
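# Side note (a sketch, not part of the script above): newer pandas releases
# deprecate DataFrame.append, so the same accumulation can be written by
# collecting the per-run summary dicts in a list and building the frame once.
# This assumes sim.dict_resumo is a plain dict, as the script above suggests.
def coletar_resumos(n_simulacoes):
    resumos = []
    for _ in range(n_simulacoes):
        sim = Simulador(tamanho_matriz, chance_infeccao, chance_infeccao_sintomatico,
                        chance_morte, atualizacoes_cura, inserir_infectados_aleatorios)
        sim.executar_simulacao()
        resumos.append(sim.dict_resumo)
    return pd.DataFrame(resumos)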
# Generated by Django 3.2.8 on 2021-11-18 08:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('userprofile', '0001_initial'), ] operations = [ migrations.AddField( model_name='profile', name='username', field=models.CharField(blank=True, max_length=300, null=True), ), migrations.AlterField( model_name='profile', name='facebook', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='github', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='instagram', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='linkedin', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='twitter', field=models.CharField(blank=True, max_length=300), ), migrations.AlterField( model_name='profile', name='website', field=models.CharField(blank=True, max_length=300), ), ]
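# Sketch (an assumption, not taken from the project itself): Profile model fields
# on the userprofile app that would be consistent with the AddField/AlterField
# operations in the migration above -- a nullable, optional username plus
# optional social-link fields.
from django.db import models

class Profile(models.Model):
    username = models.CharField(max_length=300, blank=True, null=True)
    facebook = models.CharField(max_length=300, blank=True)
    github = models.CharField(max_length=300, blank=True)
    instagram = models.CharField(max_length=300, blank=True)
    linkedin = models.CharField(max_length=300, blank=True)
    twitter = models.CharField(max_length=300, blank=True)
    website = models.CharField(max_length=300, blank=True)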
## seq2seq for solving math word problems import torch from tqdm import tqdm import torch.nn as nn from torch.optim import Adam import numpy as np import os import json import time import glob import bert_seq2seq from torch.utils.data import Dataset, DataLoader from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab from bert_seq2seq.utils import load_bert import re vocab_path = "./state_dict/roberta_wwm_vocab.txt" # location of the roberta vocabulary file word2idx = load_chinese_base_vocab(vocab_path) model_name = "roberta" # model name to use model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # location of the pretrained model recent_model_path = "./state_dict/bert_math_ques_model.bin" # used to resume training from an already trained model model_save_path = "./state_dict/bert_math_ques_model.bin" batch_size = 16 lr = 1e-5 maxlen = 256 train_data_path = "./state_dict/train.ape.json" val_data_path = "./state_dict/test.ape.json" def remove_bucket(equation): """Remove redundant parentheses. """ l_buckets, buckets = [], [] for i, c in enumerate(equation): if c == '(': l_buckets.append(i) elif c == ')': buckets.append((l_buckets.pop(), i)) eval_equation = eval(equation) for l, r in buckets: new_equation = '%s %s %s' % ( equation[:l], equation[l + 1:r], equation[r + 1:] ) try: if is_equal(eval(new_equation.replace(' ', '')), eval_equation): equation = new_equation except: pass return equation.replace(' ', '') def is_equal(a, b): """Check whether two results are equal. """ a = round(float(a), 6) b = round(float(b), 6) return a == b ## Read data (Su Jianlin's baseline) def load_data(filename): """Read the training data and apply some normalization so that each equation can be eval'd. Reference: https://kexue.fm/archives/7809 """ D = [] # index = 0 for l in open(filename): # index += 1 # if index == 100: # break l = json.loads(l) # print(l) question, equation, answer = l['original_text'], l['equation'], l['ans'] # handle mixed fractions question = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', question) equation = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', equation) answer = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', answer) equation = re.sub('(\d+)\(', '\\1+(', equation) answer = re.sub('(\d+)\(', '\\1+(', answer) # strip parentheses around plain fractions question = re.sub('\((\d+/\d+)\)', '\\1', question) # handle percentages equation = re.sub('([\.\d]+)%', '(\\1/100)', equation) answer = re.sub('([\.\d]+)%', '(\\1/100)', answer) # convert colons to division; handle remaining percent signs equation = equation.replace(':', '/').replace('%', '/100') answer = answer.replace(':', '/').replace('%', '/100') if equation[:2] == 'x=': equation = equation[2:] try: # print(equation) # print(answer) # print("~~~~~~~`") if is_equal(eval(equation), eval(answer)): D.append((question, remove_bucket(equation), answer)) except Exception as e: print(e) continue return D class BertDataset(Dataset): """ Defines how samples are fetched for this particular dataset. """ def __init__(self, data) : ## the init function typically loads all of the data super(BertDataset, self).__init__() self.data = data print("data size is " + str(len(data))) self.idx2word = {k: v for v, k in word2idx.items()} self.tokenizer = Tokenizer(word2idx) def __getitem__(self, i): ## fetch a single example # print(i) single_data = self.data[i] original_text = single_data[0] ans_text = single_data[1] token_ids, token_type_ids = self.tokenizer.encode( original_text, ans_text, max_length=maxlen ) output = { "token_ids": token_ids, "token_type_ids": token_type_ids, } return output def __len__(self): return len(self.data) def collate_fn(batch): """ Dynamic padding; batch is a list of samples. """ def padding(indice, max_length, pad_idx=0): """ padding helper """ pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice] return torch.tensor(pad_indice) token_ids = [data["token_ids"] for data in batch] max_length = max([len(t) for t in token_ids]) token_type_ids = 
[data["token_type_ids"] for data in batch] token_ids_padded = padding(token_ids, max_length) token_type_ids_padded = padding(token_type_ids, max_length) target_ids_padded = token_ids_padded[:, 1:].contiguous() return token_ids_padded, token_type_ids_padded, target_ids_padded class Trainer: def __init__(self): # 判断是否有可用GPU data = load_data(train_data_path) self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device: " + str(self.device)) # 定义模型 self.bert_model = load_bert(word2idx, model_name=model_name) ## 加载预训练的模型参数~ self.bert_model.load_pretrain_params(model_path) # 将模型发送到计算设备(GPU或CPU) self.bert_model.set_device(self.device) # 声明需要优化的参数 self.optim_parameters = list(self.bert_model.parameters()) self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-5) # 声明自定义的数据加载器 dataset = BertDataset(data) self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn) self.best_acc = 0.0 def train(self, epoch): # 一个epoch的训练 self.bert_model.train() self.iteration(epoch, dataloader=self.dataloader, train=True) def save(self, save_path): """ 保存模型 """ self.bert_model.save_all_params(save_path) print("{} saved!".format(save_path)) def iteration(self, epoch, dataloader, train=True): total_loss = 0 start_time = time.time() ## 得到当前时间 step = 0 report_loss = 0 print("starting train.......") # for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True): for token_ids, token_type_ids, target_ids in dataloader: step += 1 if step % 3000 == 0: self.bert_model.eval() test_data = ["王艳家买了一台洗衣机和一台电冰箱,一共花了6000元,电冰箱的价钱是洗衣机的3/5,求洗衣机的价钱.", "六1班原来男生占总数的2/5,又转来5名男生,现在男生占总数的5/11,女生有多少人?", "两个相同的数相乘,积是3600,这个数是多少."] for text in test_data: print(self.bert_model.generate(text, beam_size=3, device=self.device)) print("loss is " + str(report_loss)) report_loss = 0 self.bert_model.train() if step % 10000 == 0: ## 2000步集中测试一下 print("validing..........") acc = self.validation() print("valid acc is " + str(acc)) if acc > self.best_acc: self.best_acc = acc self.save(model_save_path) token_ids = token_ids.to(self.device) token_type_ids = token_type_ids.to(self.device) target_ids = target_ids.to(self.device) # 因为传入了target标签,因此会计算loss并且返回 predictions, loss = self.bert_model(token_ids, token_type_ids, labels=target_ids, device=self.device ) report_loss += loss.item() # 反向传播 if train: # 清空之前的梯度 self.optimizer.zero_grad() # 反向传播, 获取新的梯度 loss.backward() # 用获取的梯度更新模型参数 self.optimizer.step() # 为计算当前epoch的平均loss total_loss += loss.item() end_time = time.time() spend_time = end_time - start_time # 打印训练信息 print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time)) # 保存模型 # self.save(model_save_path) def eval_equation(self, equation): ans = -10000 try: ans = eval(equation) except: pass return ans def validation(self): val_data = load_data(val_data_path) # 用0 和 2 self.bert_model.eval() right = 0.0 num = len(val_data) # for each_data in tqdm(val_data, total=num): for each_data in val_data: equation = self.bert_model.generate(each_data[0], beam_size=3, device=self.device) pred_ans = self.eval_equation(equation.replace(" ", "")) ans1 = each_data[2] try : if "/" in each_data[2] or "+" in each_data[2] or "-" in each_data[2] or "*" in each_data[2]: # print(each_data[2]) # equation1 = re.sub('\((\d+/\d+)\)', '\\1', str(each_data[2])) ans1 = eval(each_data[2]) if abs(float(pred_ans) - float(ans1)) < 0.01: right += 1 # print("right! 
pred is " + str(pred_ans) + " ans is " + str(each_data[2])) else: pass # print("err! pred is " + str(pred_ans) + " ans is " + str(each_data[2])) except Exception as e: print(e) self.bert_model.train() return right / num if __name__ == '__main__': trainer = Trainer() train_epoches = 25 for epoch in range(train_epoches): # 训练一个epoch trainer.train(epoch)
from .backends import GraphQLFilterBackend from .mixins import FilterMixin
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from op_test import OpTest from copy import deepcopy class TestSequenceTopkAvgPoolingOp(OpTest): def setUp(self): self.init_op_type() self.set_data() self.compute() def init_op_type(self): self.op_type = "sequence_topk_avg_pooling" def set_data(self): topks = [2] channel_num = 3 dim = 10 row = [2, 4] col = [3, 2] self.init_data(topks, channel_num, row, col, dim) def init_data(self, topks, channel_num, row, col, dim=10): self.attrs = {"topks": topks, "channel_num": channel_num} feature = [row[i] * col[i] for i in range(len(row))] numel = sum(feature) * channel_num x_data = np.random.random((numel, )).astype('float32') x_lod = [[x * channel_num for x in feature]] row_data = np.random.random((sum(row), dim)).astype('float32') col_data = np.random.random((sum(col), dim)).astype('float32') self.inputs = { 'X': (x_data, x_lod), 'ROW': (row_data, [row]), 'COLUMN': (col_data, [col]) } def compute(self): topks = self.attrs['topks'] max_k = topks[-1] x_data, x_lod = self.inputs['X'] row_data, row_lod = self.inputs['ROW'] col_data, col_lod = self.inputs['COLUMN'] channel_num = self.attrs['channel_num'] out = np.zeros((0, len(topks) * channel_num), dtype=x_data.dtype) pos = np.zeros((0, ), dtype='int32') out_lod = deepcopy(row_lod) offset = 0 for idx in range(len(x_lod[0])): x_len = x_lod[0][idx] self.assertTrue( x_len == channel_num * row_lod[0][idx] * col_lod[0][idx], "x_len: %s can't mod channel_num: %s" % (x_len, channel_num)) # feature = x_len / channel_num out_tmp = np.zeros((0, ), dtype=x_data.dtype) pos_tmp = np.zeros((0, ), dtype='int32') for ch in range(channel_num): for r_id in range(row_lod[0][idx]): x_sub = x_data[offset:(offset + col_lod[0][idx])] topk_val, topk_pos = self.get_topk(x_sub, max_k) sum_data = self.topk_sum(topk_val, topk_pos, max_k) new_feature = np.array( [sum_data[topk] / topk for topk in topks]) out_tmp = np.hstack((out_tmp, new_feature)) pos_tmp = np.hstack((pos_tmp, topk_pos)) offset += col_lod[0][idx] out_tmp = out_tmp.reshape([channel_num, -1, len(topks)]).transpose( 1, 0, 2) pos_tmp = pos_tmp.reshape([channel_num, -1, max_k]).transpose(1, 0, 2) out = np.vstack( (out, out_tmp.reshape([-1, len(topks) * channel_num]))) pos = np.hstack((pos, pos_tmp.flatten())) self.outputs = {'Out': (out.astype('float32'), out_lod), 'pos': pos} def get_topk(self, x, topk): real_topk = topk if topk < len(x) else len(x) topk_pos = np.array(x).argsort()[-topk:][::-1] topk_val = np.array(x)[topk_pos] if real_topk < topk: topk_pos = np.hstack((topk_pos, np.full((topk - real_topk, ), -1))) topk_val = np.hstack((topk_val, np.full((topk - real_topk, ), 0.0))) return topk_val, topk_pos def topk_sum(self, x, pos, max_k): sum_data = [0.] 
* (max_k + 1) for i in range(1, max_k + 1): if pos[i - 1] == -1: sum_data[i] = sum_data[i - 1] else: sum_data[i] = sum_data[i - 1] + x[i - 1] return sum_data def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out', max_relative_error=0.005) class TestSequenceTopkAvgPoolingOpCase1(TestSequenceTopkAvgPoolingOp): def set_data(self): topks = [2, 3] channel_num = 3 dim = 10 row = [3] col = [4] self.init_data(topks, channel_num, row, col, dim) def test_api(self): import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[1], lod_level=1) row = fluid.layers.data(name='row', shape=[10], lod_level=1) col = fluid.layers.data(name='col', shape=[10], lod_level=1) topk_avg = fluid.contrib.sequence_topk_avg_pooling( input=x, row=row, col=col, topks=[1, 3, 5], channel_num=5) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( np.random.rand(45, 1).astype('float32'), [[30, 15]], place) row_tensor = fluid.create_lod_tensor( np.random.rand(5, 10).astype('float32'), [[2, 3]], place) col_tensor = fluid.create_lod_tensor( np.random.rand(4, 10).astype('float32'), [[3, 1]], place) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) ret = exe.run( feed={'x': x_tensor, 'row': row_tensor, 'col': col_tensor}, fetch_list=[topk_avg], return_numpy=False) if __name__ == '__main__': unittest.main()
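# Sketch (standalone, illustrates the per-row reference computation performed by
# compute() above): top-k average pooling over a single row of scores, using the
# same top-k selection and prefix-sum idea; the score values are made up.
import numpy as np

def topk_avg(scores, topks):
    max_k = topks[-1]
    pos = np.array(scores).argsort()[-max_k:][::-1]     # indices of the top max_k scores
    vals = np.array(scores)[pos]
    if len(scores) < max_k:                             # pad short rows, as get_topk does
        vals = np.hstack((vals, np.zeros(max_k - len(scores))))
    csum = np.concatenate(([0.0], np.cumsum(vals)))     # csum[k] == sum of the top-k scores
    return [csum[k] / k for k in topks]

print(topk_avg([0.1, 0.5, 0.3], [1, 2]))  # -> [0.5, 0.4] (top-1 avg, top-2 avg)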
"""Classes representing lines.""" from __future__ import print_function, division, absolute_import import copy as copylib import numpy as np import skimage.draw import skimage.measure import cv2 from .. import imgaug as ia from .base import IAugmentable from .utils import (normalize_shape, project_coords, interpolate_points, _remove_out_of_image_fraction) # TODO Add Line class and make LineString a list of Line elements # TODO add to_distance_maps(), compute_hausdorff_distance(), intersects(), # find_self_intersections(), is_self_intersecting(), # remove_self_intersections() class LineString(object): """Class representing line strings. A line string is a collection of connected line segments, each having a start and end point. Each point is given as its ``(x, y)`` absolute (sub-)pixel coordinates. The end point of each segment is also the start point of the next segment. The line string is not closed, i.e. start and end point are expected to differ and will not be connected in drawings. Parameters ---------- coords : iterable of tuple of number or ndarray The points of the line string. label : None or str, optional The label of the line string. """ def __init__(self, coords, label=None): """Create a new LineString instance.""" # use the conditions here to avoid unnecessary copies of ndarray inputs if ia.is_np_array(coords): if coords.dtype.name != "float32": coords = coords.astype(np.float32) elif len(coords) == 0: coords = np.zeros((0, 2), dtype=np.float32) else: assert ia.is_iterable(coords), ( "Expected 'coords' to be an iterable, " "got type %s." % (type(coords),)) assert all([len(coords_i) == 2 for coords_i in coords]), ( "Expected 'coords' to contain (x,y) tuples, " "got %s." % (str(coords),)) coords = np.float32(coords) assert coords.ndim == 2 and coords.shape[-1] == 2, ( "Expected 'coords' to have shape (N, 2), got shape %s." % ( coords.shape,)) self.coords = coords self.label = label @property def length(self): """Compute the total euclidean length of the line string. Returns ------- float The length based on euclidean distance, i.e. the sum of the lengths of each line segment. """ if len(self.coords) == 0: return 0 return np.sum(self.compute_neighbour_distances()) @property def xx(self): """Get an array of x-coordinates of all points of the line string. Returns ------- ndarray ``float32`` x-coordinates of the line string points. """ return self.coords[:, 0] @property def yy(self): """Get an array of y-coordinates of all points of the line string. Returns ------- ndarray ``float32`` y-coordinates of the line string points. """ return self.coords[:, 1] @property def xx_int(self): """Get an array of discrete x-coordinates of all points. The conversion from ``float32`` coordinates to ``int32`` is done by first rounding the coordinates to the closest integer and then removing everything after the decimal point. Returns ------- ndarray ``int32`` x-coordinates of the line string points. """ return np.round(self.xx).astype(np.int32) @property def yy_int(self): """Get an array of discrete y-coordinates of all points. The conversion from ``float32`` coordinates to ``int32`` is done by first rounding the coordinates to the closest integer and then removing everything after the decimal point. Returns ------- ndarray ``int32`` y-coordinates of the line string points. """ return np.round(self.yy).astype(np.int32) @property def height(self): """Compute the height of a bounding box encapsulating the line. The height is computed based on the two points with lowest and largest y-coordinates. 
Returns ------- float The height of the line string. """ if len(self.coords) <= 1: return 0 return np.max(self.yy) - np.min(self.yy) @property def width(self): """Compute the width of a bounding box encapsulating the line. The width is computed based on the two points with lowest and largest x-coordinates. Returns ------- float The width of the line string. """ if len(self.coords) <= 1: return 0 return np.max(self.xx) - np.min(self.xx) def get_pointwise_inside_image_mask(self, image): """Determine per point whether it is inside of a given image plane. Parameters ---------- image : ndarray or tuple of int Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. Returns ------- ndarray ``(N,) ``bool`` array with one value for each of the ``N`` points indicating whether it is inside of the provided image plane (``True``) or not (``False``). """ # pylint: disable=misplaced-comparison-constant if len(self.coords) == 0: return np.zeros((0,), dtype=bool) shape = normalize_shape(image) height, width = shape[0:2] x_within = np.logical_and(0 <= self.xx, self.xx < width) y_within = np.logical_and(0 <= self.yy, self.yy < height) return np.logical_and(x_within, y_within) # TODO add closed=False/True? def compute_neighbour_distances(self): """Compute the euclidean distance between each two consecutive points. Returns ------- ndarray ``(N-1,)`` ``float32`` array of euclidean distances between point pairs. Same order as in `coords`. """ if len(self.coords) <= 1: return np.zeros((0,), dtype=np.float32) return np.sqrt( np.sum( (self.coords[:-1, :] - self.coords[1:, :]) ** 2, axis=1 ) ) # TODO change output to array def compute_pointwise_distances(self, other, default=None): """Compute min distances between points of this and another line string. Parameters ---------- other : tuple of number or imgaug.augmentables.kps.Keypoint or imgaug.augmentables.LineString Other object to which to compute the distances. default : any Value to return if `other` contains no points. Returns ------- list of float or any For each coordinate of this line string, the distance to any closest location on `other`. `default` if no distance could be computed. """ import shapely.geometry from .kps import Keypoint if isinstance(other, Keypoint): other = shapely.geometry.Point((other.x, other.y)) elif isinstance(other, LineString): if len(other.coords) == 0: return default if len(other.coords) == 1: other = shapely.geometry.Point(other.coords[0, :]) else: other = shapely.geometry.LineString(other.coords) elif isinstance(other, tuple): assert len(other) == 2, ( "Expected tuple 'other' to contain exactly two entries, " "got %d." % (len(other),)) other = shapely.geometry.Point(other) else: raise ValueError( "Expected Keypoint or LineString or tuple (x,y), " "got type %s." % (type(other),)) return [shapely.geometry.Point(point).distance(other) for point in self.coords] def compute_distance(self, other, default=None): """Compute the minimal distance between the line string and `other`. Parameters ---------- other : tuple of number or imgaug.augmentables.kps.Keypoint or imgaug.augmentables.LineString Other object to which to compute the distance. default : any Value to return if this line string or `other` contain no points. Returns ------- float or any Minimal distance to `other` or `default` if no distance could be computed. """ # FIXME this computes distance pointwise, does not have to be identical # with the actual min distance (e.g. 
edge center to other's point) distances = self.compute_pointwise_distances(other, default=[]) if len(distances) == 0: return default return min(distances) # TODO update BB's contains(), which can only accept Keypoint currently def contains(self, other, max_distance=1e-4): """Estimate whether a point is on this line string. This method uses a maximum distance to estimate whether a point is on a line string. Parameters ---------- other : tuple of number or imgaug.augmentables.kps.Keypoint Point to check for. max_distance : float Maximum allowed euclidean distance between the point and the closest point on the line. If the threshold is exceeded, the point is not considered to fall on the line. Returns ------- bool ``True`` if the point is on the line string, ``False`` otherwise. """ return self.compute_distance(other, default=np.inf) < max_distance def project(self, from_shape, to_shape): """Project the line string onto a differently shaped image. E.g. if a point of the line string is on its original image at ``x=(10 of 100 pixels)`` and ``y=(20 of 100 pixels)`` and is projected onto a new image with size ``(width=200, height=200)``, its new position will be ``(x=20, y=40)``. This is intended for cases where the original image is resized. It cannot be used for more complex changes (e.g. padding, cropping). Parameters ---------- from_shape : tuple of int or ndarray Shape of the original image. (Before resize.) to_shape : tuple of int or ndarray Shape of the new image. (After resize.) Returns ------- imgaug.augmentables.lines.LineString Line string with new coordinates. """ coords_proj = project_coords(self.coords, from_shape, to_shape) return self.copy(coords=coords_proj) def compute_out_of_image_fraction(self, image): """Compute fraction of polygon area outside of the image plane. This estimates ``f = A_ooi / A``, where ``A_ooi`` is the area of the polygon that is outside of the image plane, while ``A`` is the total area of the bounding box. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- float Fraction of the polygon area that is outside of the image plane. Returns ``0.0`` if the polygon is fully inside of the image plane. If the polygon has an area of zero, the polygon is treated similarly to a :class:`LineString`, i.e. the fraction of the line that is inside the image plane is returned. """ length = self.length if length == 0: if len(self.coords) == 0: return 0.0 points_ooi = ~self.get_pointwise_inside_image_mask(image) return 1.0 if np.all(points_ooi) else 0.0 lss_clipped = self.clip_out_of_image(image) length_after_clip = sum([ls.length for ls in lss_clipped]) inside_image_factor = length_after_clip / length return 1.0 - inside_image_factor def is_fully_within_image(self, image, default=False): """Estimate whether the line string is fully inside an image plane. Parameters ---------- image : ndarray or tuple of int Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. default : any Default value to return if the line string contains no points. Returns ------- bool or any ``True`` if the line string is fully inside the image area. ``False`` otherwise. Will return `default` if this line string contains no points. 
""" if len(self.coords) == 0: return default return np.all(self.get_pointwise_inside_image_mask(image)) def is_partly_within_image(self, image, default=False): """ Estimate whether the line string is at least partially inside the image. Parameters ---------- image : ndarray or tuple of int Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. default : any Default value to return if the line string contains no points. Returns ------- bool or any ``True`` if the line string is at least partially inside the image area. ``False`` otherwise. Will return `default` if this line string contains no points. """ if len(self.coords) == 0: return default # check mask first to avoid costly computation of intersection points # whenever possible mask = self.get_pointwise_inside_image_mask(image) if np.any(mask): return True return len(self.clip_out_of_image(image)) > 0 def is_out_of_image(self, image, fully=True, partly=False, default=True): """ Estimate whether the line is partially/fully outside of the image area. Parameters ---------- image : ndarray or tuple of int Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. fully : bool, optional Whether to return ``True`` if the line string is fully outside of the image area. partly : bool, optional Whether to return ``True`` if the line string is at least partially outside fo the image area. default : any Default value to return if the line string contains no points. Returns ------- bool or any ``True`` if the line string is partially/fully outside of the image area, depending on defined parameters. ``False`` otherwise. Will return `default` if this line string contains no points. """ if len(self.coords) == 0: return default if self.is_fully_within_image(image): return False if self.is_partly_within_image(image): return partly return fully def clip_out_of_image(self, image): """Clip off all parts of the line string that are outside of the image. Parameters ---------- image : ndarray or tuple of int Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. Returns ------- list of imgaug.augmentables.lines.LineString Line strings, clipped to the image shape. The result may contain any number of line strins, including zero. """ if len(self.coords) == 0: return [] inside_image_mask = self.get_pointwise_inside_image_mask(image) ooi_mask = ~inside_image_mask if len(self.coords) == 1: if not np.any(inside_image_mask): return [] return [self.copy()] if np.all(inside_image_mask): return [self.copy()] # top, right, bottom, left image edges # we subtract eps here, because intersection() works inclusively, # i.e. 
not subtracting eps would be equivalent to 0<=x<=C for C being # height or width # don't set the eps too low, otherwise points at height/width seem # to get rounded to height/width by shapely, which can cause problems # when first clipping and then calling is_fully_within_image() # returning false height, width = normalize_shape(image)[0:2] eps = 1e-3 edges = [ LineString([(0.0, 0.0), (width - eps, 0.0)]), LineString([(width - eps, 0.0), (width - eps, height - eps)]), LineString([(width - eps, height - eps), (0.0, height - eps)]), LineString([(0.0, height - eps), (0.0, 0.0)]) ] intersections = self.find_intersections_with(edges) points = [] gen = enumerate(zip(self.coords[:-1], self.coords[1:], ooi_mask[:-1], ooi_mask[1:], intersections)) for i, (line_start, line_end, ooi_start, ooi_end, inter_line) in gen: points.append((line_start, False, ooi_start)) for p_inter in inter_line: points.append((p_inter, True, False)) is_last = (i == len(self.coords) - 2) if is_last and not ooi_end: points.append((line_end, False, ooi_end)) lines = [] line = [] for i, (coord, was_added, ooi) in enumerate(points): # remove any point that is outside of the image, # also start a new line once such a point is detected if ooi: if len(line) > 0: lines.append(line) line = [] continue if not was_added: # add all points that were part of the original line string # AND that are inside the image plane line.append(coord) else: is_last_point = (i == len(points)-1) # ooi is a numpy.bool_, hence the bool(.) is_next_ooi = (not is_last_point and bool(points[i+1][2]) is True) # Add all points that were new (i.e. intersections), so # long that they aren't essentially identical to other point. # This prevents adding overlapping intersections multiple times. # (E.g. when a line intersects with a corner of the image plane # and also with one of its edges.) p_prev = line[-1] if len(line) > 0 else None # ignore next point if end reached or next point is out of image p_next = None if not is_last_point and not is_next_ooi: p_next = points[i+1][0] dist_prev = None dist_next = None if p_prev is not None: dist_prev = np.linalg.norm( np.float32(coord) - np.float32(p_prev)) if p_next is not None: dist_next = np.linalg.norm( np.float32(coord) - np.float32(p_next)) dist_prev_ok = (dist_prev is None or dist_prev > 1e-2) dist_next_ok = (dist_next is None or dist_next > 1e-2) if dist_prev_ok and dist_next_ok: line.append(coord) if len(line) > 0: lines.append(line) lines = [line for line in lines if len(line) > 0] return [self.deepcopy(coords=line) for line in lines] # TODO add tests for this # TODO extend this to non line string geometries def find_intersections_with(self, other): """Find all intersection points between this line string and `other`. Parameters ---------- other : tuple of number or list of tuple of number or list of LineString or LineString The other geometry to use during intersection tests. Returns ------- list of list of tuple of number All intersection points. One list per pair of consecutive start and end point, i.e. `N-1` lists of `N` points. Each list may be empty or may contain multiple points. 
""" import shapely.geometry geom = _convert_var_to_shapely_geometry(other) result = [] for p_start, p_end in zip(self.coords[:-1], self.coords[1:]): ls = shapely.geometry.LineString([p_start, p_end]) intersections = ls.intersection(geom) intersections = list(_flatten_shapely_collection(intersections)) intersections_points = [] for inter in intersections: if isinstance(inter, shapely.geometry.linestring.LineString): inter_start = (inter.coords[0][0], inter.coords[0][1]) inter_end = (inter.coords[-1][0], inter.coords[-1][1]) intersections_points.extend([inter_start, inter_end]) else: assert isinstance(inter, shapely.geometry.point.Point), ( "Expected to find shapely.geometry.point.Point or " "shapely.geometry.linestring.LineString intersection, " "actually found %s." % (type(inter),)) intersections_points.append((inter.x, inter.y)) # sort by distance to start point, this makes it later on easier # to remove duplicate points inter_sorted = sorted( intersections_points, key=lambda p, ps=p_start: np.linalg.norm(np.float32(p) - ps) ) result.append(inter_sorted) return result # TODO convert this to x/y params? def shift(self, top=None, right=None, bottom=None, left=None): """Move this line string along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift this object *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift this object *from* the right (towards the left). bottom : None or int, optional Amount of pixels by which to shift this object *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift this object *from* the left (towards the right). Returns ------- result : imgaug.augmentables.lines.LineString Shifted line string. """ top = top if top is not None else 0 right = right if right is not None else 0 bottom = bottom if bottom is not None else 0 left = left if left is not None else 0 coords = np.copy(self.coords) coords[:, 0] += left - right coords[:, 1] += top - bottom return self.copy(coords=coords) def draw_mask(self, image_shape, size_lines=1, size_points=0, raise_if_out_of_image=False): """Draw this line segment as a binary image mask. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. size_lines : int, optional Thickness of the line segments. size_points : int, optional Size of the points in pixels. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Boolean line mask of shape `image_shape` (no channel axis). """ heatmap = self.draw_heatmap_array( image_shape, alpha_lines=1.0, alpha_points=1.0, size_lines=size_lines, size_points=size_points, antialiased=False, raise_if_out_of_image=raise_if_out_of_image) return heatmap > 0.5 def draw_lines_heatmap_array(self, image_shape, alpha=1.0, size=1, antialiased=True, raise_if_out_of_image=False): """Draw the line segments of this line string as a heatmap array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. alpha : float, optional Opacity of the line string. Higher values denote a more visible line string. size : int, optional Thickness of the line segments. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. 
raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray ``float32`` array of shape `image_shape` (no channel axis) with drawn line string. All values are in the interval ``[0.0, 1.0]``. """ assert len(image_shape) == 2 or ( len(image_shape) == 3 and image_shape[-1] == 1), ( "Expected (H,W) or (H,W,1) as image_shape, got %s." % ( image_shape,)) arr = self.draw_lines_on_image( np.zeros(image_shape, dtype=np.uint8), color=255, alpha=alpha, size=size, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image ) return arr.astype(np.float32) / 255.0 def draw_points_heatmap_array(self, image_shape, alpha=1.0, size=1, raise_if_out_of_image=False): """Draw the points of this line string as a heatmap array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the point mask. alpha : float, optional Opacity of the line string points. Higher values denote a more visible points. size : int, optional Size of the points in pixels. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray ``float32`` array of shape `image_shape` (no channel axis) with drawn line string points. All values are in the interval ``[0.0, 1.0]``. """ assert len(image_shape) == 2 or ( len(image_shape) == 3 and image_shape[-1] == 1), ( "Expected (H,W) or (H,W,1) as image_shape, got %s." % ( image_shape,)) arr = self.draw_points_on_image( np.zeros(image_shape, dtype=np.uint8), color=255, alpha=alpha, size=size, raise_if_out_of_image=raise_if_out_of_image ) return arr.astype(np.float32) / 255.0 def draw_heatmap_array(self, image_shape, alpha_lines=1.0, alpha_points=1.0, size_lines=1, size_points=0, antialiased=True, raise_if_out_of_image=False): """ Draw the line segments and points of the line string as a heatmap array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. alpha_lines : float, optional Opacity of the line string. Higher values denote a more visible line string. alpha_points : float, optional Opacity of the line string points. Higher values denote a more visible points. size_lines : int, optional Thickness of the line segments. size_points : int, optional Size of the points in pixels. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray ``float32`` array of shape `image_shape` (no channel axis) with drawn line segments and points. All values are in the interval ``[0.0, 1.0]``. 
""" heatmap_lines = self.draw_lines_heatmap_array( image_shape, alpha=alpha_lines, size=size_lines, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image) if size_points <= 0: return heatmap_lines heatmap_points = self.draw_points_heatmap_array( image_shape, alpha=alpha_points, size=size_points, raise_if_out_of_image=raise_if_out_of_image) heatmap = np.dstack([heatmap_lines, heatmap_points]) return np.max(heatmap, axis=2) # TODO only draw line on image of size BB around line, then paste into full # sized image def draw_lines_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3, antialiased=True, raise_if_out_of_image=False): """Draw the line segments of this line string on a given image. Parameters ---------- image : ndarray or tuple of int The image onto which to draw. Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C`` usually being ``3`` (other values are not tested). If a tuple, expected to be ``(H, W, C)`` and will lead to a new ``uint8`` array of zeros being created. color : int or iterable of int Color to use as RGB, i.e. three values. alpha : float, optional Opacity of the line string. Higher values denote a more visible line string. size : int, optional Thickness of the line segments. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray `image` with line drawn on it. """ # pylint: disable=invalid-name, misplaced-comparison-constant from .. import dtypes as iadt from ..augmenters import blend as blendlib image_was_empty = False if isinstance(image, tuple): image_was_empty = True image = np.zeros(image, dtype=np.uint8) assert image.ndim in [2, 3], ( "Expected image or shape of form (H,W) or (H,W,C), " "got shape %s." % (image.shape,)) if len(self.coords) <= 1 or alpha < 0 + 1e-4 or size < 1: return np.copy(image) if raise_if_out_of_image \ and self.is_out_of_image(image, partly=False, fully=True): raise Exception( "Cannot draw line string '%s' on image with shape %s, because " "it would be out of bounds." % ( self.__str__(), image.shape)) if image.ndim == 2: assert ia.is_single_number(color), ( "Got a 2D image. Expected then 'color' to be a single number, " "but got %s." % (str(color),)) color = [color] elif image.ndim == 3 and ia.is_single_number(color): color = [color] * image.shape[-1] image = image.astype(np.float32) height, width = image.shape[0:2] # We can't trivially exclude lines outside of the image here, because # even if start and end point are outside, there can still be parts of # the line inside the image. # TODO Do this with edge-wise intersection tests lines = [] for line_start, line_end in zip(self.coords[:-1], self.coords[1:]): # note that line() expects order (y1, x1, y2, x2), hence ([1], [0]) lines.append((line_start[1], line_start[0], line_end[1], line_end[0])) # skimage.draw.line can only handle integers lines = np.round(np.float32(lines)).astype(np.int32) # size == 0 is already covered above # Note here that we have to be careful not to draw lines two times # at their intersection points, e.g. for (p0, p1), (p1, 2) we could # end up drawing at p1 twice, leading to higher values if alpha is # used. 
color = np.float32(color) heatmap = np.zeros(image.shape[0:2], dtype=np.float32) for line in lines: if antialiased: rr, cc, val = skimage.draw.line_aa(*line) else: rr, cc = skimage.draw.line(*line) val = 1.0 # mask check here, because line() can generate coordinates # outside of the image plane rr_mask = np.logical_and(0 <= rr, rr < height) cc_mask = np.logical_and(0 <= cc, cc < width) mask = np.logical_and(rr_mask, cc_mask) if np.any(mask): rr = rr[mask] cc = cc[mask] val = val[mask] if not ia.is_single_number(val) else val heatmap[rr, cc] = val * alpha if size > 1: kernel = np.ones((size, size), dtype=np.uint8) heatmap = cv2.dilate(heatmap, kernel) if image_was_empty: image_blend = image + heatmap * color else: image_color_shape = image.shape[0:2] if image.ndim == 3: image_color_shape = image_color_shape + (1,) image_color = np.tile(color, image_color_shape) image_blend = blendlib.blend_alpha(image_color, image, heatmap) image_blend = iadt.restore_dtypes_(image_blend, np.uint8) return image_blend def draw_points_on_image(self, image, color=(0, 128, 0), alpha=1.0, size=3, copy=True, raise_if_out_of_image=False): """Draw the points of this line string onto a given image. Parameters ---------- image : ndarray or tuple of int The image onto which to draw. Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C`` usually being ``3`` (other values are not tested). If a tuple, expected to be ``(H, W, C)`` and will lead to a new ``uint8`` array of zeros being created. color : iterable of int Color to use as RGB, i.e. three values. alpha : float, optional Opacity of the line string points. Higher values denote a more visible points. size : int, optional Size of the points in pixels. copy : bool, optional Whether it is allowed to draw directly in the input array (``False``) or it has to be copied (``True``). The routine may still have to copy, even if ``copy=False`` was used. Always use the return value. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray ``float32`` array of shape `image_shape` (no channel axis) with drawn line string points. All values are in the interval ``[0.0, 1.0]``. """ from .kps import KeypointsOnImage kpsoi = KeypointsOnImage.from_xy_array(self.coords, shape=image.shape) image = kpsoi.draw_on_image( image, color=color, alpha=alpha, size=size, copy=copy, raise_if_out_of_image=raise_if_out_of_image) return image def draw_on_image(self, image, color=(0, 255, 0), color_lines=None, color_points=None, alpha=1.0, alpha_lines=None, alpha_points=None, size=1, size_lines=None, size_points=None, antialiased=True, raise_if_out_of_image=False): """Draw this line string onto an image. Parameters ---------- image : ndarray The `(H,W,C)` `uint8` image onto which to draw the line string. color : iterable of int, optional Color to use as RGB, i.e. three values. The color of the line and points are derived from this value, unless they are set. color_lines : None or iterable of int Color to use for the line segments as RGB, i.e. three values. If ``None``, this value is derived from `color`. color_points : None or iterable of int Color to use for the points as RGB, i.e. three values. If ``None``, this value is derived from ``0.5 * color``. alpha : float, optional Opacity of the line string. Higher values denote more visible points. 
The alphas of the line and points are derived from this value, unless they are set. alpha_lines : None or float, optional Opacity of the line string. Higher values denote more visible line string. If ``None``, this value is derived from `alpha`. alpha_points : None or float, optional Opacity of the line string points. Higher values denote more visible points. If ``None``, this value is derived from `alpha`. size : int, optional Size of the line string. The sizes of the line and points are derived from this value, unless they are set. size_lines : None or int, optional Thickness of the line segments. If ``None``, this value is derived from `size`. size_points : None or int, optional Size of the points in pixels. If ``None``, this value is derived from ``3 * size``. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. This does currently not affect the point drawing. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Image with line string drawn on it. """ def _assert_not_none(arg_name, arg_value): assert arg_value is not None, ( "Expected '%s' to not be None, got type %s." % ( arg_name, type(arg_value),)) _assert_not_none("color", color) _assert_not_none("alpha", alpha) _assert_not_none("size", size) color_lines = color_lines if color_lines is not None \ else np.float32(color) color_points = color_points if color_points is not None \ else np.float32(color) * 0.5 alpha_lines = alpha_lines if alpha_lines is not None \ else np.float32(alpha) alpha_points = alpha_points if alpha_points is not None \ else np.float32(alpha) size_lines = size_lines if size_lines is not None else size size_points = size_points if size_points is not None else size * 3 image = self.draw_lines_on_image( image, color=np.array(color_lines).astype(np.uint8), alpha=alpha_lines, size=size_lines, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image) image = self.draw_points_on_image( image, color=np.array(color_points).astype(np.uint8), alpha=alpha_points, size=size_points, copy=False, raise_if_out_of_image=raise_if_out_of_image) return image def extract_from_image(self, image, size=1, pad=True, pad_max=None, antialiased=True, prevent_zero_size=True): """Extract all image pixels covered by the line string. This will only extract pixels overlapping with the line string. As a rectangular image array has to be returned, non-overlapping pixels will be set to zero. This function will by default zero-pad the image if the line string is partially/fully outside of the image. This is for consistency with the same methods for bounding boxes and polygons. Parameters ---------- image : ndarray The image of shape `(H,W,[C])` from which to extract the pixels within the line string. size : int, optional Thickness of the line. pad : bool, optional Whether to zero-pad the image if the object is partially/fully outside of it. pad_max : None or int, optional The maximum number of pixels that may be zero-paded on any side, i.e. if this has value ``N`` the total maximum of added pixels is ``4*N``. This option exists to prevent extremely large images as a result of single points being moved very far away during augmentation. antialiased : bool, optional Whether to apply anti-aliasing to the line string. prevent_zero_size : bool, optional Whether to prevent height or width of the extracted image from becoming zero. 
If this is set to ``True`` and height or width of the line string is below ``1``, the height/width will be increased to ``1``. This can be useful to prevent problems, e.g. with image saving or plotting. If it is set to ``False``, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or ``W`` potentially being ``0``. Returns ------- (H',W') ndarray or (H',W',C) ndarray Pixels overlapping with the line string. Zero-padded if the line string is partially/fully outside of the image and ``pad=True``. If `prevent_zero_size` is activated, it is guarantueed that ``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``. """ from .bbs import BoundingBox assert image.ndim in [2, 3], ( "Expected image of shape (H,W,[C]), got shape %s." % ( image.shape,)) if len(self.coords) == 0 or size <= 0: if prevent_zero_size: return np.zeros((1, 1) + image.shape[2:], dtype=image.dtype) return np.zeros((0, 0) + image.shape[2:], dtype=image.dtype) xx = self.xx_int yy = self.yy_int # this would probably work if drawing was subpixel-accurate # x1 = np.min(self.coords[:, 0]) - (size / 2) # y1 = np.min(self.coords[:, 1]) - (size / 2) # x2 = np.max(self.coords[:, 0]) + (size / 2) # y2 = np.max(self.coords[:, 1]) + (size / 2) # this works currently with non-subpixel-accurate drawing sizeh = (size - 1) / 2 x1 = np.min(xx) - sizeh y1 = np.min(yy) - sizeh x2 = np.max(xx) + 1 + sizeh y2 = np.max(yy) + 1 + sizeh bb = BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2) if len(self.coords) == 1: return bb.extract_from_image(image, pad=pad, pad_max=pad_max, prevent_zero_size=prevent_zero_size) heatmap = self.draw_lines_heatmap_array( image.shape[0:2], alpha=1.0, size=size, antialiased=antialiased) if image.ndim == 3: heatmap = np.atleast_3d(heatmap) image_masked = image.astype(np.float32) * heatmap extract = bb.extract_from_image(image_masked, pad=pad, pad_max=pad_max, prevent_zero_size=prevent_zero_size) return np.clip(np.round(extract), 0, 255).astype(np.uint8) def concatenate(self, other): """Concatenate this line string with another one. This will add a line segment between the end point of this line string and the start point of `other`. Parameters ---------- other : imgaug.augmentables.lines.LineString or ndarray or iterable of tuple of number The points to add to this line string. Returns ------- imgaug.augmentables.lines.LineString New line string with concatenated points. The `label` of this line string will be kept. """ if not isinstance(other, LineString): other = LineString(other) return self.deepcopy( coords=np.concatenate([self.coords, other.coords], axis=0)) # TODO add tests def subdivide(self, points_per_edge): """Derive a new line string with ``N`` interpolated points per edge. The interpolated points have (per edge) regular distances to each other. For each edge between points ``A`` and ``B`` this adds points at ``A + (i/(1+N)) * (B - A)``, where ``i`` is the index of the added point and ``N`` is the number of points to add per edge. Calling this method two times will split each edge at its center and then again split each newly created edge at their center. It is equivalent to calling `subdivide(3)`. Parameters ---------- points_per_edge : int Number of points to interpolate on each edge. Returns ------- imgaug.augmentables.lines.LineString Line string with subdivided edges. 
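        Examples
        --------
        A small sketch of the interpolation formula above; the coordinates
        are illustrative assumptions.

        >>> from imgaug.augmentables.lines import LineString
        >>>
        >>> ls = LineString([(0.0, 0.0), (4.0, 0.0)])
        >>> ls_subd = ls.subdivide(points_per_edge=1)

        The single edge from ``(0, 0)`` to ``(4, 0)`` is split at its center,
        i.e. a point is added at ``(0,0) + (1/2) * ((4,0) - (0,0)) = (2, 0)``.
        Calling ``subdivide(1)`` twice is hence equivalent to calling
        ``subdivide(3)`` once.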
""" if len(self.coords) <= 1 or points_per_edge < 1: return self.deepcopy() coords = interpolate_points(self.coords, nb_steps=points_per_edge, closed=False) return self.deepcopy(coords=coords) def to_keypoints(self): """Convert the line string points to keypoints. Returns ------- list of imgaug.augmentables.kps.Keypoint Points of the line string as keypoints. """ # TODO get rid of this deferred import from imgaug.augmentables.kps import Keypoint return [Keypoint(x=x, y=y) for (x, y) in self.coords] def to_bounding_box(self): """Generate a bounding box encapsulating the line string. Returns ------- None or imgaug.augmentables.bbs.BoundingBox Bounding box encapsulating the line string. ``None`` if the line string contained no points. """ from .bbs import BoundingBox # we don't have to mind the case of len(.) == 1 here, because # zero-sized BBs are considered valid if len(self.coords) == 0: return None return BoundingBox(x1=np.min(self.xx), y1=np.min(self.yy), x2=np.max(self.xx), y2=np.max(self.yy), label=self.label) def to_polygon(self): """Generate a polygon from the line string points. Returns ------- imgaug.augmentables.polys.Polygon Polygon with the same corner points as the line string. Note that the polygon might be invalid, e.g. contain less than ``3`` points or have self-intersections. """ from .polys import Polygon return Polygon(self.coords, label=self.label) def to_heatmap(self, image_shape, size_lines=1, size_points=0, antialiased=True, raise_if_out_of_image=False): """Generate a heatmap object from the line string. This is similar to :func:`imgaug.augmentables.lines.LineString.draw_lines_heatmap_array`, executed with ``alpha=1.0``. The result is wrapped in a :class:`imgaug.augmentables.heatmaps.HeatmapsOnImage` object instead of just an array. No points are drawn. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. size_lines : int, optional Thickness of the line. size_points : int, optional Size of the points in pixels. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- imgaug.augmentables.heatmaps.HeatmapsOnImage Heatmap object containing drawn line string. """ from .heatmaps import HeatmapsOnImage return HeatmapsOnImage( self.draw_heatmap_array( image_shape, size_lines=size_lines, size_points=size_points, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image), shape=image_shape ) def to_segmentation_map(self, image_shape, size_lines=1, size_points=0, raise_if_out_of_image=False): """Generate a segmentation map object from the line string. This is similar to :func:`imgaug.augmentables.lines.LineString.draw_mask`. The result is wrapped in a ``SegmentationMapsOnImage`` object instead of just an array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. size_lines : int, optional Thickness of the line. size_points : int, optional Size of the points in pixels. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- imgaug.augmentables.segmaps.SegmentationMapsOnImage Segmentation map object containing drawn line string. 
""" from .segmaps import SegmentationMapsOnImage return SegmentationMapsOnImage( self.draw_mask( image_shape, size_lines=size_lines, size_points=size_points, raise_if_out_of_image=raise_if_out_of_image), shape=image_shape ) # TODO make this non-approximate def coords_almost_equals(self, other, max_distance=1e-4, points_per_edge=8): """Compare this and another LineString's coordinates. This is an approximate method based on pointwise distances and can in rare corner cases produce wrong outputs. Parameters ---------- other : imgaug.augmentables.lines.LineString or tuple of number or ndarray or list of ndarray or list of tuple of number The other line string or its coordinates. max_distance : float, optional Max distance of any point from the other line string before the two line strings are evaluated to be unequal. points_per_edge : int, optional How many points to interpolate on each edge. Returns ------- bool Whether the two LineString's coordinates are almost identical, i.e. the max distance is below the threshold. If both have no coordinates, ``True`` is returned. If only one has no coordinates, ``False`` is returned. Beyond that, the number of points is not evaluated. """ if isinstance(other, LineString): pass elif isinstance(other, tuple): other = LineString([other]) else: other = LineString(other) if len(self.coords) == 0 and len(other.coords) == 0: return True if 0 in [len(self.coords), len(other.coords)]: # only one of the two line strings has no coords return False self_subd = self.subdivide(points_per_edge) other_subd = other.subdivide(points_per_edge) dist_self2other = self_subd.compute_pointwise_distances(other_subd) dist_other2self = other_subd.compute_pointwise_distances(self_subd) dist = max(np.max(dist_self2other), np.max(dist_other2self)) return dist < max_distance def almost_equals(self, other, max_distance=1e-4, points_per_edge=8): """Compare this and another line string. Parameters ---------- other: imgaug.augmentables.lines.LineString The other object to compare against. Expected to be a ``LineString``. max_distance : float, optional See :func:`imgaug.augmentables.lines.LineString.coords_almost_equals`. points_per_edge : int, optional See :func:`imgaug.augmentables.lines.LineString.coords_almost_equals`. Returns ------- bool ``True`` if the coordinates are almost equal and additionally the labels are equal. Otherwise ``False``. """ if self.label != other.label: return False return self.coords_almost_equals( other, max_distance=max_distance, points_per_edge=points_per_edge) def copy(self, coords=None, label=None): """Create a shallow copy of this line string. Parameters ---------- coords : None or iterable of tuple of number or ndarray If not ``None``, then the coords of the copied object will be set to this value. label : None or str If not ``None``, then the label of the copied object will be set to this value. Returns ------- imgaug.augmentables.lines.LineString Shallow copy. """ return LineString(coords=self.coords if coords is None else coords, label=self.label if label is None else label) def deepcopy(self, coords=None, label=None): """Create a deep copy of this line string. Parameters ---------- coords : None or iterable of tuple of number or ndarray If not ``None``, then the coords of the copied object will be set to this value. label : None or str If not ``None``, then the label of the copied object will be set to this value. Returns ------- imgaug.augmentables.lines.LineString Deep copy. 
""" return LineString( coords=np.copy(self.coords) if coords is None else coords, label=copylib.deepcopy(self.label) if label is None else label) def __getitem__(self, indices): """Get the coordinate(s) with given indices. Returns ------- ndarray xy-coordinate(s) as ``ndarray``. """ return self.coords[indices] def __iter__(self): """Iterate over the coordinates of this instance. Yields ------ ndarray An ``(2,)`` ``ndarray`` denoting an xy-coordinate pair. """ return iter(self.coords) def __repr__(self): return self.__str__() def __str__(self): points_str = ", ".join( ["(%.2f, %.2f)" % (x, y) for x, y in self.coords]) return "LineString([%s], label=%s)" % (points_str, self.label) # TODO # distance # hausdorff_distance # is_fully_within_image() # is_partly_within_image() # is_out_of_image() # draw() # draw_mask() # extract_from_image() # to_keypoints() # intersects(other) # concat(other) # is_self_intersecting() # remove_self_intersections() class LineStringsOnImage(IAugmentable): """Object that represents all line strings on a single image. Parameters ---------- line_strings : list of imgaug.augmentables.lines.LineString List of line strings on the image. shape : tuple of int or ndarray The shape of the image on which the objects are placed. Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. Examples -------- >>> import numpy as np >>> from imgaug.augmentables.lines import LineString, LineStringsOnImage >>> >>> image = np.zeros((100, 100)) >>> lss = [ >>> LineString([(0, 0), (10, 0)]), >>> LineString([(10, 20), (30, 30), (50, 70)]) >>> ] >>> lsoi = LineStringsOnImage(lss, shape=image.shape) """ def __init__(self, line_strings, shape): assert ia.is_iterable(line_strings), ( "Expected 'line_strings' to be an iterable, got type '%s'." % ( type(line_strings),)) assert all([isinstance(v, LineString) for v in line_strings]), ( "Expected iterable of LineString, got types: %s." % ( ", ".join([str(type(v)) for v in line_strings]) )) self.line_strings = line_strings self.shape = normalize_shape(shape) @property def items(self): """Get the line strings in this container. Returns ------- list of LineString Line strings within this container. """ return self.line_strings @property def empty(self): """Estimate whether this object contains zero line strings. Returns ------- bool ``True`` if this object contains zero line strings. """ return len(self.line_strings) == 0 def on(self, image): """Project the line strings from one image shape to a new one. Parameters ---------- image : ndarray or tuple of int The new image onto which to project. Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. Returns ------- imgaug.augmentables.lines.LineStrings Object containing all projected line strings. """ # pylint: disable=invalid-name shape = normalize_shape(image) if shape[0:2] == self.shape[0:2]: return self.deepcopy() line_strings = [ls.project(self.shape, shape) for ls in self.line_strings] return self.deepcopy(line_strings=line_strings, shape=shape) @classmethod def from_xy_arrays(cls, xy, shape): """Convert an ``(N,M,2)`` ndarray to a ``LineStringsOnImage`` object. This is the inverse of :func:`imgaug.augmentables.lines.LineStringsOnImage.to_xy_array`. Parameters ---------- xy : (N,M,2) ndarray or iterable of (M,2) ndarray Array containing the point coordinates ``N`` line strings with each ``M`` points given as ``(x,y)`` coordinates. ``M`` may differ if an iterable of arrays is used. Each array should usually be of dtype ``float32``. 
shape : tuple of int ``(H,W,[C])`` shape of the image on which the line strings are placed. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Object containing a list of ``LineString`` objects following the provided point coordinates. """ lss = [] for xy_ls in xy: lss.append(LineString(xy_ls)) return cls(lss, shape) def to_xy_arrays(self, dtype=np.float32): """Convert this object to an iterable of ``(M,2)`` arrays of points. This is the inverse of :func:`imgaug.augmentables.lines.LineStringsOnImage.from_xy_array`. Parameters ---------- dtype : numpy.dtype, optional Desired output datatype of the ndarray. Returns ------- list of ndarray The arrays of point coordinates, each given as ``(M,2)``. """ from .. import dtypes as iadt return [iadt.restore_dtypes_(np.copy(ls.coords), dtype) for ls in self.line_strings] def draw_on_image(self, image, color=(0, 255, 0), color_lines=None, color_points=None, alpha=1.0, alpha_lines=None, alpha_points=None, size=1, size_lines=None, size_points=None, antialiased=True, raise_if_out_of_image=False): """Draw all line strings onto a given image. Parameters ---------- image : ndarray The ``(H,W,C)`` ``uint8`` image onto which to draw the line strings. color : iterable of int, optional Color to use as RGB, i.e. three values. The color of the lines and points are derived from this value, unless they are set. color_lines : None or iterable of int Color to use for the line segments as RGB, i.e. three values. If ``None``, this value is derived from `color`. color_points : None or iterable of int Color to use for the points as RGB, i.e. three values. If ``None``, this value is derived from ``0.5 * color``. alpha : float, optional Opacity of the line strings. Higher values denote more visible points. The alphas of the line and points are derived from this value, unless they are set. alpha_lines : None or float, optional Opacity of the line strings. Higher values denote more visible line string. If ``None``, this value is derived from `alpha`. alpha_points : None or float, optional Opacity of the line string points. Higher values denote more visible points. If ``None``, this value is derived from `alpha`. size : int, optional Size of the line strings. The sizes of the line and points are derived from this value, unless they are set. size_lines : None or int, optional Thickness of the line segments. If ``None``, this value is derived from `size`. size_points : None or int, optional Size of the points in pixels. If ``None``, this value is derived from ``3 * size``. antialiased : bool, optional Whether to draw the lines with anti-aliasing activated. This does currently not affect the point drawing. raise_if_out_of_image : bool, optional Whether to raise an error if a line string is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Image with line strings drawn on it. """ # TODO improve efficiency here by copying only once for ls in self.line_strings: image = ls.draw_on_image( image, color=color, color_lines=color_lines, color_points=color_points, alpha=alpha, alpha_lines=alpha_lines, alpha_points=alpha_points, size=size, size_lines=size_lines, size_points=size_points, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image ) return image def remove_out_of_image(self, fully=True, partly=False): """ Remove all line strings that are fully/partially outside of an image. 
Parameters ---------- fully : bool, optional Whether to remove line strings that are fully outside of the image. partly : bool, optional Whether to remove line strings that are partially outside of the image. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Reduced set of line strings. Those that are fully/partially outside of the given image plane are removed. """ lss_clean = [ls for ls in self.line_strings if not ls.is_out_of_image( self.shape, fully=fully, partly=partly)] return LineStringsOnImage(lss_clean, shape=self.shape) def remove_out_of_image_fraction(self, fraction): """Remove all LS with an out of image fraction of at least `fraction`. Parameters ---------- fraction : number Minimum out of image fraction that a line string has to have in order to be removed. A fraction of ``1.0`` removes only line strings that are ``100%`` outside of the image. A fraction of ``0.0`` removes all line strings. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Reduced set of line strings, with those that had an out of image fraction greater or equal the given one removed. """ return _remove_out_of_image_fraction(self, fraction, LineStringsOnImage) def clip_out_of_image(self): """ Clip off all parts of the line strings that are outside of an image. .. note :: The result can contain fewer line strings than the input did. That happens when a polygon is fully outside of the image plane. .. note :: The result can also contain *more* line strings than the input did. That happens when distinct parts of a line string are only connected by line segments that are outside of the image plane and hence will be clipped off, resulting in two or more unconnected line string parts that are left in the image plane. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Line strings, clipped to fall within the image dimensions. The count of output line strings may differ from the input count. """ lss_cut = [ls_clipped for ls in self.line_strings for ls_clipped in ls.clip_out_of_image(self.shape)] return LineStringsOnImage(lss_cut, shape=self.shape) def shift(self, top=None, right=None, bottom=None, left=None): """Move the line strings along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift all objects *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift all objects *from* the right (towads the left). bottom : None or int, optional Amount of pixels by which to shift all objects *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift all objects *from* the left (towards the right). Returns ------- imgaug.augmentables.lines.LineStringsOnImage Shifted line strings. """ lss_new = [ls.shift(top=top, right=right, bottom=bottom, left=left) for ls in self.line_strings] return LineStringsOnImage(lss_new, shape=self.shape) def to_xy_array(self): """Convert all line string coordinates to one array of shape ``(N,2)``. Returns ------- (N, 2) ndarray Array containing all xy-coordinates of all line strings within this instance. """ if self.empty: return np.zeros((0, 2), dtype=np.float32) return np.concatenate([ls.coords for ls in self.line_strings]) def fill_from_xy_array_(self, xy): """Modify the corner coordinates of all line strings in-place. .. note :: This currently expects that `xy` contains exactly as many coordinates as the line strings within this instance have corner points. Otherwise, an ``AssertionError`` will be raised. 
Parameters ---------- xy : (N, 2) ndarray or iterable of iterable of number XY-Coordinates of ``N`` corner points. ``N`` must match the number of corner points in all line strings within this instance. Returns ------- LineStringsOnImage This instance itself, with updated coordinates. Note that the instance was modified in-place. """ xy = np.array(xy, dtype=np.float32) # note that np.array([]) is (0,), not (0, 2) assert xy.shape[0] == 0 or (xy.ndim == 2 and xy.shape[-1] == 2), ( # pylint: disable=unsubscriptable-object "Expected input array to have shape (N,2), " "got shape %s." % (xy.shape,)) counter = 0 for ls in self.line_strings: nb_points = len(ls.coords) assert counter + nb_points <= len(xy), ( "Received fewer points than there are corner points in " "all line strings. Got %d points, expected %d." % ( len(xy), sum([len(ls_.coords) for ls_ in self.line_strings]))) ls.coords[:, ...] = xy[counter:counter+nb_points] counter += nb_points assert counter == len(xy), ( "Expected to get exactly as many xy-coordinates as there are " "points in all line strings polygons within this instance. " "Got %d points, could only assign %d points." % ( len(xy), counter,)) return self def to_keypoints_on_image(self): """Convert the line strings to one ``KeypointsOnImage`` instance. Returns ------- imgaug.augmentables.kps.KeypointsOnImage A keypoints instance containing ``N`` coordinates for a total of ``N`` points in the ``coords`` attributes of all line strings. Order matches the order in ``line_strings`` and ``coords`` attributes. """ from . import KeypointsOnImage if self.empty: return KeypointsOnImage([], shape=self.shape) coords = np.concatenate( [ls.coords for ls in self.line_strings], axis=0) return KeypointsOnImage.from_xy_array(coords, shape=self.shape) def invert_to_keypoints_on_image_(self, kpsoi): """Invert the output of ``to_keypoints_on_image()`` in-place. This function writes in-place into this ``LineStringsOnImage`` instance. Parameters ---------- kpsoi : imgaug.augmentables.kps.KeypointsOnImages Keypoints to convert back to line strings, i.e. the outputs of ``to_keypoints_on_image()``. Returns ------- LineStringsOnImage Line strings container with updated coordinates. Note that the instance is also updated in-place. """ lss = self.line_strings coordss = [ls.coords for ls in lss] nb_points_exp = sum([len(coords) for coords in coordss]) assert len(kpsoi.keypoints) == nb_points_exp, ( "Expected %d coordinates, got %d." % ( nb_points_exp, len(kpsoi.keypoints))) xy_arr = kpsoi.to_xy_array() counter = 0 for ls in lss: coords = ls.coords coords[:, :] = xy_arr[counter:counter+len(coords), :] counter += len(coords) self.shape = kpsoi.shape return self def copy(self, line_strings=None, shape=None): """Create a shallow copy of this object. Parameters ---------- line_strings : None or list of imgaug.augmentables.lines.LineString, optional List of line strings on the image. If not ``None``, then the ``line_strings`` attribute of the copied object will be set to this value. shape : None or tuple of int or ndarray, optional The shape of the image on which the objects are placed. Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. If not ``None``, then the ``shape`` attribute of the copied object will be set to this value. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Shallow copy. 
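        Examples
        --------
        A minimal sketch; since this is a shallow copy, the contained
        ``LineString`` instances are shared with the original object.

        >>> from imgaug.augmentables.lines import LineString, LineStringsOnImage
        >>>
        >>> lsoi = LineStringsOnImage(
        >>>     [LineString([(0, 0), (10, 10)])], shape=(50, 50, 3))
        >>> lsoi_copy = lsoi.copy()
        >>> assert lsoi_copy.line_strings[0] is lsoi.line_strings[0]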
""" lss = self.line_strings if line_strings is None else line_strings shape = self.shape if shape is None else shape return LineStringsOnImage(line_strings=lss, shape=shape) def deepcopy(self, line_strings=None, shape=None): """Create a deep copy of the object. Parameters ---------- line_strings : None or list of imgaug.augmentables.lines.LineString, optional List of line strings on the image. If not ``None``, then the ``line_strings`` attribute of the copied object will be set to this value. shape : None or tuple of int or ndarray, optional The shape of the image on which the objects are placed. Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. If not ``None``, then the ``shape`` attribute of the copied object will be set to this value. Returns ------- imgaug.augmentables.lines.LineStringsOnImage Deep copy. """ lss = self.line_strings if line_strings is None else line_strings shape = self.shape if shape is None else shape return LineStringsOnImage( line_strings=[ls.deepcopy() for ls in lss], shape=tuple(shape)) def __iter__(self): """Iterate over the line strings in this container. Yields ------ LineString A line string in this container. The order is identical to the order in the line string list provided upon class initialization. """ return iter(self.line_strings) def __repr__(self): return self.__str__() def __str__(self): return "LineStringsOnImage(%s, shape=%s)" % ( str(self.line_strings), self.shape) def _is_point_on_line(line_start, line_end, point, eps=1e-4): dist_s2e = np.linalg.norm(np.float32(line_start) - np.float32(line_end)) dist_s2p2e = ( np.linalg.norm(np.float32(line_start) - np.float32(point)) + np.linalg.norm(np.float32(point) - np.float32(line_end)) ) return -eps < (dist_s2p2e - dist_s2e) < eps def _flatten_shapely_collection(collection): import shapely.geometry if not isinstance(collection, list): collection = [collection] for item in collection: if hasattr(item, "geoms"): for subitem in _flatten_shapely_collection(item.geoms): # MultiPoint.geoms actually returns a GeometrySequence if isinstance(subitem, shapely.geometry.base.GeometrySequence): for subsubel in subitem: yield subsubel else: yield _flatten_shapely_collection(subitem) else: yield item def _convert_var_to_shapely_geometry(var): import shapely.geometry if isinstance(var, tuple): geom = shapely.geometry.Point(var[0], var[1]) elif isinstance(var, list): assert len(var) > 0, ( "Expected list to contain at least one coordinate, " "got %d coordinates." % (len(var),)) if isinstance(var[0], tuple): geom = shapely.geometry.LineString(var) elif all([isinstance(v, LineString) for v in var]): geom = shapely.geometry.MultiLineString([ shapely.geometry.LineString(ls.coords) for ls in var ]) else: raise ValueError( "Could not convert list-input to shapely geometry. Invalid " "datatype. List elements had datatypes: %s." % ( ", ".join([str(type(v)) for v in var]),)) elif isinstance(var, LineString): geom = shapely.geometry.LineString(var.coords) else: raise ValueError( "Could not convert input to shapely geometry. Invalid datatype. " "Got: %s" % (type(var),)) return geom
#!/usr/bin/env python3 # vi:nu:et:sts=4 ts=4 sw=4 """ Perform various automated code reviews on the source. The module must be executed from the repository that contains the Jenkinsfile. """ # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # For more information, please refer to <http://unlicense.org/> import argparse import json import os import re import subprocess import sys import time sys.path.append('./scripts') import util oArgs = None szAppName = 'App01sq' szGoDir = '${HOME}/go' ################################################################################ # Object Classes and Functions ################################################################################ #--------------------------------------------------------------------- # parse_args -- Parse the CLI Arguments #--------------------------------------------------------------------- def parse_args(listArgV=None): ''' ''' global oArgs # Parse the command line. 
szUsage = "usage: %prog [options] sourceDirectoryPath [destinationDirectoryPath]" cmd_prs = argparse.ArgumentParser( ) cmd_prs.add_argument('-b', '--build', action='store_false', dest='fBuild', default=True, help='Do not build genapp before using it' ) cmd_prs.add_argument('-d', '--debug', action='store_true', dest='fDebug', default=False, help='Set debug mode' ) cmd_prs.add_argument('-f', '--force', action='store_true', dest='fForce', default=False, help='Set force mode' ) cmd_prs.add_argument('-v', '--verbose', action='count', default=1, dest='iVerbose', help='increase output verbosity' ) cmd_prs.add_argument('--appdir', action='store', dest='szAppDir', default='/tmp', help='Set Application Base Directory' ) cmd_prs.add_argument('--appname', action='store', dest='szAppName', default='app01sq', help='Set Application Base Name' ) cmd_prs.add_argument('--bindir', action='store', dest='szBinDir', default='/tmp/bin', help='Set Binary Directory' ) cmd_prs.add_argument('--mdldir', action='store', dest='szModelDir', default='./models', help='Set genapp Model Directory' ) cmd_prs.add_argument('args', nargs=argparse.REMAINDER, default=[]) oArgs = cmd_prs.parse_args(listArgV) if oArgs.iVerbose: print('*****************************************') print('* Linting the Application *') print('*****************************************') print() oArgs.szAppPath = os.path.join(oArgs.szAppDir, oArgs.szAppName) if oArgs.fDebug: print("In DEBUG Mode...") print('Args:', oArgs) ################################################################################ # Main Program Processing ################################################################################ def main_cli(listArgV=None): """ Command-line interface. """ global oArgs # Parse the command line. parse_args(listArgV) # Perform the specified actions. iRc = 0 try: print("Do something here!") finally: pass return iRc ################################################################################ # Command-line interface ################################################################################ if '__main__' == __name__: startTime = time.time() iRc = main_cli(sys.argv[1:]) if oArgs.iVerbose or oArgs.fDebug: if 0 == iRc: print("...Successful completion.") else: print("...Completion Failure of %d" % (iRc)) endTime = time.time() if oArgs.iVerbose or oArgs.fDebug: print("Start Time: %s" % (time.ctime(startTime))) print("End Time: %s" % (time.ctime(endTime))) diffTime = endTime - startTime # float Time in seconds iSecs = int(diffTime % 60.0) iMins = int((diffTime / 60.0) % 60.0) iHrs = int(diffTime / 3600.0) print("run Time: %d:%02d:%02d" % (iHrs, iMins, iSecs)) sys.exit(iRc or 0)
# # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Database structure and objects supporting EdgeDB metadata.""" from __future__ import annotations from typing import * import re import textwrap from edb import _edgeql_rust from edb.common import context as parser_context from edb.common import debug from edb.common import exceptions from edb.common import uuidgen from edb.edgeql import qltypes from edb.edgeql import quote as qlquote from edb.schema import casts as s_casts from edb.schema import constraints as s_constr from edb.schema import links as s_links from edb.schema import name as s_name from edb.schema import objects as s_obj from edb.schema import objtypes as s_objtypes from edb.schema import pointers as s_pointers from edb.schema import properties as s_props from edb.schema import scalars as s_scalars from edb.schema import schema as s_schema from edb.schema import sources as s_sources from edb.schema import types as s_types from edb.server import defines from edb.server import compiler as edbcompiler from edb.server import bootstrap as edbbootstrap from edb.server import pgcluster from . import common from . import dbops from . import types if TYPE_CHECKING: import asyncpg q = common.qname qi = common.quote_ident ql = common.quote_literal qt = common.quote_type DATABASE_ID_NAMESPACE = uuidgen.UUID('0e6fed66-204b-11e9-8666-cffd58a5240b') CONFIG_ID_NAMESPACE = uuidgen.UUID('a48b38fa-349b-11e9-a6be-4f337f82f5ad') CONFIG_ID = uuidgen.UUID('172097a4-39f4-11e9-b189-9321eb2f4b97') class DBConfigTable(dbops.Table): def __init__(self) -> None: super().__init__(name=('edgedb', '_db_config')) self.add_columns([ dbops.Column(name='name', type='text'), dbops.Column(name='value', type='jsonb'), ]) self.add_constraint( dbops.UniqueConstraint( table_name=('edgedb', '_db_config'), columns=['name'], ), ) class ExpressionType(dbops.CompositeType): def __init__(self) -> None: super().__init__(name=('edgedb', 'expression_t')) self.add_columns([ dbops.Column(name='text', type='text'), dbops.Column(name='refs', type='uuid[]'), ]) class BigintDomain(dbops.Domain): """Bigint: a variant of numeric that enforces zero digits after the dot. We're using an explicit scale check as opposed to simply specifying the numeric bounds, because using bounds severly restricts the range of the numeric type (1000 vs 131072 digits). """ def __init__(self) -> None: super().__init__( name=('edgedb', 'bigint_t'), base='numeric', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'bigint_t'), expr=("scale(VALUE) = 0 AND VALUE != 'NaN'"), ), ), ) class TimestampTzDomain(dbops.Domain): """Timestamptz clamped to years 0001-9999. The default timestamp range of (4713 BC - 294276 AD) has problems: Postgres isn't ISO compliant with years out of the 0-9999 range and language compatibility is questionable. 
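    For example, a value such as ``'2020-01-01T00:00:00+00'`` satisfies the
    ``EXTRACT``-based check below, while timestamps whose year falls outside
    the 1-9999 range (including BC dates) are rejected by the domain
    constraint.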
""" def __init__(self) -> None: super().__init__( name=('edgedb', 'timestamptz_t'), base='timestamptz', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'timestamptz_t'), expr=("EXTRACT(years from VALUE) BETWEEN 1 AND 9999"), ), ), ) class TimestampDomain(dbops.Domain): """Timestamp clamped to years 0001-9999. The default timestamp range of (4713 BC - 294276 AD) has problems: Postgres isn't ISO compliant with years out of the 0-9999 range and language compatibility is questionable. """ def __init__(self) -> None: super().__init__( name=('edgedb', 'timestamp_t'), base='timestamp', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'timestamp_t'), expr=("EXTRACT(years from VALUE) BETWEEN 1 AND 9999"), ), ), ) class DateDomain(dbops.Domain): """Date clamped to years 0001-9999. The default timestamp range of (4713 BC - 294276 AD) has problems: Postgres isn't ISO compliant with years out of the 0-9999 range and language compatibility is questionable. """ def __init__(self) -> None: super().__init__( name=('edgedb', 'date_t'), base='date', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'date_t'), expr=("EXTRACT(years from VALUE) BETWEEN 1 AND 9999"), ), ), ) class DurationDomain(dbops.Domain): def __init__(self) -> None: super().__init__( name=('edgedb', 'duration_t'), base='interval', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'duration_t'), expr=r''' EXTRACT(months from VALUE) = 0 AND EXTRACT(years from VALUE) = 0 AND EXTRACT(days from VALUE) = 0 ''', ), ), ) class RelativeDurationDomain(dbops.Domain): def __init__(self) -> None: super().__init__( name=('edgedb', 'relative_duration_t'), base='interval', constraints=( dbops.DomainCheckConstraint( domain_name=('edgedb', 'relative_duration_t'), expr="true", ), ), ) class AlterCurrentDatabaseSetString(dbops.Function): """Alter a PostgreSQL configuration parameter of the current database.""" text = ''' BEGIN EXECUTE 'ALTER DATABASE ' || quote_ident(current_database()) || ' SET ' || quote_ident(parameter) || ' = ' || coalesce(quote_literal(value), 'DEFAULT'); RETURN value; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_alter_current_database_set'), args=[('parameter', ('text',)), ('value', ('text',))], returns=('text',), volatility='volatile', language='plpgsql', text=self.text, ) class AlterCurrentDatabaseSetStringArray(dbops.Function): """Alter a PostgreSQL configuration parameter of the current database.""" text = ''' BEGIN EXECUTE 'ALTER DATABASE ' || quote_ident(current_database()) || ' SET ' || quote_ident(parameter) || ' = ' || coalesce( (SELECT array_to_string(array_agg(quote_literal(q.v)), ',') FROM unnest(value) AS q(v) ), 'DEFAULT' ); RETURN value; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_alter_current_database_set'), args=[ ('parameter', ('text',)), ('value', ('text[]',)), ], returns=('text[]',), volatility='volatile', language='plpgsql', text=self.text, ) class AlterCurrentDatabaseSetNonArray(dbops.Function): """Alter a PostgreSQL configuration parameter of the current database.""" text = ''' BEGIN EXECUTE 'ALTER DATABASE ' || quote_ident(current_database()) || ' SET ' || quote_ident(parameter) || ' = ' || coalesce(value::text, 'DEFAULT'); RETURN value; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_alter_current_database_set'), args=[ ('parameter', ('text',)), ('value', ('anynonarray',)), ], returns=('anynonarray',), volatility='volatile', language='plpgsql', text=self.text, ) 
class AlterCurrentDatabaseSetArray(dbops.Function): """Alter a PostgreSQL configuration parameter of the current database.""" text = ''' BEGIN EXECUTE 'ALTER DATABASE ' || quote_ident(current_database()) || ' SET ' || quote_ident(parameter) || ' = ' || coalesce( (SELECT array_to_string(array_agg(q.v::text), ',') FROM unnest(value) AS q(v) ), 'DEFAULT' ); RETURN value; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_alter_current_database_set'), args=[ ('parameter', ('text',)), ('value', ('anyarray',)), ], returns=('anyarray',), volatility='volatile', language='plpgsql', text=self.text, ) class StrToBigint(dbops.Function): """Parse bigint from text.""" text = r''' SELECT (CASE WHEN scale(v.column1) = 0 THEN v.column1 ELSE edgedb.raise( NULL::numeric, 'invalid_text_representation', msg => ( 'invalid syntax for edgedb.bigint_t: ' || quote_literal(val) ) ) END)::edgedb.bigint_t FROM (VALUES ( val::numeric )) AS v ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_bigint'), args=[('val', ('text',))], returns=('edgedb', 'bigint_t'), # Stable because it's raising exceptions. volatility='stable', text=self.text) class StrToDecimal(dbops.Function): """Parse decimal from text.""" text = r''' SELECT (CASE WHEN v.column1 != 'NaN' THEN v.column1 ELSE edgedb.raise( NULL::numeric, 'invalid_text_representation', msg => ( 'invalid syntax for numeric: ' || quote_literal(val) ) ) END) FROM (VALUES ( val::numeric )) AS v ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_decimal'), args=[('val', ('text',))], returns=('numeric',), # Stable because it's raising exceptions. volatility='stable', text=self.text, ) class StrToInt64NoInline(dbops.Function): """String-to-int64 cast with noinline guard. Adding a LIMIT clause to the function statement makes it uninlinable due to the Postgres inlining heuristic looking for simple SELECT expressions only (i.e. no clauses.) This might need to change in the future if the heuristic changes. 
""" text = r''' SELECT "val"::bigint LIMIT 1 ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_int64_noinline'), args=[('val', ('text',))], returns=('bigint',), volatility='stable', text=self.text, ) class StrToInt32NoInline(dbops.Function): """String-to-int32 cast with noinline guard.""" text = r''' SELECT "val"::int LIMIT 1 ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_int32_noinline'), args=[('val', ('text',))], returns=('int',), volatility='stable', text=self.text, ) class StrToInt16NoInline(dbops.Function): """String-to-int16 cast with noinline guard.""" text = r''' SELECT "val"::smallint LIMIT 1 ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_int16_noinline'), args=[('val', ('text',))], returns=('smallint',), volatility='stable', text=self.text, ) class StrToFloat64NoInline(dbops.Function): """String-to-float64 cast with noinline guard.""" text = r''' SELECT "val"::float8 LIMIT 1 ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_float64_noinline'), args=[('val', ('text',))], returns=('float8',), volatility='stable', text=self.text, ) class StrToFloat32NoInline(dbops.Function): """String-to-float32 cast with noinline guard.""" text = r''' SELECT "val"::float4 LIMIT 1 ; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_float32_noinline'), args=[('val', ('text',))], returns=('float4',), volatility='stable', text=self.text, ) class GetBackendCapabilitiesFunction(dbops.Function): text = f''' SELECT (json ->> 'capabilities')::bigint FROM edgedbinstdata.instdata WHERE key = 'backend_instance_params' ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_backend_capabilities'), args=[], returns=('bigint',), language='sql', volatility='stable', text=self.text, ) class GetBackendTenantIDFunction(dbops.Function): text = f''' SELECT (json ->> 'tenant_id')::text FROM edgedbinstdata.instdata WHERE key = 'backend_instance_params' ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_backend_tenant_id'), args=[], returns=('text',), language='sql', volatility='stable', text=self.text, ) class GetDatabaseBackendNameFunction(dbops.Function): text = f''' SELECT edgedb.get_backend_tenant_id() || '_' || "db_name" ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_database_backend_name'), args=[('db_name', ('text',))], returns=('text',), language='sql', volatility='stable', text=self.text, ) class GetRoleBackendNameFunction(dbops.Function): text = f''' SELECT edgedb.get_backend_tenant_id() || '_' || "role_name" ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_role_backend_name'), args=[('role_name', ('text',))], returns=('text',), language='sql', volatility='stable', text=self.text, ) class GetUserSequenceBackendNameFunction(dbops.Function): text = f""" SELECT 'edgedbpub', "sequence_type_id"::text || '_sequence' """ def __init__(self) -> None: super().__init__( name=('edgedb', 'get_user_sequence_backend_name'), args=[('sequence_type_id', ('uuid',))], returns=('record',), language='sql', volatility='stable', text=self.text, ) class GetSequenceBackendNameFunction(dbops.Function): text = f''' SELECT (CASE WHEN edgedb.get_name_module(st.name) = any(edgedb.get_std_modules()) THEN 'edgedbstd' ELSE 'edgedbpub' END), "sequence_type_id"::text || '_sequence' FROM edgedb."_SchemaScalarType" AS st WHERE st.id = "sequence_type_id" ''' def __init__(self) -> None: super().__init__( name=('edgedb', 
'get_sequence_backend_name'), args=[('sequence_type_id', ('uuid',))], returns=('record',), language='sql', volatility='stable', text=self.text, ) class GetStdModulesFunction(dbops.Function): text = f''' SELECT ARRAY[{",".join(ql(str(m)) for m in s_schema.STD_MODULES)}] ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_std_modules'), args=[], returns=('text[]',), language='sql', volatility='immutable', text=self.text, ) class GetObjectMetadata(dbops.Function): """Return EdgeDB metadata associated with a backend object.""" text = ''' SELECT CASE WHEN substr(d, 1, char_length({prefix})) = {prefix} THEN substr(d, char_length({prefix}) + 1)::jsonb ELSE '{{}}'::jsonb END FROM obj_description("objoid", "objclass") AS d '''.format( prefix=f'E{ql(defines.EDGEDB_VISIBLE_METADATA_PREFIX)}', ) def __init__(self) -> None: super().__init__( name=('edgedb', 'obj_metadata'), args=[('objoid', ('oid',)), ('objclass', ('text',))], returns=('jsonb',), volatility='stable', text=self.text) class GetColumnMetadata(dbops.Function): """Return EdgeDB metadata associated with a backend object.""" text = ''' SELECT CASE WHEN substr(d, 1, char_length({prefix})) = {prefix} THEN substr(d, char_length({prefix}) + 1)::jsonb ELSE '{{}}'::jsonb END FROM col_description("tableoid", "column") AS d '''.format( prefix=f'E{ql(defines.EDGEDB_VISIBLE_METADATA_PREFIX)}', ) def __init__(self) -> None: super().__init__( name=('edgedb', 'col_metadata'), args=[('tableoid', ('oid',)), ('column', ('integer',))], returns=('jsonb',), volatility='stable', text=self.text) class GetSharedObjectMetadata(dbops.Function): """Return EdgeDB metadata associated with a backend object.""" text = ''' SELECT CASE WHEN substr(d, 1, char_length({prefix})) = {prefix} THEN substr(d, char_length({prefix}) + 1)::jsonb ELSE '{{}}'::jsonb END FROM shobj_description("objoid", "objclass") AS d '''.format( prefix=f'E{ql(defines.EDGEDB_VISIBLE_METADATA_PREFIX)}', ) def __init__(self) -> None: super().__init__( name=('edgedb', 'shobj_metadata'), args=[('objoid', ('oid',)), ('objclass', ('text',))], returns=('jsonb',), volatility='stable', text=self.text) class GetDatabaseMetadataFunction(dbops.Function): """Return EdgeDB metadata associated with a given database.""" text = ''' SELECT edgedb.shobj_metadata( (SELECT oid FROM pg_database WHERE datname = edgedb.get_database_backend_name("dbname") ), 'pg_database' ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_database_metadata'), args=[('dbname', ('text',))], returns=('jsonb',), volatility='stable', text=self.text, ) class GetCurrentDatabaseFunction(dbops.Function): text = f''' SELECT substr( current_database(), char_length(edgedb.get_backend_tenant_id()) + 2 ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_current_database'), args=[], returns=('text',), language='sql', volatility='stable', text=self.text, ) class RaiseExceptionFunction(dbops.Function): text = ''' BEGIN RAISE EXCEPTION USING ERRCODE = "exc", MESSAGE = "msg", DETAIL = COALESCE("detail", ''), HINT = COALESCE("hint", ''), COLUMN = COALESCE("column", ''), CONSTRAINT = COALESCE("constraint", ''), DATATYPE = COALESCE("datatype", ''), TABLE = COALESCE("table", ''), SCHEMA = COALESCE("schema", ''); RETURN "rtype"; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'raise'), args=[ ('rtype', ('anyelement',)), ('exc', ('text',), "'raise_exception'"), ('msg', ('text',), "''"), ('detail', ('text',), "''"), ('hint', ('text',), "''"), ('column', ('text',), "''"), 
('constraint', ('text',), "''"), ('datatype', ('text',), "''"), ('table', ('text',), "''"), ('schema', ('text',), "''"), ], returns=('anyelement',), # NOTE: The main reason why we don't want this function to be # immutable is that immutable functions can be # pre-evaluated by the query planner once if they have # constant arguments. This means that using this function # as the second argument in a COALESCE will raise an # exception regardless of whether the first argument is # NULL or not. volatility='stable', language='plpgsql', text=self.text, ) class RaiseExceptionOnNullFunction(dbops.Function): """Return the passed value or raise an exception if it's NULL.""" text = ''' SELECT coalesce( val, edgedb.raise( val, exc, msg => msg, detail => detail, hint => hint, "column" => "column", "constraint" => "constraint", "datatype" => "datatype", "table" => "table", "schema" => "schema" ) ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'raise_on_null'), args=[ ('val', ('anyelement',)), ('exc', ('text',)), ('msg', ('text',)), ('detail', ('text',), "''"), ('hint', ('text',), "''"), ('column', ('text',), "''"), ('constraint', ('text',), "''"), ('datatype', ('text',), "''"), ('table', ('text',), "''"), ('schema', ('text',), "''"), ], returns=('anyelement',), # Same volatility as raise() volatility='stable', text=self.text, ) class RaiseExceptionOnNotNullFunction(dbops.Function): """Return the passed value or raise an exception if it's NOT NULL.""" text = ''' SELECT CASE WHEN val IS NULL THEN val ELSE edgedb.raise( val, exc, msg => msg, detail => detail, hint => hint, "column" => "column", "constraint" => "constraint", "datatype" => "datatype", "table" => "table", "schema" => "schema" ) END ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'raise_on_not_null'), args=[ ('val', ('anyelement',)), ('exc', ('text',)), ('msg', ('text',)), ('detail', ('text',), "''"), ('hint', ('text',), "''"), ('column', ('text',), "''"), ('constraint', ('text',), "''"), ('datatype', ('text',), "''"), ('table', ('text',), "''"), ('schema', ('text',), "''"), ], returns=('anyelement',), # Same volatility as raise() volatility='stable', text=self.text, ) class RaiseExceptionOnEmptyStringFunction(dbops.Function): """Return the passed string or raise an exception if it's empty.""" text = ''' SELECT CASE WHEN edgedb._length(val) = 0 THEN edgedb.raise(val, exc, msg => msg, detail => detail) ELSE val END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'raise_on_empty'), args=[ ('val', ('anyelement',)), ('exc', ('text',)), ('msg', ('text',)), ('detail', ('text',), "''"), ], returns=('anyelement',), # Same volatility as raise() volatility='stable', text=self.text, ) class AssertJSONTypeFunction(dbops.Function): """Assert that the JSON type matches what is expected.""" text = ''' SELECT CASE WHEN array_position(typenames, jsonb_typeof(val)) IS NULL THEN edgedb.raise( NULL::jsonb, 'wrong_object_type', msg => coalesce( msg, ( 'expected json ' || array_to_string(typenames, ' or ') || '; got json ' || coalesce(jsonb_typeof(val), 'UNKNOWN') ) ), detail => detail ) ELSE val END ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'jsonb_assert_type'), args=[ ('val', ('jsonb',)), ('typenames', ('text[]',)), ('msg', ('text',), 'NULL'), ('detail', ('text',), "''"), ], returns=('jsonb',), # Max volatility of raise() and array_to_string() (stable) volatility='stable', text=self.text, ) class ExtractJSONScalarFunction(dbops.Function): """Convert a given JSON scalar value into a 
text value.""" text = ''' SELECT (to_jsonb(ARRAY[ edgedb.jsonb_assert_type( coalesce(val, 'null'::jsonb), ARRAY[json_typename, 'null'], msg => msg, detail => detail ) ])->>0) ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'jsonb_extract_scalar'), args=[ ('val', ('jsonb',)), ('json_typename', ('text',)), ('msg', ('text',), 'NULL'), ('detail', ('text',), "''"), ], returns=('text',), volatility='stable', text=self.text, ) class GetSchemaObjectNameFunction(dbops.Function): text = ''' SELECT coalesce( (SELECT name FROM edgedb."_SchemaObject" WHERE id = type::uuid), edgedb.raise( NULL::text, msg => 'resolve_type_name: unknown type: "' || type || '"' ) ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_get_schema_object_name'), args=[('type', ('uuid',))], returns=('text',), # Max volatility of raise() and a SELECT from a # table (stable). volatility='stable', text=self.text, strict=True, ) class IssubclassFunction(dbops.Function): text = ''' SELECT clsid = any(classes) OR ( SELECT classes && q.ancestors FROM (SELECT array_agg(o.target) AS ancestors FROM edgedb."_SchemaInheritingObject__ancestors" o WHERE o.source = clsid ) AS q ); ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'issubclass'), args=[('clsid', 'uuid'), ('classes', 'uuid[]')], returns='bool', volatility='stable', text=self.__class__.text) class IssubclassFunction2(dbops.Function): text = ''' SELECT clsid = pclsid OR ( SELECT pclsid IN ( SELECT o.target FROM edgedb."_SchemaInheritingObject__ancestors" o WHERE o.source = clsid ) ); ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'issubclass'), args=[('clsid', 'uuid'), ('pclsid', 'uuid')], returns='bool', volatility='stable', text=self.__class__.text) class NormalizeNameFunction(dbops.Function): text = ''' SELECT CASE WHEN strpos(name, '@') = 0 THEN name ELSE CASE WHEN strpos(name, '::') = 0 THEN replace(split_part(name, '@', 1), '|', '::') ELSE replace( split_part( -- "reverse" calls are to emulate "rsplit" reverse(split_part(reverse(name), '::', 1)), '@', 1), '|', '::') END END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'shortname_from_fullname'), args=[('name', 'text')], returns='text', volatility='immutable', language='sql', text=self.__class__.text) class GetNameModuleFunction(dbops.Function): text = ''' SELECT reverse(split_part(reverse("name"), '::', 1)) ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_name_module'), args=[('name', 'text')], returns='text', volatility='immutable', language='sql', text=self.__class__.text) class NullIfArrayNullsFunction(dbops.Function): """Check if array contains NULLs and if so, return NULL.""" def __init__(self) -> None: super().__init__( name=('edgedb', '_nullif_array_nulls'), args=[('a', 'anyarray')], returns='anyarray', volatility='stable', language='sql', text=''' SELECT CASE WHEN array_position(a, NULL) IS NULL THEN a ELSE NULL END ''') class IndexDescType(dbops.CompositeType): """Introspected index description.""" def __init__(self) -> None: super().__init__(name=('edgedb', 'intro_index_desc_t')) self.add_columns([ dbops.Column(name='table_name', type='text[]'), dbops.Column(name='name', type='text'), dbops.Column(name='is_unique', type='bool'), dbops.Column(name='predicate', type='text'), dbops.Column(name='expression', type='text'), dbops.Column(name='columns', type='text[]'), dbops.Column(name='metadata', type='jsonb'), ]) class IntrospectIndexesFunction(dbops.Function): """Return set of indexes for each table.""" 
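    # A hypothetical invocation, shown for illustration only (the schema
    # pattern below is made up; argument names mirror the ones declared in
    # __init__ further down):
    #
    #   SELECT * FROM edgedb.introspect_indexes(
    #       schema_pattern => 'edgedbpub',
    #       include_inherited => TRUE);
    #
    # Each result row is an edgedb.intro_index_desc_t composite value
    # (table_name, name, is_unique, predicate, expression, columns, metadata).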
text = ''' SELECT i.table_name, i.index_name, i.index_is_unique, i.index_predicate, i.index_expression, i.index_columns, i.index_metadata FROM (SELECT * FROM (SELECT ARRAY[ns.nspname::text, c.relname::text] AS table_name, ic.relname::text AS index_name, i.indisunique AS index_is_unique, pg_get_expr(i.indpred, i.indrelid)::text AS index_predicate, pg_get_expr(i.indexprs, i.indrelid)::text AS index_expression, (SELECT array_agg(ia.attname::text ORDER BY ia.attnum) FROM pg_attribute AS ia WHERE ia.attrelid = i.indexrelid AND (ia.attnum IS NULL OR ia.attnum >= 1) ) AS index_columns, edgedb.obj_metadata(i.indexrelid, 'pg_class') AS index_metadata FROM pg_class AS c INNER JOIN pg_namespace AS ns ON ns.oid = c.relnamespace INNER JOIN pg_index AS i ON i.indrelid = c.oid INNER JOIN pg_class AS ic ON i.indexrelid = ic.oid WHERE ($1::text IS NULL OR ns.nspname LIKE $1::text) AND ($2::text IS NULL OR c.relname LIKE $2::text) AND ($3::text[] IS NULL OR ns.nspname || '.' || ic.relname = any($3::text[])) AND ($4::text IS NULL OR ic.relname LIKE $4::text) ) AS q WHERE (NOT $5::bool OR (index_metadata IS NOT NULL AND (index_metadata->>'ddl:inherit')::bool)) AND ( $6 OR ( index_metadata IS NULL OR NOT coalesce( (index_metadata->>'ddl:inherited')::bool, false) ) ) ) AS i ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'introspect_indexes'), args=[ ('schema_pattern', 'text', 'NULL'), ('table_pattern', 'text', 'NULL'), ('table_list', 'text[]', 'NULL'), ('index_pattern', 'text', 'NULL'), ('inheritable_only', 'bool', 'FALSE'), ('include_inherited', 'bool', 'FALSE'), ], returns=('edgedb', 'intro_index_desc_t'), set_returning=True, volatility='stable', language='sql', text=self.__class__.text) class TriggerDescType(dbops.CompositeType): """Introspected trigger description.""" def __init__(self) -> None: super().__init__(name=('edgedb', 'intro_trigger_desc_t')) self.add_columns([ dbops.Column(name='table_name', type='text[]'), dbops.Column(name='name', type='text'), dbops.Column(name='proc', type='text[]'), dbops.Column(name='is_constraint', type='bool'), dbops.Column(name='granularity', type='text'), dbops.Column(name='deferred', type='bool'), dbops.Column(name='timing', type='text'), dbops.Column(name='events', type='text[]'), dbops.Column(name='definition', type='text'), dbops.Column(name='condition', type='text'), dbops.Column(name='metadata', type='jsonb'), ]) class IntrospectTriggersFunction(dbops.Function): """Return a set of triggers for each table.""" text = ''' SELECT table_name, trg_name, trg_proc, trg_constraint, trg_granularity, trg_deferred, trg_timing, trg_events, trg_definition, NULL::text, trg_metadata FROM (SELECT * FROM (SELECT ARRAY[ns.nspname::text, tc.relname::text] AS table_name, t.oid::int AS trg_id, t.tgname::text AS trg_name, (SELECT ARRAY[nsp.nspname::text, p.proname::text] FROM pg_proc AS p INNER JOIN pg_namespace AS nsp ON nsp.oid = p.pronamespace WHERE t.tgfoid = p.oid ) AS trg_proc, t.tgconstraint != 0 AS trg_constraint, (CASE WHEN (t.tgtype & (1 << 0)) != 0 THEN 'row' ELSE 'statement' END) AS trg_granularity, t.tginitdeferred AS trg_deferred, (CASE WHEN (t.tgtype & (1 << 1)) != 0 THEN 'before' WHEN (t.tgtype & (1 << 6)) != 0 THEN 'instead' ELSE 'after' END) AS trg_timing, array_remove(ARRAY[ (CASE WHEN (t.tgtype & (1 << 2)) != 0 THEN 'insert' ELSE NULL END), (CASE WHEN (t.tgtype & (1 << 3)) != 0 THEN 'delete' ELSE NULL END), (CASE WHEN (t.tgtype & (1 << 4)) != 0 THEN 'update' ELSE NULL END), (CASE WHEN (t.tgtype & (1 << 5)) != 0 THEN 'truncate' ELSE NULL END) 
]::text[], NULL) AS trg_events, pg_get_triggerdef(t.oid)::text AS trg_definition, edgedb.obj_metadata(t.oid, 'pg_trigger') AS trg_metadata FROM pg_trigger AS t INNER JOIN pg_class AS tc ON t.tgrelid = tc.oid INNER JOIN pg_namespace AS ns ON ns.oid = tc.relnamespace WHERE ($1::text IS NULL OR ns.nspname LIKE $1::text) AND ($2::text IS NULL OR tc.relname LIKE $2::text) AND ($3::text[] IS NULL OR ns.nspname || '.' || tc.relname = any($3::text[])) AND ($4::text IS NULL OR t.tgname LIKE $4::text) ) AS q WHERE (NOT $5::bool OR (trg_metadata IS NOT NULL AND (trg_metadata->>'ddl:inherit')::bool)) AND ( $6 OR ( trg_metadata IS NULL OR NOT coalesce( (trg_metadata->>'ddl:inherited')::bool, false) ) ) ) AS t ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'introspect_triggers'), args=[ ('schema_pattern', 'text', 'NULL'), ('table_pattern', 'text', 'NULL'), ('table_list', 'text[]', 'NULL'), ('trigger_pattern', 'text', 'NULL'), ('inheritable_only', 'bool', 'FALSE'), ('include_inherited', 'bool', 'FALSE'), ], returns=('edgedb', 'intro_trigger_desc_t'), set_returning=True, volatility='stable', language='sql', text=self.__class__.text)
class TableInheritanceDescType(dbops.CompositeType): """Introspected table inheritance descriptor.""" def __init__(self) -> None: super().__init__(name=('edgedb', 'intro_tab_inh_t')) self.add_columns([ dbops.Column(name='name', type='text[]'), dbops.Column(name='depth', type='int'), dbops.Column(name='pos', type='int'), ])
class GetTableDescendantsFunction(dbops.Function): """Return a set of table descendants.""" text = ''' SELECT * FROM (WITH RECURSIVE inheritance(oid, name, ns, depth, path) AS ( SELECT c.oid, c.relname, ns.nspname, 0, ARRAY[c.relname] FROM pg_class c INNER JOIN pg_namespace ns ON c.relnamespace = ns.oid WHERE ($1::text IS NULL OR ns.nspname LIKE $1::text) AND ($2::text IS NULL OR c.relname LIKE $2::text) UNION ALL SELECT c.oid, c.relname, ns.nspname, i.depth + 1, i.path || c.relname FROM pg_class c, inheritance i, pg_inherits pgi, pg_namespace ns WHERE i.oid = pgi.inhparent AND c.oid = pgi.inhrelid AND ns.oid = c.relnamespace AND ($3::int IS NULL OR i.depth < $3::int) ) SELECT DISTINCT ON (ns, name) ARRAY[ns::text, name::text], depth, 0 FROM inheritance) q WHERE depth > 0 ORDER BY depth ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_table_descendants'), args=[ ('schema_name', 'text'), ('table_name', 'text'), ('max_depth', 'int', 'NULL'), ], returns=('edgedb', 'intro_tab_inh_t'), set_returning=True, volatility='stable', language='sql', text=self.__class__.text)
class ParseTriggerConditionFunction(dbops.Function): """Extract the condition expression from a trigger definition, if any.""" text = ''' DECLARE when_off integer; pos integer; brackets integer; chr text; def_len integer; BEGIN def_len := char_length(definition); when_off := strpos(definition, 'WHEN ('); IF when_off IS NULL OR when_off = 0 THEN RETURN NULL; ELSE pos := when_off + 6; brackets := 1; WHILE brackets > 0 AND pos < def_len LOOP chr := substr(definition, pos, 1); IF chr = ')' THEN brackets := brackets - 1; ELSIF chr = '(' THEN brackets := brackets + 1; END IF; pos := pos + 1; END LOOP; IF brackets != 0 THEN RAISE EXCEPTION 'cannot parse trigger condition: %', definition; END IF; RETURN substr( definition, when_off + 6, pos - (when_off + 6) - 1 ); END IF; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_parse_trigger_condition'), args=[ ('definition', 'text'), ], returns='text', volatility='stable', language='plpgsql', text=self.__class__.text) class
NormalizeArrayIndexFunction(dbops.Function): """Convert an EdgeQL index to SQL index.""" text = ''' SELECT ( CASE WHEN index < 0 THEN length + index + 1 ELSE index + 1 END )::int ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_normalize_array_index'), args=[('index', ('bigint',)), ('length', ('int',))], returns=('int',), volatility='immutable', strict=True, text=self.text) class ArrayIndexWithBoundsFunction(dbops.Function): """Get an array element or raise an out-of-bounds exception.""" text = ''' SELECT edgedb.raise_on_null( val[edgedb._normalize_array_index(index, array_upper(val, 1))], 'array_subscript_error', msg => 'array index ' || index::text || ' is out of bounds', detail => detail ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_index'), args=[('val', ('anyarray',)), ('index', ('bigint',)), ('detail', ('text',))], returns=('anyelement',), # Same volatility as raise() volatility='stable', strict=True, text=self.text, ) class ArraySliceFunction(dbops.Function): """Get an array slice.""" text = ''' SELECT CASE WHEN start IS NULL THEN val[:edgedb._normalize_array_index( stop, array_upper(val, 1)) - 1] WHEN stop IS NULL THEN val[edgedb._normalize_array_index( start, array_upper(val, 1)):] ELSE val[edgedb._normalize_array_index( start, array_upper(val, 1)): edgedb._normalize_array_index( stop, array_upper(val, 1)) - 1] END ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_slice'), args=[('val', ('anyarray',)), ('start', ('bigint',)), ('stop', ('bigint',))], returns=('anyarray',), volatility='immutable', text=self.text, ) class StringIndexWithBoundsFunction(dbops.Function): """Get a string character or raise an out-of-bounds exception.""" text = ''' SELECT edgedb.raise_on_empty( substr( "val", edgedb._normalize_array_index("index", char_length("val")), 1 ), 'invalid_parameter_value', 'string index ' || "index"::text || ' is out of bounds', "detail" ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_index'), args=[ ('val', ('text',)), ('index', ('bigint',)), ('detail', ('text',)), ], returns=('text',), # Same volatility as raise_on_empty volatility='stable', strict=True, text=self.text, ) class BytesIndexWithBoundsFunction(dbops.Function): """Get a bytes character or raise an out-of-bounds exception.""" text = ''' SELECT edgedb.raise_on_empty( substr( "val", edgedb._normalize_array_index("index", length("val")), 1 ), 'invalid_parameter_value', 'byte string index ' || "index"::text || ' is out of bounds', "detail" ) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_index'), args=[ ('val', ('bytea',)), ('index', ('bigint',)), ('detail', ('text',)), ], returns=('bytea',), # Same volatility as raise_on_empty volatility='stable', strict=True, text=self.text, ) class SubstrProxyFunction(dbops.Function): """Same as substr, but interpret negative length as 0 instead.""" text = r''' SELECT CASE WHEN length < 0 THEN '' ELSE substr(val, start::int, length) END ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_substr'), args=[('val', ('anyelement',)), ('start', ('bigint',)), ('length', ('int',))], returns=('anyelement',), volatility='immutable', strict=True, text=self.text) class LengthStringProxyFunction(dbops.Function): """Same as substr, but interpret negative length as 0 instead.""" text = r''' SELECT char_length(val) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_length'), args=[('val', ('text',))], returns=('int',), volatility='immutable', strict=True, 
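            # Illustrative behaviour, not taken from the original source:
            # because the body is char_length(), a call such as
            # edgedb._length('héllo'::text) counts characters rather than
            # bytes and returns 5.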
text=self.text)
class LengthBytesProxyFunction(dbops.Function): """Return the length of a bytes value.""" text = r''' SELECT length(val) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_length'), args=[('val', ('bytea',))], returns=('int',), volatility='immutable', strict=True, text=self.text)
class StringSliceImplFunction(dbops.Function): """Get a string slice.""" text = r''' SELECT CASE WHEN start IS NULL THEN edgedb._substr( val, 1, edgedb._normalize_array_index( stop, edgedb._length(val)) - 1 ) WHEN stop IS NULL THEN substr( val, edgedb._normalize_array_index( start, edgedb._length(val)) ) ELSE edgedb._substr( val, edgedb._normalize_array_index( start, edgedb._length(val)), edgedb._normalize_array_index( stop, edgedb._length(val)) - edgedb._normalize_array_index( start, edgedb._length(val)) ) END ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_str_slice'), args=[ ('val', ('anyelement',)), ('start', ('bigint',)), ('stop', ('bigint',)) ], returns=('anyelement',), volatility='immutable', text=self.text)
class StringSliceFunction(dbops.Function): """Get a string slice.""" text = r''' SELECT edgedb._str_slice(val, start, stop) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_slice'), args=[ ('val', ('text',)), ('start', ('bigint',)), ('stop', ('bigint',)) ], returns=('text',), volatility='immutable', text=self.text)
class BytesSliceFunction(dbops.Function): """Get a bytes slice.""" text = r''' SELECT edgedb._str_slice(val, start, stop) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_slice'), args=[ ('val', ('bytea',)), ('start', ('bigint',)), ('stop', ('bigint',)) ], returns=('bytea',), volatility='immutable', text=self.text)
class JSONIndexByTextFunction(dbops.Function): """Get a JSON element by text index or raise an exception.""" text = r''' SELECT CASE jsonb_typeof(val) WHEN 'object' THEN ( edgedb.raise_on_null( val -> index, 'invalid_parameter_value', msg => ( 'json index ' || quote_literal(index) || ' is out of bounds' ), detail => detail ) ) WHEN 'array' THEN ( edgedb.raise( NULL::jsonb, 'wrong_object_type', msg => ( 'cannot index json ' || jsonb_typeof(val) || ' by ' || pg_typeof(index)::text ), detail => detail ) ) ELSE edgedb.raise( NULL::jsonb, 'wrong_object_type', msg => ( 'cannot index json ' || coalesce(jsonb_typeof(val), 'UNKNOWN') ), detail => detail ) END ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_index'), args=[ ('val', ('jsonb',)), ('index', ('text',)), ('detail', ('text',), "''"), ], returns=('jsonb',), # Same volatility as exception helpers volatility='stable', strict=True, text=self.text, )
class JSONIndexByIntFunction(dbops.Function): """Get a JSON element by int index or raise an exception.""" text = r''' SELECT CASE jsonb_typeof(val) WHEN 'object' THEN ( edgedb.raise( NULL::jsonb, 'wrong_object_type', msg => ( 'cannot index json ' || jsonb_typeof(val) || ' by ' || pg_typeof(index)::text ), detail => detail ) ) WHEN 'array' THEN ( edgedb.raise_on_null( val -> index::int, 'invalid_parameter_value', msg => 'json index ' || index::text || ' is out of bounds', detail => detail ) ) ELSE edgedb.raise( NULL::jsonb, 'wrong_object_type', msg => ( 'cannot index json ' || coalesce(jsonb_typeof(val), 'UNKNOWN') ), detail => detail ) END ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_index'), args=[ ('val', ('jsonb',)), ('index', ('bigint',)), ('detail', ('text',), "''"), ], returns=('jsonb',), # Min volatility of exception helpers
and pg_typeof (stable). volatility='stable', strict=True, text=self.text, ) class JSONSliceFunction(dbops.Function): """Get a JSON array slice.""" text = r''' SELECT to_jsonb(_slice( ( SELECT array_agg(value) FROM jsonb_array_elements( edgedb.jsonb_assert_type(val, ARRAY['array'])) ), start, stop )) ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_slice'), args=[('val', ('jsonb',)), ('start', ('bigint',)), ('stop', ('bigint',))], returns=('jsonb',), # Same volatility as to_jsonb (stable) volatility='stable', text=self.text) # We need custom casting functions for various datetime scalars in # order to enforce correctness w.r.t. local vs time-zone-aware # datetime. Postgres does a lot of magic and guessing for time zones # and generally will accept text with or without time zone for any # particular flavor of timestamp. In order to guarantee that we can # detect time-zones we restrict the inputs to ISO8601 format. # # See issue #740. class DatetimeInFunction(dbops.Function): """Cast text into timestamptz using ISO8601 spec.""" text = r''' SELECT CASE WHEN val !~ ( '^\s*(' || '(\d{4}-\d{2}-\d{2}|\d{8})' || '[ tT]' || '(\d{2}(:\d{2}(:\d{2}(\.\d+)?)?)?|\d{2,6}(\.\d+)?)' || '([zZ]|[-+](\d{2,4}|\d{2}:\d{2}))' || ')\s*$' ) THEN edgedb.raise( NULL::edgedb.timestamptz_t, 'invalid_datetime_format', msg => ( 'invalid input syntax for type timestamptz: ' || quote_literal(val) ), detail => ( '{"hint":"Please use ISO8601 format. Example: ' || '2010-12-27T23:59:59-07:00 Alternatively ' || '\"to_datetime\" function provides custom ' || 'formatting options."}' ) ) ELSE val::edgedb.timestamptz_t END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'datetime_in'), args=[('val', ('text',))], returns=('edgedb', 'timestamptz_t'), # Same volatility as raise() (stable) volatility='stable', text=self.text) class DurationInFunction(dbops.Function): """Cast text into duration, ensuring there is no days or months units""" text = r''' SELECT CASE WHEN EXTRACT(MONTH FROM v.column1) != 0 OR EXTRACT(YEAR FROM v.column1) != 0 OR EXTRACT(DAY FROM v.column1) != 0 THEN edgedb.raise( NULL::edgedb.duration_t, 'invalid_datetime_format', msg => ( 'invalid input syntax for type std::duration: ' || quote_literal(val) ), detail => ( '{"hint":"Units bigger than days cannot be used ' || 'for std::duration."}' ) ) ELSE v.column1::edgedb.duration_t END FROM (VALUES ( val::interval )) AS v ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'duration_in'), args=[('val', ('text',))], returns=('edgedb', 'duration_t'), # Same volatility as raise() (stable) volatility='stable', text=self.text, ) class LocalDatetimeInFunction(dbops.Function): """Cast text into timestamp using ISO8601 spec.""" text = r''' SELECT CASE WHEN val !~ ( '^\s*(' || '(\d{4}-\d{2}-\d{2}|\d{8})' || '[ tT]' || '(\d{2}(:\d{2}(:\d{2}(\.\d+)?)?)?|\d{2,6}(\.\d+)?)' || ')\s*$' ) THEN edgedb.raise( NULL::edgedb.timestamp_t, 'invalid_datetime_format', msg => ( 'invalid input syntax for type timestamp: ' || quote_literal(val) ), detail => ( '{"hint":"Please use ISO8601 format. 
Example ' || '2010-04-18T09:27:00 Alternatively ' || '\"to_local_datetime\" function provides custom ' || 'formatting options."}' ) ) ELSE val::edgedb.timestamp_t END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'local_datetime_in'), args=[('val', ('text',))], returns=('edgedb', 'timestamp_t'), # Same volatility as raise() (stable) volatility='stable', text=self.text) class LocalDateInFunction(dbops.Function): """Cast text into date using ISO8601 spec.""" text = r''' SELECT CASE WHEN val !~ ( '^\s*(' || '(\d{4}-\d{2}-\d{2}|\d{8})' || ')\s*$' ) THEN edgedb.raise( NULL::edgedb.date_t, 'invalid_datetime_format', msg => ( 'invalid input syntax for type date: ' || quote_literal(val) ), detail => ( '{"hint":"Please use ISO8601 format. Example ' || '2010-04-18 Alternatively ' || '\"to_local_date\" function provides custom ' || 'formatting options."}' ) ) ELSE val::edgedb.date_t END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'local_date_in'), args=[('val', ('text',))], returns=('edgedb', 'date_t'), # Same volatility as raise() (stable) volatility='stable', text=self.text) class LocalTimeInFunction(dbops.Function): """Cast text into time using ISO8601 spec.""" text = r''' SELECT CASE WHEN val !~ ( '^\s*(' || '(\d{2}(:\d{2}(:\d{2}(\.\d+)?)?)?|\d{2,6}(\.\d+)?)' || ')\s*$' ) THEN edgedb.raise( NULL::time, 'invalid_datetime_format', msg => ( 'invalid input syntax for type time: ' || quote_literal(val) ), detail => ( '{"hint":"Please use ISO8601 format. Examples: ' || '18:43:27 or 18:43 Alternatively ' || '\"to_local_time\" function provides custom ' || 'formatting options."}' ) ) ELSE val::time END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'local_time_in'), args=[('val', ('text',))], returns=('time',), # Same volatility as raise() (stable) volatility='stable', text=self.text, ) class ToTimestampTZCheck(dbops.Function): """Checks if the original text has time zone or not.""" # What are we trying to mitigate? # We're trying to detect that when we're casting to datetime the # time zone is in fact present in the input. It is a problem if # it's not since then one gets assigned implicitly based on the # server settings. # # It is insufficient to rely on the presence of TZH in the format # string, since `to_timestamp` will happily ignore the missing # time-zone in the input anyway. So in order to tell whether the # input string contained a time zone that was in fact parsed we # employ the following trick: # # If the time zone is in the input then it is unambiguous and the # parsed value will not depend on the current server time zone. # However, if the time zone was omitted, then the parsed value # will default to the server time zone. This implies that if # changing the server time zone for the same input string affects # the parsed value, the input string itself didn't contain a time # zone. text = r''' DECLARE result timestamptz; chk timestamptz; msg text; BEGIN result := to_timestamp(val, fmt); PERFORM set_config('TimeZone', 'America/Toronto', true); chk := to_timestamp(val, fmt); -- We're deliberately not doing any save/restore because -- the server MUST be in UTC. In fact, this check relies -- on it. 
PERFORM set_config('TimeZone', 'UTC', true); IF hastz THEN msg := 'missing required'; ELSE msg := 'unexpected'; END IF; IF (result = chk) != hastz THEN RAISE EXCEPTION USING ERRCODE = 'invalid_datetime_format', MESSAGE = msg || ' time zone in input ' || quote_literal(val), DETAIL = ''; END IF; RETURN result::edgedb.timestamptz_t; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_to_timestamptz_check'), args=[('val', ('text',)), ('fmt', ('text',)), ('hastz', ('bool',))], returns=('edgedb', 'timestamptz_t'), # We're relying on changing settings, so it's volatile. volatility='volatile', language='plpgsql', text=self.text) class ToDatetimeFunction(dbops.Function): """Convert text into timestamptz using a formatting spec.""" # NOTE that if only the TZM (minutes) are mentioned it is not # enough for a valid time zone definition text = r''' SELECT CASE WHEN fmt !~ ( '^(' || '("([^"\\]|\\.)*")|' || '([^"]+)' || ')*(TZH).*$' ) THEN edgedb.raise( NULL::edgedb.timestamptz_t, 'invalid_datetime_format', msg => ( 'missing required time zone in format: ' || quote_literal(fmt) ), detail => ( $h${"hint":"Use one or both of the following: $h$ || $h$'TZH', 'TZM'"}$h$ ) ) ELSE edgedb._to_timestamptz_check(val, fmt, true) END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'to_datetime'), args=[('val', ('text',)), ('fmt', ('text',))], returns=('edgedb', 'timestamptz_t'), # Same as _to_timestamptz_check. volatility='volatile', text=self.text) class ToLocalDatetimeFunction(dbops.Function): """Convert text into timestamp using a formatting spec.""" # NOTE time zone should not be mentioned at all. text = r''' SELECT CASE WHEN fmt ~ ( '^(' || '("([^"\\]|\\.)*")|' || '([^"]+)' || ')*(TZH|TZM).*$' ) THEN edgedb.raise( NULL::edgedb.timestamp_t, 'invalid_datetime_format', msg => ( 'unexpected time zone in format: ' || quote_literal(fmt) ) ) ELSE edgedb._to_timestamptz_check(val, fmt, false) ::edgedb.timestamp_t END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'to_local_datetime'), args=[('val', ('text',)), ('fmt', ('text',))], returns=('edgedb', 'timestamp_t'), # Same as _to_timestamptz_check. volatility='volatile', text=self.text) class StrToBool(dbops.Function): """Parse bool from text.""" # We first try to match case-insensitive "true|false" at all. On # null, we raise an exception. But otherwise we know that we have # an array of matches. The first element matching "true" and # second - "false". So the boolean value is then "true" if the # second array element is NULL and false otherwise. text = r''' SELECT ( coalesce( regexp_match(val, '^\s*(?:(true)|(false))\s*$', 'i')::text[], edgedb.raise( NULL::text[], 'invalid_text_representation', msg => 'invalid syntax for bool: ' || quote_literal(val) ) ) )[2] IS NULL; ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'str_to_bool'), args=[('val', ('text',))], returns=('bool',), # Stable because it's raising exceptions. 
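            # Illustrative behaviour (assumption): edgedb.str_to_bool(' TRUE ')
            # returns true, while edgedb.str_to_bool('0') fails the regexp
            # match and raises invalid_text_representation via edgedb.raise().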
volatility='stable', text=self.text) class QuoteLiteralFunction(dbops.Function): """Encode string as edgeql literal quoted string""" text = r''' SELECT concat('\'', replace( replace(val, '\\', '\\\\'), '\'', '\\\''), '\'') ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'quote_literal'), args=[('val', ('text',))], returns=('str',), volatility='immutable', text=self.text) class QuoteIdentFunction(dbops.Function): """Quote ident function.""" # TODO do not quote valid identifiers unless they are reserved text = r''' SELECT concat('`', replace(val, '`', '``'), '`') ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'quote_ident'), args=[('val', ('text',))], returns=('text',), volatility='immutable', text=self.text, ) class QuoteNameFunction(dbops.Function): text = r""" SELECT string_agg(edgedb.quote_ident(np), '::') FROM unnest(string_to_array("name", '::')) AS np """ def __init__(self) -> None: super().__init__( name=('edgedb', 'quote_name'), args=[('name', ('text',))], returns=('text',), volatility='immutable', text=self.text, ) class DescribeRolesAsDDLFunctionForwardDecl(dbops.Function): """Forward declaration for _describe_roles_as_ddl""" def __init__(self) -> None: super().__init__( name=('edgedb', '_describe_roles_as_ddl'), args=[], returns=('text'), # Stable because it's raising exceptions. volatility='stable', text='SELECT NULL::text', ) class DescribeRolesAsDDLFunction(dbops.Function): """Describe roles as DDL""" def __init__(self, schema: s_schema.Schema) -> None: role_obj = schema.get("sys::Role", type=s_objtypes.ObjectType) roles = inhviewname(schema, role_obj) member_of = role_obj.getptr(schema, s_name.UnqualName('member_of')) members = inhviewname(schema, member_of) name_col = ptr_col_name(schema, role_obj, 'name') pass_col = ptr_col_name(schema, role_obj, 'password') qi_superuser = qlquote.quote_ident(defines.EDGEDB_SUPERUSER) text = f""" WITH RECURSIVE dependencies AS ( SELECT r.id AS id, m.target AS parent FROM {q(*roles)} r LEFT OUTER JOIN {q(*members)} m ON r.id = m.source ), roles_with_depths(id, depth) AS ( SELECT id, 0 FROM dependencies WHERE parent IS NULL UNION ALL SELECT dependencies.id, roles_with_depths.depth + 1 FROM dependencies INNER JOIN roles_with_depths ON dependencies.parent = roles_with_depths.id ), ordered_roles AS ( SELECT id, max(depth) FROM roles_with_depths GROUP BY id ORDER BY max(depth) ASC ) SELECT coalesce(string_agg( CASE WHEN role.{qi(name_col)} = { ql(defines.EDGEDB_SUPERUSER) } THEN NULLIF(concat( 'ALTER ROLE { qi_superuser } {{', NULLIF((SELECT concat( ' EXTENDING ', string_agg( edgedb.quote_ident(parent.{qi(name_col)}), ', ' ), ';' ) FROM {q(*members)} member INNER JOIN {q(*roles)} parent ON parent.id = member.target WHERE member.source = role.id ), ' EXTENDING ;'), CASE WHEN role.{qi(pass_col)} IS NOT NULL THEN concat(' SET password_hash := ', quote_literal(role.{qi(pass_col)}), ';') ELSE '' END, '}};' ), 'ALTER ROLE { qi_superuser } {{}};') ELSE concat( 'CREATE SUPERUSER ROLE ', edgedb.quote_ident(role.{qi(name_col)}), NULLIF((SELECT concat(' EXTENDING ', string_agg( edgedb.quote_ident(parent.{qi(name_col)}), ', ' ) ) FROM {q(*members)} member INNER JOIN {q(*roles)} parent ON parent.id = member.target WHERE member.source = role.id ), ' EXTENDING '), CASE WHEN role.{qi(pass_col)} IS NOT NULL THEN concat(' {{ SET password_hash := ', quote_literal(role.{qi(pass_col)}), '}};') ELSE ';' END ) END, '\n' ), '') str FROM ordered_roles JOIN {q(*roles)} role ON role.id = ordered_roles.id """ super().__init__( 
name=('edgedb', '_describe_roles_as_ddl'), args=[], returns=('text'), # Stable because it's raising exceptions. volatility='stable', text=text) class DescribeSystemConfigAsDDLFunctionForwardDecl(dbops.Function): def __init__(self) -> None: super().__init__( name=('edgedb', '_describe_system_config_as_ddl'), args=[], returns=('text'), volatility='stable', text='SELECT NULL::text', ) class DescribeDatabaseConfigAsDDLFunctionForwardDecl(dbops.Function): def __init__(self) -> None: super().__init__( name=('edgedb', '_describe_database_config_as_ddl'), args=[], returns=('text'), volatility='stable', text='SELECT NULL::text', ) class DumpSequencesFunction(dbops.Function): text = r""" SELECT string_agg( 'SELECT std::sequence_reset(' || 'INTROSPECT ' || edgedb.quote_name(seq.name) || (CASE WHEN seq_st.is_called THEN ', ' || seq_st.last_value::text ELSE '' END) || ');', E'\n' ) FROM (SELECT id, name FROM edgedb."_SchemaScalarType" WHERE id = any("seqs") ) AS seq, LATERAL ( SELECT COALESCE(last_value, start_value)::text AS last_value, last_value IS NOT NULL AS is_called FROM pg_sequences, LATERAL ROWS FROM ( edgedb.get_sequence_backend_name(seq.id) ) AS seq_name(schema text, name text) WHERE (pg_sequences.schemaname, pg_sequences.sequencename) = (seq_name.schema, seq_name.name) ) AS seq_st """ def __init__(self) -> None: super().__init__( name=('edgedb', '_dump_sequences'), args=[('seqs', ('uuid[]',))], returns=('text',), # Volatile because sequence state is volatile volatility='volatile', text=self.text, ) class SysConfigSourceType(dbops.Enum): def __init__(self) -> None: super().__init__( name=('edgedb', '_sys_config_source_t'), values=[ 'default', 'postgres default', 'postgres environment variable', 'postgres configuration file', 'postgres command line', 'postgres global', 'system override', 'database', 'postgres client', 'postgres override', 'postgres interactive', 'postgres test', 'session', ] ) class SysConfigScopeType(dbops.Enum): def __init__(self) -> None: super().__init__( name=('edgedb', '_sys_config_scope_t'), values=[ 'SYSTEM', 'DATABASE', 'SESSION', ] ) class SysConfigValueType(dbops.CompositeType): """Type of values returned by _read_sys_config.""" def __init__(self) -> None: super().__init__(name=('edgedb', '_sys_config_val_t')) self.add_columns([ dbops.Column(name='name', type='text'), dbops.Column(name='value', type='jsonb'), dbops.Column(name='source', type='edgedb._sys_config_source_t'), dbops.Column(name='scope', type='edgedb._sys_config_scope_t'), ]) class SysConfigFullFunction(dbops.Function): # This is a function because "_edgecon_state" is a temporary table # and therefore cannot be used in a view. 
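    # Sketch of the precedence implemented by the query below: every source
    # (spec defaults, system overrides, database config, session state and
    # backend settings) is UNIONed together, and row_number() partitioned by
    # setting name keeps only the row with the highest _sys_config_source_t
    # value, so a 'session' value shadows a 'database' value, which in turn
    # shadows a 'default'.  A hypothetical call that ignores session-level
    # overrides:
    #
    #   SELECT name, value, source, scope
    #   FROM edgedb._read_sys_config_full(
    #       NULL::edgedb._sys_config_source_t[],
    #       'database'::edgedb._sys_config_source_t);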
text = f''' BEGIN RETURN QUERY EXECUTE $$ WITH config_spec AS ( SELECT s.key AS name, s.value->'default' AS default, (s.value->>'internal')::bool AS internal, (s.value->>'system')::bool AS system, (s.value->>'typeid')::uuid AS typeid, (s.value->>'typemod') AS typemod, (s.value->>'backend_setting') AS backend_setting FROM jsonb_each( (SELECT json FROM edgedbinstdata.instdata WHERE key = 'configspec') ) AS s ), config_defaults AS ( SELECT s.name AS name, s.default AS value, 'default' AS source FROM config_spec s ), config_sys AS ( SELECT s.key AS name, s.value AS value, 'system override' AS source FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_SYSTEM_DB)} ) -> 'sysconfig' ) AS s ), config_db AS ( SELECT s.name AS name, s.value AS value, 'database' AS source FROM edgedb._db_config s ), config_sess AS ( SELECT s.name AS name, s.value AS value, 'session' AS source FROM _edgecon_state s WHERE s.type = 'C' ), pg_db_setting AS ( SELECT nameval.name, to_jsonb(nameval.value) AS value, 'database' AS source FROM (SELECT setconfig FROM pg_db_role_setting WHERE setdatabase = ( SELECT oid FROM pg_database WHERE datname = current_database() ) AND setrole = 0 ) AS cfg_array, LATERAL unnest(cfg_array.setconfig) AS cfg_set(s), LATERAL ( SELECT split_part(cfg_set.s, '=', 1) AS name, split_part(cfg_set.s, '=', 2) AS value ) AS nameval, LATERAL ( SELECT config_spec.name FROM config_spec WHERE nameval.name = config_spec.backend_setting ) AS spec ), pg_conf_settings AS ( SELECT spec.name, to_jsonb(setting) AS value, 'postgres configuration file' AS source FROM pg_file_settings, LATERAL ( SELECT config_spec.name FROM config_spec WHERE pg_file_settings.name = config_spec.backend_setting ) AS spec WHERE sourcefile != (( SELECT setting FROM pg_settings WHERE name = 'data_directory' ) || '/postgresql.auto.conf') AND applied ), pg_auto_conf_settings AS ( SELECT spec.name, to_jsonb(setting) AS value, 'system override' AS source FROM pg_file_settings, LATERAL ( SELECT config_spec.name FROM config_spec WHERE pg_file_settings.name = config_spec.backend_setting ) AS spec WHERE sourcefile = (( SELECT setting FROM pg_settings WHERE name = 'data_directory' ) || '/postgresql.auto.conf') AND applied ), pg_config AS ( SELECT spec.name, to_jsonb( CASE WHEN u.v[1] IS NOT NULL THEN (settings.setting::int * (u.v[1])::int)::text || u.v[2] ELSE settings.setting || COALESCE(settings.unit, '') END ) AS value, source AS source FROM ( SELECT pg_settings.name AS name, pg_settings.unit AS unit, pg_settings.setting AS setting, (CASE WHEN pg_settings.source IN ('session', 'database') THEN pg_settings.source ELSE 'postgres ' || pg_settings.source END) AS source FROM pg_settings ) AS settings, LATERAL ( SELECT regexp_match(settings.unit, '(\\d+)(\\w+)') AS v ) AS u, LATERAL ( SELECT config_spec.name FROM config_spec WHERE settings.name = config_spec.backend_setting ) AS spec ) SELECT q.name, q.value, q.source, (CASE WHEN q.source < 'database'::edgedb._sys_config_source_t THEN 'SYSTEM' WHEN q.source = 'database'::edgedb._sys_config_source_t THEN 'DATABASE' ELSE 'SESSION' END)::edgedb._sys_config_scope_t AS scope FROM (SELECT u.name, u.value, u.source::edgedb._sys_config_source_t, row_number() OVER ( PARTITION BY u.name ORDER BY u.source::edgedb._sys_config_source_t DESC ) AS n FROM (SELECT * FROM ( SELECT * FROM config_defaults UNION ALL SELECT * FROM config_sys UNION ALL SELECT * FROM config_db UNION ALL SELECT * FROM config_sess UNION ALL SELECT * FROM pg_db_setting UNION ALL SELECT * FROM pg_conf_settings UNION ALL SELECT * 
FROM pg_auto_conf_settings UNION ALL SELECT * FROM pg_config ) AS q WHERE ($1 IS NULL OR q.source::edgedb._sys_config_source_t = any($1)) AND ($2 IS NULL OR q.source::edgedb._sys_config_source_t <= $2) ) AS u ) AS q WHERE q.n = 1; $$ USING source_filter, max_source; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_read_sys_config_full'), args=[ ( 'source_filter', ('edgedb', '_sys_config_source_t[]',), 'NULL', ), ( 'max_source', ('edgedb', '_sys_config_source_t'), 'NULL', ), ], returns=('edgedb', '_sys_config_val_t'), set_returning=True, language='plpgsql', volatility='volatile', text=self.text, ) class SysConfigNoFileAccessFunction(dbops.Function): text = f''' BEGIN RETURN QUERY EXECUTE $$ WITH config_spec AS ( SELECT s.key AS name, s.value->'default' AS default, (s.value->>'internal')::bool AS internal, (s.value->>'system')::bool AS system, (s.value->>'typeid')::uuid AS typeid, (s.value->>'typemod') AS typemod, (s.value->>'backend_setting') AS backend_setting FROM jsonb_each( (SELECT json FROM edgedbinstdata.instdata WHERE key = 'configspec') ) AS s ), config_defaults AS ( SELECT s.name AS name, s.default AS value, 'default' AS source FROM config_spec s ), config_sys AS ( SELECT s.key AS name, s.value AS value, 'system override' AS source FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_SYSTEM_DB)} ) -> 'sysconfig' ) AS s ), config_db AS ( SELECT s.name AS name, s.value AS value, 'database' AS source FROM edgedb._db_config s ), config_sess AS ( SELECT s.name AS name, s.value AS value, 'session' AS source FROM _edgecon_state s WHERE s.type = 'C' ), pg_db_setting AS ( SELECT nameval.name, to_jsonb(nameval.value) AS value, 'database' AS source FROM (SELECT setconfig FROM pg_db_role_setting WHERE setdatabase = ( SELECT oid FROM pg_database WHERE datname = current_database() ) AND setrole = 0 ) AS cfg_array, LATERAL unnest(cfg_array.setconfig) AS cfg_set(s), LATERAL ( SELECT split_part(cfg_set.s, '=', 1) AS name, split_part(cfg_set.s, '=', 2) AS value ) AS nameval, LATERAL ( SELECT config_spec.name FROM config_spec WHERE nameval.name = config_spec.backend_setting ) AS spec ), pg_config AS ( SELECT spec.name, to_jsonb( CASE WHEN u.v[1] IS NOT NULL THEN (settings.setting::int * (u.v[1])::int)::text || u.v[2] ELSE settings.setting || COALESCE(settings.unit, '') END ) AS value, source AS source FROM ( SELECT pg_settings.name AS name, pg_settings.unit AS unit, pg_settings.setting AS setting, (CASE WHEN pg_settings.source IN ('session', 'database') THEN pg_settings.source ELSE 'postgres ' || pg_settings.source END) AS source FROM pg_settings ) AS settings, LATERAL ( SELECT regexp_match(settings.unit, '(\\d+)(\\w+)') AS v ) AS u, LATERAL ( SELECT config_spec.name FROM config_spec WHERE settings.name = config_spec.backend_setting ) AS spec ) SELECT q.name, q.value, q.source, (CASE WHEN q.source < 'database'::edgedb._sys_config_source_t THEN 'SYSTEM' WHEN q.source = 'database'::edgedb._sys_config_source_t THEN 'DATABASE' ELSE 'SESSION' END)::edgedb._sys_config_scope_t AS scope FROM (SELECT u.name, u.value, u.source::edgedb._sys_config_source_t, row_number() OVER ( PARTITION BY u.name ORDER BY u.source::edgedb._sys_config_source_t DESC ) AS n FROM (SELECT * FROM ( SELECT * FROM config_defaults UNION ALL SELECT * FROM config_sys UNION ALL SELECT * FROM config_db UNION ALL SELECT * FROM config_sess UNION ALL SELECT * FROM pg_db_setting UNION ALL SELECT * FROM pg_config ) AS q WHERE ($1 IS NULL OR q.source::edgedb._sys_config_source_t = any($1)) AND ($2 IS NULL OR 
q.source::edgedb._sys_config_source_t <= $2) ) AS u ) AS q WHERE q.n = 1; $$ USING source_filter, max_source; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_read_sys_config_no_file_access'), args=[ ( 'source_filter', ('edgedb', '_sys_config_source_t[]',), 'NULL', ), ( 'max_source', ('edgedb', '_sys_config_source_t'), 'NULL', ), ], returns=('edgedb', '_sys_config_val_t'), set_returning=True, language='plpgsql', volatility='volatile', text=self.text, ) class SysConfigFunction(dbops.Function): text = f''' DECLARE backend_caps bigint; BEGIN backend_caps := edgedb.get_backend_capabilities(); IF (backend_caps & {int(pgcluster.BackendCapabilities.CONFIGFILE_ACCESS)}) != 0 THEN RETURN QUERY SELECT * FROM edgedb._read_sys_config_full(source_filter, max_source); ELSE RETURN QUERY SELECT * FROM edgedb._read_sys_config_no_file_access(source_filter, max_source); END IF; END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_read_sys_config'), args=[ ( 'source_filter', ('edgedb', '_sys_config_source_t[]',), 'NULL', ), ( 'max_source', ('edgedb', '_sys_config_source_t'), 'NULL', ), ], returns=('edgedb', '_sys_config_val_t'), set_returning=True, language='plpgsql', volatility='volatile', text=self.text, ) class SysVersionFunction(dbops.Function): text = f''' BEGIN RETURN ( SELECT value FROM _edgecon_state WHERE name = 'server_version' AND type = 'R' ); END; ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_sys_version'), args=[], returns=('jsonb',), language='plpgsql', volatility='stable', text=self.text, ) class SysGetTransactionIsolation(dbops.Function): "Get transaction isolation value as text compatible with EdgeDB's enum." text = r''' SELECT CASE setting WHEN 'repeatable read' THEN 'RepeatableRead' WHEN 'serializable' THEN 'Serializable' ELSE ( SELECT edgedb.raise( NULL::text, msg => ( 'unknown transaction isolation level "' || setting || '"' ) ) ) END FROM pg_settings WHERE name = 'transaction_isolation' ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_get_transaction_isolation'), args=[], returns=('text',), # This function only reads from a table. volatility='stable', text=self.text) class GetCachedReflection(dbops.Function): "Return a list of existing schema reflection helpers." text = ''' SELECT substring(proname, '__rh_#"%#"', '#') AS eql_hash, proargnames AS argnames FROM pg_proc INNER JOIN pg_namespace ON (pronamespace = pg_namespace.oid) WHERE proname LIKE '\\_\\_rh\\_%' ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_get_cached_reflection'), args=[], returns=('record',), set_returning=True, # This function only reads from a table. 
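            # Illustrative result shape (assumption): a cached helper created
            # as "__rh_a1b2c3" would be reported with eql_hash = 'a1b2c3' and
            # argnames set to its proargnames array.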
volatility='stable', text=self.text, ) class GetBaseScalarTypeMap(dbops.Function): """Return a map of base EdgeDB scalar type ids to Postgres type names.""" text = f''' VALUES {", ".join( f"""( {ql(str(k))}::uuid, { ql(f'{v[0]}.{v[1]}') if len(v) == 2 else ql(f'pg_catalog.{v[0]}') } )""" for k, v in types.base_type_name_map.items())} ''' def __init__(self) -> None: super().__init__( name=('edgedb', '_get_base_scalar_type_map'), args=[], returns=('record',), set_returning=True, volatility='immutable', text=self.text, ) class GetPgTypeForEdgeDBTypeFunction(dbops.Function): """Return Postgres OID representing a given EdgeDB type.""" text = f''' SELECT coalesce( ( SELECT tn::regtype::oid FROM edgedb._get_base_scalar_type_map() AS m(tid uuid, tn text) WHERE m.tid = "typeid" ), ( SELECT typ.oid FROM pg_catalog.pg_type typ WHERE typ.typname = "typeid"::text || '_domain' OR typ.typname = "typeid"::text || '_t' ), ( SELECT typ.typarray FROM pg_catalog.pg_type typ WHERE typ.typname = "elemid"::text || '_domain' OR typ.typname = "elemid"::text || '_t' OR typ.oid = ( SELECT tn::regtype::oid FROM edgedb._get_base_scalar_type_map() AS m(tid uuid, tn text) WHERE tid = elemid ) ), edgedb.raise( NULL::bigint, 'invalid_parameter_value', msg => ( format( 'cannot determine OID of EdgeDB type %L', typeid::text ) ) ) )::bigint ''' def __init__(self) -> None: super().__init__( name=('edgedb', 'get_pg_type_for_edgedb_type'), args=[ ('typeid', ('uuid',)), ('elemid', ('uuid',)), ], returns=('bigint',), volatility='stable', text=self.text, ) async def bootstrap(conn: asyncpg.Connection) -> None: commands = dbops.CommandGroup() commands.add_commands([ dbops.CreateSchema(name='edgedb'), dbops.CreateSchema(name='edgedbss'), dbops.CreateSchema(name='edgedbpub'), dbops.CreateSchema(name='edgedbstd'), dbops.CreateCompositeType(ExpressionType()), dbops.CreateTable(DBConfigTable()), dbops.CreateFunction(QuoteIdentFunction()), dbops.CreateFunction(QuoteNameFunction()), dbops.CreateFunction(AlterCurrentDatabaseSetString()), dbops.CreateFunction(AlterCurrentDatabaseSetStringArray()), dbops.CreateFunction(AlterCurrentDatabaseSetNonArray()), dbops.CreateFunction(AlterCurrentDatabaseSetArray()), dbops.CreateFunction(GetBackendCapabilitiesFunction()), dbops.CreateFunction(GetBackendTenantIDFunction()), dbops.CreateFunction(GetDatabaseBackendNameFunction()), dbops.CreateFunction(GetRoleBackendNameFunction()), dbops.CreateFunction(GetUserSequenceBackendNameFunction()), dbops.CreateFunction(GetStdModulesFunction()), dbops.CreateFunction(GetObjectMetadata()), dbops.CreateFunction(GetColumnMetadata()), dbops.CreateFunction(GetSharedObjectMetadata()), dbops.CreateFunction(GetDatabaseMetadataFunction()), dbops.CreateFunction(GetCurrentDatabaseFunction()), dbops.CreateFunction(RaiseExceptionFunction()), dbops.CreateFunction(RaiseExceptionOnNullFunction()), dbops.CreateFunction(RaiseExceptionOnNotNullFunction()), dbops.CreateFunction(RaiseExceptionOnEmptyStringFunction()), dbops.CreateFunction(AssertJSONTypeFunction()), dbops.CreateFunction(ExtractJSONScalarFunction()), dbops.CreateFunction(NormalizeNameFunction()), dbops.CreateFunction(GetNameModuleFunction()), dbops.CreateFunction(NullIfArrayNullsFunction()), dbops.CreateCompositeType(IndexDescType()), dbops.CreateFunction(IntrospectIndexesFunction()), dbops.CreateCompositeType(TriggerDescType()), dbops.CreateFunction(IntrospectTriggersFunction()), dbops.CreateCompositeType(TableInheritanceDescType()), dbops.CreateDomain(BigintDomain()), dbops.CreateDomain(TimestampTzDomain()), 
dbops.CreateDomain(TimestampDomain()), dbops.CreateDomain(DateDomain()), dbops.CreateDomain(DurationDomain()), dbops.CreateDomain(RelativeDurationDomain()), dbops.CreateFunction(StrToBigint()), dbops.CreateFunction(StrToDecimal()), dbops.CreateFunction(StrToInt64NoInline()), dbops.CreateFunction(StrToInt32NoInline()), dbops.CreateFunction(StrToInt16NoInline()), dbops.CreateFunction(StrToFloat64NoInline()), dbops.CreateFunction(StrToFloat32NoInline()), dbops.CreateFunction(GetTableDescendantsFunction()), dbops.CreateFunction(ParseTriggerConditionFunction()), dbops.CreateFunction(NormalizeArrayIndexFunction()), dbops.CreateFunction(ArrayIndexWithBoundsFunction()), dbops.CreateFunction(ArraySliceFunction()), dbops.CreateFunction(StringIndexWithBoundsFunction()), dbops.CreateFunction(LengthStringProxyFunction()), dbops.CreateFunction(LengthBytesProxyFunction()), dbops.CreateFunction(SubstrProxyFunction()), dbops.CreateFunction(StringSliceImplFunction()), dbops.CreateFunction(StringSliceFunction()), dbops.CreateFunction(BytesSliceFunction()), dbops.CreateFunction(JSONIndexByTextFunction()), dbops.CreateFunction(JSONIndexByIntFunction()), dbops.CreateFunction(JSONSliceFunction()), dbops.CreateFunction(DatetimeInFunction()), dbops.CreateFunction(DurationInFunction()), dbops.CreateFunction(LocalDatetimeInFunction()), dbops.CreateFunction(LocalDateInFunction()), dbops.CreateFunction(LocalTimeInFunction()), dbops.CreateFunction(ToTimestampTZCheck()), dbops.CreateFunction(ToDatetimeFunction()), dbops.CreateFunction(ToLocalDatetimeFunction()), dbops.CreateFunction(StrToBool()), dbops.CreateFunction(BytesIndexWithBoundsFunction()), dbops.CreateEnum(SysConfigSourceType()), dbops.CreateEnum(SysConfigScopeType()), dbops.CreateCompositeType(SysConfigValueType()), dbops.CreateFunction(SysConfigFullFunction()), dbops.CreateFunction(SysConfigNoFileAccessFunction()), dbops.CreateFunction(SysConfigFunction()), dbops.CreateFunction(SysVersionFunction()), dbops.CreateFunction(SysGetTransactionIsolation()), dbops.CreateFunction(GetCachedReflection()), dbops.CreateFunction(GetBaseScalarTypeMap()), dbops.CreateFunction(GetPgTypeForEdgeDBTypeFunction()), dbops.CreateFunction(DescribeSystemConfigAsDDLFunctionForwardDecl()), dbops.CreateFunction(DescribeDatabaseConfigAsDDLFunctionForwardDecl()), dbops.CreateFunction(DescribeRolesAsDDLFunctionForwardDecl()), ]) block = dbops.PLTopBlock() commands.generate(block) await _execute_block(conn, block) async def create_pg_extensions(conn: asyncpg.Connection) -> None: commands = dbops.CommandGroup() commands.add_commands([ dbops.CreateSchema(name='edgedbext'), dbops.CreateExtension( dbops.Extension(name='uuid-ossp', schema='edgedbext'), ), ]) block = dbops.PLTopBlock() commands.generate(block) await _execute_block(conn, block) classref_attr_aliases = { 'links': 'pointers', 'link_properties': 'pointers' } def tabname(schema: s_schema.Schema, obj: s_obj.Object) -> Tuple[str, str]: return ( 'edgedbss', common.get_backend_name( schema, obj, aspect='table', catenate=False, )[1], ) def inhviewname(schema: s_schema.Schema, obj: s_obj.Object) -> Tuple[str, str]: return ( 'edgedbss', common.get_backend_name( schema, obj, aspect='inhview', catenate=False, )[1], ) def ptr_col_name( schema: s_schema.Schema, obj: s_sources.Source, propname: str, ) -> str: prop = obj.getptr(schema, s_name.UnqualName(propname)) psi = types.get_pointer_storage_info(prop, schema=schema) return psi.column_name # type: ignore[no-any-return] def _generate_database_views(schema: s_schema.Schema) -> 
List[dbops.View]: Database = schema.get('sys::Database', type=s_objtypes.ObjectType) annos = Database.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = Database.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) view_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, Database, 'id'))}, (SELECT id FROM edgedb."_SchemaObjectType" WHERE name = 'sys::Database') AS {qi(ptr_col_name(schema, Database, '__type__'))}, (datname IN ( edgedb.get_database_backend_name( {ql(defines.EDGEDB_TEMPLATE_DB)}), edgedb.get_database_backend_name( {ql(defines.EDGEDB_SYSTEM_DB)}) )) AS {qi(ptr_col_name(schema, Database, 'internal'))}, (d.description)->>'name' AS {qi(ptr_col_name(schema, Database, 'name'))}, (d.description)->>'name' AS {qi(ptr_col_name(schema, Database, 'name__internal'))}, ARRAY[]::text[] AS {qi(ptr_col_name(schema, Database, 'computed_fields'))}, ((d.description)->>'builtin')::bool AS {qi(ptr_col_name(schema, Database, 'builtin'))} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d WHERE (d.description)->>'id' IS NOT NULL AND (d.description)->>'tenant_id' = edgedb.get_backend_tenant_id() ''' annos_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'target'))}, (annotations->>'value')::text AS {qi(ptr_col_name(schema, annos, 'value'))}, (annotations->>'owned')::bool AS {qi(ptr_col_name(schema, annos, 'owned'))} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements((d.description)->'annotations') ) AS annotations ''' int_annos_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'target'))}, (annotations->>'owned')::bool AS {qi(ptr_col_name(schema, int_annos, 'owned'))} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations__internal' ) ) AS annotations ''' objects = { Database: view_query, annos: annos_link_query, int_annos: int_annos_link_query, } views = [] for obj, query in objects.items(): tabview = dbops.View(name=tabname(schema, obj), query=query) inhview = dbops.View(name=inhviewname(schema, obj), query=query) views.append(tabview) views.append(inhview) return views def _generate_extension_views(schema: s_schema.Schema) -> List[dbops.View]: ExtPkg = schema.get('sys::ExtensionPackage', type=s_objtypes.ObjectType) annos = ExtPkg.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = ExtPkg.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) ver = ExtPkg.getptr( schema, s_name.UnqualName('version'), type=s_props.Property) ver_t = common.get_backend_name( schema, ver.get_target(schema), catenate=False, ) view_query = f''' SELECT (e.value->>'id')::uuid AS {qi(ptr_col_name(schema, ExtPkg, 'id'))}, (SELECT id FROM edgedb."_SchemaObjectType" WHERE name = 'sys::ExtensionPackage') AS {qi(ptr_col_name(schema, ExtPkg, '__type__'))}, (e.value->>'name') AS {qi(ptr_col_name(schema, ExtPkg, 'name'))}, (e.value->>'name__internal') AS {qi(ptr_col_name(schema, ExtPkg, 'name__internal'))}, ( 
(e.value->'version'->>'major')::int, (e.value->'version'->>'minor')::int, (e.value->'version'->>'stage')::text, (e.value->'version'->>'stage_no')::int, COALESCE( (SELECT array_agg(q.v::text) FROM jsonb_array_elements( e.value->'version'->'local' ) AS q(v)), ARRAY[]::text[] ) )::{qt(ver_t)} AS {qi(ptr_col_name(schema, ExtPkg, 'version'))}, (e.value->>'script') AS {qi(ptr_col_name(schema, ExtPkg, 'script'))}, ARRAY[]::text[] AS {qi(ptr_col_name(schema, ExtPkg, 'computed_fields'))}, (e.value->>'builtin')::bool AS {qi(ptr_col_name(schema, ExtPkg, 'builtin'))}, (e.value->>'internal')::bool AS {qi(ptr_col_name(schema, ExtPkg, 'internal'))} FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e ''' annos_link_query = f''' SELECT (e.value->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'target'))}, (annotations->>'value')::text AS {qi(ptr_col_name(schema, annos, 'value'))}, (annotations->>'is_owned')::bool AS {qi(ptr_col_name(schema, annos, 'owned'))} FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations') ) AS annotations ''' int_annos_link_query = f''' SELECT (e.value->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'target'))}, (annotations->>'is_owned')::bool AS {qi(ptr_col_name(schema, int_annos, 'owned'))} FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations__internal') ) AS annotations ''' objects = { ExtPkg: view_query, annos: annos_link_query, int_annos: int_annos_link_query, } views = [] for obj, query in objects.items(): tabview = dbops.View(name=tabname(schema, obj), query=query) inhview = dbops.View(name=inhviewname(schema, obj), query=query) views.append(tabview) views.append(inhview) return views def _generate_role_views(schema: s_schema.Schema) -> List[dbops.View]: Role = schema.get('sys::Role', type=s_objtypes.ObjectType) member_of = Role.getptr( schema, s_name.UnqualName('member_of'), type=s_links.Link) bases = Role.getptr( schema, s_name.UnqualName('bases'), type=s_links.Link) ancestors = Role.getptr( schema, s_name.UnqualName('ancestors'), type=s_links.Link) annos = Role.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = Role.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) superuser = f''' a.rolsuper OR EXISTS ( SELECT FROM pg_auth_members m INNER JOIN pg_catalog.pg_roles g ON (m.roleid = g.oid) WHERE m.member = a.oid AND g.rolname = {ql(defines.EDGEDB_SUPERGROUP)} ) ''' view_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, Role, 'id'))}, (SELECT id FROM edgedb."_SchemaObjectType" WHERE name = 'sys::Role') AS {qi(ptr_col_name(schema, Role, '__type__'))}, (d.description)->>'name' AS {qi(ptr_col_name(schema, Role, 'name'))}, (d.description)->>'name' AS {qi(ptr_col_name(schema, Role, 'name__internal'))}, {superuser} AS {qi(ptr_col_name(schema, Role, 'superuser'))}, False AS {qi(ptr_col_name(schema, Role, 'abstract'))}, False AS {qi(ptr_col_name(schema, Role, 'final'))}, False AS {qi(ptr_col_name(schema, Role, 'is_derived'))}, ARRAY[]::text[] AS {qi(ptr_col_name(schema, Role, 'inherited_fields'))}, ARRAY[]::text[] AS 
{qi(ptr_col_name(schema, Role, 'computed_fields'))}, ((d.description)->>'builtin')::bool AS {qi(ptr_col_name(schema, Role, 'builtin'))}, False AS {qi(ptr_col_name(schema, Role, 'internal'))}, (d.description)->>'password_hash' AS {qi(ptr_col_name(schema, Role, 'password'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d WHERE (d.description)->>'id' IS NOT NULL AND (d.description)->>'tenant_id' = edgedb.get_backend_tenant_id() ''' member_of_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, member_of, 'source'))}, ((md.description)->>'id')::uuid AS {qi(ptr_col_name(schema, member_of, 'target'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' bases_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, bases, 'source'))}, ((md.description)->>'id')::uuid AS {qi(ptr_col_name(schema, bases, 'target'))}, row_number() OVER (PARTITION BY a.oid ORDER BY m.roleid) AS {qi(ptr_col_name(schema, bases, 'index'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' ancestors_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, ancestors, 'source'))}, ((md.description)->>'id')::uuid AS {qi(ptr_col_name(schema, ancestors, 'target'))}, row_number() OVER (PARTITION BY a.oid ORDER BY m.roleid) AS {qi(ptr_col_name(schema, ancestors, 'index'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' annos_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, annos, 'target'))}, (annotations->>'value')::text AS {qi(ptr_col_name(schema, annos, 'value'))}, (annotations->>'owned')::bool AS {qi(ptr_col_name(schema, annos, 'owned'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations' ) ) AS annotations ''' int_annos_link_query = f''' SELECT ((d.description)->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'source'))}, (annotations->>'id')::uuid AS {qi(ptr_col_name(schema, int_annos, 'target'))}, (annotations->>'owned')::bool AS {qi(ptr_col_name(schema, int_annos, 'owned'))} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations__internal' ) ) AS annotations ''' objects = { Role: view_query, member_of: member_of_link_query, bases: bases_link_query, ancestors: ancestors_link_query, annos: annos_link_query, int_annos: int_annos_link_query, } views = [] for obj, query in objects.items(): tabview = dbops.View(name=tabname(schema, obj), query=query) inhview = dbops.View(name=inhviewname(schema, obj), 
query=query) views.append(tabview) views.append(inhview) return views def _generate_schema_ver_views(schema: s_schema.Schema) -> List[dbops.View]: Ver = schema.get( 'sys::GlobalSchemaVersion', type=s_objtypes.ObjectType, ) view_query = f''' SELECT (v.value->>'id')::uuid AS {qi(ptr_col_name(schema, Ver, 'id'))}, (SELECT id FROM edgedb."_SchemaObjectType" WHERE name = 'sys::GlobalSchemaVersion') AS {qi(ptr_col_name(schema, Ver, '__type__'))}, (v.value->>'name') AS {qi(ptr_col_name(schema, Ver, 'name'))}, (v.value->>'name') AS {qi(ptr_col_name(schema, Ver, 'name__internal'))}, (v.value->>'version')::uuid AS {qi(ptr_col_name(schema, Ver, 'version'))}, (v.value->>'builtin')::bool AS {qi(ptr_col_name(schema, Ver, 'builtin'))}, (v.value->>'internal')::bool AS {qi(ptr_col_name(schema, Ver, 'internal'))}, ARRAY[]::text[] AS {qi(ptr_col_name(schema, Ver, 'computed_fields'))} FROM jsonb_each( edgedb.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'GlobalSchemaVersion' ) AS v ''' objects = { Ver: view_query } views = [] for obj, query in objects.items(): tabview = dbops.View(name=tabname(schema, obj), query=query) inhview = dbops.View(name=inhviewname(schema, obj), query=query) views.append(tabview) views.append(inhview) return views def _make_json_caster( schema: s_schema.Schema, json_casts: Mapping[s_types.Type, s_casts.Cast], stype: s_types.Type, context: str, ) -> Callable[[Any], str]: cast = json_casts.get(stype) if cast is None: raise RuntimeError( f'there is no direct cast from std::json to ' f'the type of {context!r} ' f'({stype.get_displayname(schema)})' ) if cast.get_from_cast(schema): pgtype = types.pg_type_from_object(schema, stype) def _cast(val: Any) -> str: return f'({val})::{q(*pgtype)}' else: if cast.get_code(schema): cast_name = cast.get_name(schema) func_name = common.get_cast_backend_name( cast_name, aspect='function') else: func_name = cast.get_from_function(schema) def _cast(val: Any) -> str: return f'{q(*func_name)}({val})' return _cast def _generate_schema_aliases( schema: s_schema.Schema, module: s_name.UnqualName, ) -> List[dbops.View]: views = [] schema_objs = schema.get_objects( type=s_objtypes.ObjectType, included_modules=(module,), ) for schema_obj in schema_objs: bn = common.get_backend_name( schema, schema_obj, aspect='inhview', catenate=False, ) if module.name == 'sys' and not schema_obj.get_abstract(schema): bn = ('edgedbss', bn[1]) targets = [] for ptr in schema_obj.get_pointers(schema).objects(schema): if ptr.is_pure_computable(schema): continue psi = types.get_pointer_storage_info(ptr, schema=schema) if psi.table_type == 'ObjectType': ptr_name = ptr.get_shortname(schema).name col_name = psi.column_name if col_name != ptr_name: targets.append(f'{qi(col_name)} AS {qi(ptr_name)}') targets.append(f'{qi(col_name)} AS {qi(col_name)}') prefix = module.name.capitalize() alias_view = dbops.View( name=('edgedb', f'_{prefix}{schema_obj.get_name(schema).name}'), query=(f'SELECT {", ".join(targets)} FROM {q(*bn)}') ) views.append(alias_view) return views async def generate_support_views( conn: asyncpg.Connection, schema: s_schema.Schema, ) -> None: commands = dbops.CommandGroup() schema_alias_views = _generate_schema_aliases( schema, s_name.UnqualName('schema')) for alias_view in schema_alias_views: commands.add_command(dbops.CreateView(alias_view, or_replace=True)) InhObject = schema.get( 'schema::InheritingObject', type=s_objtypes.ObjectType) InhObject_ancestors = InhObject.getptr( schema, s_name.UnqualName('ancestors')) # "issubclass" SQL functions rely on 
access to the ancestors link. bn = common.get_backend_name( schema, InhObject_ancestors, aspect='inhview', catenate=False, ) alias_view = dbops.View( name=('edgedb', f'_SchemaInheritingObject__ancestors'), query=(f'SELECT * FROM {q(*bn)}') ) commands.add_command(dbops.CreateView(alias_view, or_replace=True)) conf = schema.get('cfg::Config', type=s_objtypes.ObjectType) cfg_views, _ = _generate_config_type_view( schema, conf, scope=None, path=[], rptr=None) commands.add_commands([ dbops.CreateView(dbops.View(name=tn, query=q), or_replace=True) for tn, q in cfg_views ]) conf = schema.get('cfg::SystemConfig', type=s_objtypes.ObjectType) cfg_views, _ = _generate_config_type_view( schema, conf, scope=qltypes.ConfigScope.SYSTEM, path=[], rptr=None) commands.add_commands([ dbops.CreateView(dbops.View(name=tn, query=q), or_replace=True) for tn, q in cfg_views ]) conf = schema.get('cfg::DatabaseConfig', type=s_objtypes.ObjectType) cfg_views, _ = _generate_config_type_view( schema, conf, scope=qltypes.ConfigScope.DATABASE, path=[], rptr=None) commands.add_commands([ dbops.CreateView(dbops.View(name=tn, query=q), or_replace=True) for tn, q in cfg_views ]) for dbview in _generate_database_views(schema): commands.add_command(dbops.CreateView(dbview, or_replace=True)) for extview in _generate_extension_views(schema): commands.add_command(dbops.CreateView(extview, or_replace=True)) for roleview in _generate_role_views(schema): commands.add_command(dbops.CreateView(roleview, or_replace=True)) for verview in _generate_schema_ver_views(schema): commands.add_command(dbops.CreateView(verview, or_replace=True)) sys_alias_views = _generate_schema_aliases( schema, s_name.UnqualName('sys')) for alias_view in sys_alias_views: commands.add_command(dbops.CreateView(alias_view, or_replace=True)) block = dbops.PLTopBlock() commands.generate(block) await _execute_block(conn, block) async def generate_support_functions( conn: asyncpg.Connection, schema: s_schema.Schema, ) -> None: commands = dbops.CommandGroup() commands.add_commands([ dbops.CreateFunction(IssubclassFunction()), dbops.CreateFunction(IssubclassFunction2()), dbops.CreateFunction(GetSchemaObjectNameFunction()), ]) block = dbops.PLTopBlock() commands.generate(block) await _execute_block(conn, block) async def generate_more_support_functions( conn: asyncpg.Connection, compiler: edbcompiler.Compiler, schema: s_schema.Schema, testmode: bool, ) -> None: commands = dbops.CommandGroup() _, text = edbbootstrap.compile_bootstrap_script( compiler, schema, _describe_config( schema, source='system override', testmode=testmode), output_format=edbcompiler.IoFormat.BINARY, ) DescribeSystemConfigAsDDLFunction = dbops.Function( name=('edgedb', '_describe_system_config_as_ddl'), args=[], returns=('text'), # Stable because it's raising exceptions. volatility='stable', text=text, ) _, text = edbbootstrap.compile_bootstrap_script( compiler, schema, _describe_config( schema, source='database', testmode=testmode), output_format=edbcompiler.IoFormat.BINARY, ) DescribeDatabaseConfigAsDDLFunction = dbops.Function( name=('edgedb', '_describe_database_config_as_ddl'), args=[], returns=('text'), # Stable because it's raising exceptions. 
volatility='stable', text=text, ) commands.add_commands([ dbops.CreateFunction( DescribeSystemConfigAsDDLFunction, or_replace=True), dbops.CreateFunction( DescribeDatabaseConfigAsDDLFunction, or_replace=True), dbops.CreateFunction( DescribeRolesAsDDLFunction(schema), or_replace=True), dbops.CreateFunction(GetSequenceBackendNameFunction()), dbops.CreateFunction(DumpSequencesFunction()), ]) block = dbops.PLTopBlock() commands.generate(block) await _execute_block(conn, block) def _describe_config( schema: s_schema.Schema, source: str, testmode: bool, ) -> str: """Generate an EdgeQL query to render config as DDL.""" if source == 'system override': scope = qltypes.ConfigScope.SYSTEM config_object_name = 'cfg::SystemConfig' elif source == 'database': scope = qltypes.ConfigScope.DATABASE config_object_name = 'cfg::DatabaseConfig' else: raise AssertionError(f'unexpected configuration source: {source!r}') cfg = schema.get(config_object_name, type=s_objtypes.ObjectType) items = [] for ptr_name, p in cfg.get_pointers(schema).items(schema): pn = str(ptr_name) if pn in ('id', '__type__'): continue is_internal = ( p.get_annotation( schema, s_name.QualName('cfg', 'internal') ) == 'true' ) if is_internal and not testmode: continue ptype = p.get_target(schema) assert ptype is not None ptr_card = p.get_cardinality(schema) mult = ptr_card.is_multi() if isinstance(ptype, s_objtypes.ObjectType): item = textwrap.indent( _render_config_object( schema=schema, valtype=ptype, value_expr=str(ptype.get_name(schema)), scope=scope, join_term='', level=1, ), ' ' * 4, ) else: psource = f'{config_object_name}.{ qlquote.quote_ident(pn) }' renderer = _render_config_set if mult else _render_config_scalar item = textwrap.indent( renderer( schema=schema, valtype=ptype, value_expr=psource, name=pn, scope=scope, level=1, ), ' ' * 4, ) condition = f'EXISTS json_get(conf, {ql(pn)})' if is_internal: condition = f'({condition}) AND testmode' items.append(f"(\n{item}\n IF {condition} ELSE ''\n )") testmode_check = ( "<bool>json_get(cfg::get_config_json(),'__internal_testmode','value')" " ?? 
false" ) query = ( f"FOR conf IN {{cfg::get_config_json(sources := [{ql(source)}])}} " + "UNION (\n" + (f"FOR testmode IN {{{testmode_check}}} UNION (\n" if testmode else "") + "SELECT\n " + ' ++ '.join(items) + (")" if testmode else "") + ")" ) return query def _render_config_value( *, schema: s_schema.Schema, valtype: s_types.Type, value_expr: str, ) -> str: if valtype.issubclass( schema, schema.get('std::anyreal', type=s_scalars.ScalarType), ): val = f'<str>{value_expr}' elif valtype.issubclass( schema, schema.get('std::bool', type=s_scalars.ScalarType), ): val = f'<str>{value_expr}' elif valtype.issubclass( schema, schema.get('std::str', type=s_scalars.ScalarType), ): val = f'cfg::_quote({value_expr})' else: raise AssertionError( f'unexpected configuration value type: ' f'{valtype.get_displayname(schema)}' ) return val def _render_config_set( *, schema: s_schema.Schema, valtype: s_types.Type, value_expr: str, scope: qltypes.ConfigScope, name: str, level: int, ) -> str: assert isinstance(valtype, s_scalars.ScalarType) v = _render_config_value( schema=schema, valtype=valtype, value_expr=value_expr) if level == 1: return ( f"'CONFIGURE {scope.to_edgeql()} " f"SET { qlquote.quote_ident(name) } := {{' ++ " f"array_join(array_agg({v}), ', ') ++ '}};'" ) else: indent = ' ' * (4 * (level - 1)) return ( f"'{indent}{ qlquote.quote_ident(name) } := {{' ++ " f"array_join(array_agg({v}), ', ') ++ '}},'" ) def _render_config_scalar( *, schema: s_schema.Schema, valtype: s_types.Type, value_expr: str, scope: qltypes.ConfigScope, name: str, level: int, ) -> str: assert isinstance(valtype, s_scalars.ScalarType) v = _render_config_value( schema=schema, valtype=valtype, value_expr=value_expr) if level == 1: return ( f"'CONFIGURE {scope.to_edgeql()} " f"SET { qlquote.quote_ident(name) } := ' ++ {v} ++ ';'" ) else: indent = ' ' * (4 * (level - 1)) return f"'{indent}{ qlquote.quote_ident(name) } := ' ++ {v} ++ ','" def _render_config_object( *, schema: s_schema.Schema, valtype: s_objtypes.ObjectType, value_expr: str, scope: qltypes.ConfigScope, join_term: str, level: int, ) -> str: # Generate a valid `CONFIGURE <SCOPE> INSERT ConfigObject` # shape for a given configuration object type or # `INSERT ConfigObject` for a nested configuration type. 
sub_layouts = _describe_config_object( schema=schema, valtype=valtype, level=level + 1, scope=scope) sub_layouts_items = [] if level == 1: decor = [f'CONFIGURE {scope.to_edgeql()} INSERT ', ';\\n'] else: decor = ['(INSERT ', ')'] indent = ' ' * (4 * (level - 1)) for type_name, type_layout in sub_layouts.items(): if type_layout: sub_layout_item = ( f"'{indent}{decor[0]}{type_name} {{\\n'\n++ " + "\n++ ".join(type_layout) + f" ++ '{indent}}}{decor[1]}'" ) else: sub_layout_item = ( f"'{indent}{decor[0]}{type_name}{decor[1]}'" ) if len(sub_layouts) > 1: if type_layout: sub_layout_item = ( f'(WITH item := item[IS {type_name}]' f' SELECT {sub_layout_item}) ' f'IF item.__type__.name = {ql(str(type_name))}' ) else: sub_layout_item = ( f'{sub_layout_item} ' f'IF item.__type__.name = {ql(str(type_name))}' ) sub_layouts_items.append(sub_layout_item) if len(sub_layouts_items) > 1: sli_render = '\nELSE '.join(sub_layouts_items) + "\nELSE ''" else: sli_render = sub_layouts_items[0] return '\n'.join(( f"array_join(array_agg((", f" FOR item IN {{ {value_expr} }}", f" UNION (", f"{textwrap.indent(sli_render, ' ' * 4)}", f" )", f")), {ql(join_term)})", )) def _describe_config_object( *, schema: s_schema.Schema, valtype: s_objtypes.ObjectType, level: int, scope: qltypes.ConfigScope, ) -> Dict[s_name.QualName, List[str]]: cfg_types = [valtype] cfg_types.extend(cfg_types[0].descendants(schema)) layouts = {} for cfg in cfg_types: items = [] for ptr_name, p in cfg.get_pointers(schema).items(schema): pn = str(ptr_name) if ( pn in ('id', '__type__') or p.get_annotation( schema, s_name.QualName('cfg', 'internal'), ) == 'true' ): continue ptype = p.get_target(schema) assert ptype is not None ptr_card = p.get_cardinality(schema) mult = ptr_card.is_multi() psource = f'item.{ qlquote.quote_ident(pn) }' if isinstance(ptype, s_objtypes.ObjectType): rval = textwrap.indent( _render_config_object( schema=schema, valtype=ptype, value_expr=psource, scope=scope, join_term=' UNION ', level=level + 1, ), ' ' * 2, ).strip() indent = ' ' * (4 * (level - 1)) item = ( f"'{indent}{qlquote.quote_ident(pn)} " f":= (\\n'\n++ {rval} ++ '\\n{indent}),\\n'" ) condition = None else: render = _render_config_set if mult else _render_config_scalar item = render( schema=schema, valtype=ptype, value_expr=psource, scope=scope, name=pn, level=level, ) condition = f'EXISTS {psource}' if condition is not None: item = f"({item} ++ '\\n' IF {condition} ELSE '')" items.append(item) layouts[cfg.get_name(schema)] = items return layouts def _build_key_source( schema: s_schema.Schema, exc_props: Iterable[s_pointers.Pointer], rptr: Optional[s_pointers.Pointer], source_idx: str, ) -> str: if exc_props: restargets = [] for prop in exc_props: pname = prop.get_shortname(schema).name restarget = f'(q{source_idx}.val)->>{ql(pname)}' restargets.append(restarget) targetlist = ','.join(restargets) keysource = textwrap.dedent(f'''\ (SELECT ARRAY[{targetlist}] AS key ) AS k{source_idx}''') else: assert rptr is not None rptr_name = rptr.get_shortname(schema).name keysource = textwrap.dedent(f'''\ (SELECT ARRAY[ (CASE WHEN q{source_idx}.val = 'null'::jsonb THEN NULL ELSE {ql(rptr_name)} END) ] AS key ) AS k{source_idx}''') return keysource def _build_key_expr(key_components: List[str]) -> str: key_expr = ' || '.join(key_components) final_keysource = textwrap.dedent(f'''\ (SELECT (CASE WHEN array_position(q.v, NULL) IS NULL THEN edgedbext.uuid_generate_v5( '{DATABASE_ID_NAMESPACE}'::uuid, array_to_string(q.v, ';') ) ELSE NULL END) AS key FROM (SELECT {key_expr} AS v) AS 
q )''') return final_keysource def _build_data_source( schema: s_schema.Schema, rptr: s_pointers.Pointer, source_idx: int, *, alias: Optional[str] = None, ) -> str: rptr_name = rptr.get_shortname(schema).name rptr_card = rptr.get_cardinality(schema) rptr_multi = rptr_card.is_multi() if alias is None: alias = f'q{source_idx + 1}' else: alias = f'q{alias}' if rptr_multi: sourceN = textwrap.dedent(f'''\ (SELECT jel.val FROM jsonb_array_elements( (q{source_idx}.val)->{ql(rptr_name)}) AS jel(val) ) AS {alias}''') else: sourceN = textwrap.dedent(f'''\ (SELECT (q{source_idx}.val)->{ql(rptr_name)} AS val ) AS {alias}''') return sourceN def _generate_config_type_view( schema: s_schema.Schema, stype: s_objtypes.ObjectType, *, scope: Optional[qltypes.ConfigScope], path: List[Tuple[s_pointers.Pointer, List[s_pointers.Pointer]]], rptr: Optional[s_pointers.Pointer], _memo: Optional[Set[s_obj.Object]] = None, ) -> Tuple[ List[Tuple[Tuple[str, str], str]], List[s_pointers.Pointer], ]: exc = schema.get('std::exclusive', type=s_constr.Constraint) json_t = schema.get('std::json', type=s_scalars.ScalarType) if scope is not None: if scope is qltypes.ConfigScope.SYSTEM: max_source = "'system override'" elif scope is qltypes.ConfigScope.DATABASE: max_source = "'database'" else: raise AssertionError(f'unexpected config scope: {scope!r}') else: max_source = 'NULL' if _memo is None: _memo = set() _memo.add(stype) views = [] json_casts = { c.get_to_type(schema): c for c in schema.get_casts_from_type(json_t) } sources = [] if not path: # This is the root config object. if rptr is None: source0 = textwrap.dedent(f'''\ (SELECT jsonb_object_agg(name, value) AS val FROM edgedb._read_sys_config(NULL, {max_source}) cfg) AS q0''') else: rptr_card = rptr.get_cardinality(schema) rptr_multi = rptr_card.is_multi() rptr_name = rptr.get_shortname(schema).name if rptr_multi: source0 = textwrap.dedent(f'''\ (SELECT el.val FROM (SELECT (value::jsonb) AS val FROM edgedb._read_sys_config(NULL, {max_source}) WHERE name = {ql(rptr_name)}) AS cfg, LATERAL jsonb_array_elements(cfg.val) AS el(val) ) AS q0''') else: source0 = textwrap.dedent(f'''\ (SELECT (value::jsonb) AS val FROM edgedb._read_sys_config(NULL, {max_source}) cfg WHERE name = {ql(rptr_name)}) AS q0''') sources.append(source0) key_start = 0 else: key_start = 0 for i, (l, exc_props) in enumerate(path): l_card = l.get_cardinality(schema) l_multi = l_card.is_multi() l_name = l.get_shortname(schema).name if i == 0: if l_multi: sourceN = textwrap.dedent(f'''\ (SELECT el.val FROM (SELECT (value::jsonb) AS val FROM edgedb._read_sys_config(NULL, {max_source}) WHERE name = {ql(l_name)}) AS cfg, LATERAL jsonb_array_elements(cfg.val) AS el(val) ) AS q{i}''') else: sourceN = textwrap.dedent(f'''\ (SELECT (value::jsonb) AS val FROM edgedb._read_sys_config(NULL, {max_source}) cfg WHERE name = {ql(l_name)}) AS q{i}''') else: sourceN = _build_data_source(schema, l, i - 1) sources.append(sourceN) sources.append(_build_key_source(schema, exc_props, l, str(i))) if exc_props: key_start = i exclusive_props = [] single_links = [] multi_links = [] multi_props = [] target_cols = [] where = '' path_steps = [p.get_shortname(schema).name for p, _ in path] if rptr is not None: self_idx = len(path) # Generate a source rvar for _this_ target rptr_name = rptr.get_shortname(schema).name path_steps.append(rptr_name) if self_idx > 0: sourceN = _build_data_source(schema, rptr, self_idx - 1) sources.append(sourceN) else: self_idx = 0 sval = f'(q{self_idx}.val)' for pp_name, pp in 
stype.get_pointers(schema).items(schema): pn = str(pp_name) if pn in ('id', '__type__'): continue pp_type = pp.get_target(schema) assert pp_type is not None pp_card = pp.get_cardinality(schema) pp_multi = pp_card.is_multi() pp_psi = types.get_pointer_storage_info(pp, schema=schema) pp_col = pp_psi.column_name if isinstance(pp, s_links.Link): if pp_multi: multi_links.append(pp) else: single_links.append(pp) else: pp_cast = _make_json_caster( schema, json_casts, pp_type, f'cfg::Config.{".".join(path_steps)}') if pp_multi: multi_props.append((pp, pp_cast)) else: extract_col = ( f'{pp_cast(f"{sval}->{ql(pn)}")}' f' AS {qi(pp_col)}') target_cols.append(extract_col) constraints = pp.get_constraints(schema).objects(schema) if any(c.issubclass(schema, exc) for c in constraints): exclusive_props.append(pp) exclusive_props.sort(key=lambda p: p.get_shortname(schema).name) if exclusive_props or rptr: sources.append( _build_key_source(schema, exclusive_props, rptr, str(self_idx))) key_components = [f'k{i}.key' for i in range(key_start, self_idx + 1)] final_keysource = f'{_build_key_expr(key_components)} AS k' sources.append(final_keysource) key_expr = 'k.key' target_cols.append(f'{key_expr} AS id') where = f'{key_expr} IS NOT NULL' target_cols.append(textwrap.dedent(f'''\ (SELECT id FROM edgedb."_SchemaObjectType" WHERE name = 'cfg::' || ({sval}->>'_tname')) AS __type__''')) else: key_expr = f"'{CONFIG_ID}'::uuid" target_cols.extend([ f"{key_expr} AS id", f'(SELECT id FROM edgedb."_SchemaObjectType" ' f"WHERE name = 'cfg::Config') AS __type__", ]) key_components = [] for link in single_links: link_name = link.get_shortname(schema).name link_type = link.get_target(schema) link_psi = types.get_pointer_storage_info(link, schema=schema) link_col = link_psi.column_name if rptr is not None: target_path = path + [(rptr, exclusive_props)] else: target_path = path target_views, target_exc_props = _generate_config_type_view( schema, link_type, scope=scope, path=target_path, rptr=link, _memo=_memo, ) for descendant in link_type.descendants(schema): if descendant not in _memo: desc_views, _ = _generate_config_type_view( schema, descendant, scope=scope, path=target_path, rptr=link, _memo=_memo, ) views.extend(desc_views) target_source = _build_data_source( schema, link, self_idx, alias=link_name) sources.append(target_source) target_key_source = _build_key_source( schema, target_exc_props, link, source_idx=link_name) sources.append(target_key_source) if target_exc_props: target_key_components = [f'k{link_name}.key'] else: target_key_components = key_components + [f'k{link_name}.key'] target_key = _build_key_expr(target_key_components) target_cols.append(f'({target_key}) AS {qi(link_col)}') views.extend(target_views) target_cols_str = ',\n'.join(target_cols) fromlist = ',\n'.join(f'LATERAL {s}' for s in sources) target_query = textwrap.dedent(f'''\ SELECT {textwrap.indent(target_cols_str, ' ' * 4).strip()} FROM {fromlist} ''') if where: target_query += f'\nWHERE\n {where}' views.append((tabname(schema, stype), target_query)) views.append((inhviewname(schema, stype), target_query)) for link in multi_links: target_sources = list(sources) link_name = link.get_shortname(schema).name link_type = link.get_target(schema) if rptr is not None: target_path = path + [(rptr, exclusive_props)] else: target_path = path target_views, target_exc_props = _generate_config_type_view( schema, link_type, scope=scope, path=target_path, rptr=link, _memo=_memo, ) views.extend(target_views) for descendant in link_type.descendants(schema): 
if descendant not in _memo: desc_views, _ = _generate_config_type_view( schema, descendant, scope=scope, path=target_path, rptr=link, _memo=_memo, ) views.extend(desc_views) target_source = _build_data_source( schema, link, self_idx, alias=link_name) target_sources.append(target_source) target_key_source = _build_key_source( schema, target_exc_props, link, source_idx=link_name) target_sources.append(target_key_source) target_key_components = key_components + [f'k{link_name}.key'] target_key = _build_key_expr(target_key_components) target_fromlist = ',\n'.join(f'LATERAL {s}' for s in target_sources) link_query = textwrap.dedent(f'''\ SELECT q.source, q.target FROM (SELECT {key_expr} AS source, {target_key} AS target FROM {target_fromlist} ) q WHERE q.target IS NOT NULL ''') views.append((tabname(schema, link), link_query)) views.append((inhviewname(schema, link), link_query)) for prop, pp_cast in multi_props: target_sources = list(sources) pn = prop.get_shortname(schema).name target_source = _build_data_source( schema, prop, self_idx, alias=pn) target_sources.append(target_source) target_fromlist = ',\n'.join(f'LATERAL {s}' for s in target_sources) link_query = textwrap.dedent(f'''\ SELECT {key_expr} AS source, {pp_cast(f'q{pn}.val')} AS target FROM {target_fromlist} ''') views.append((tabname(schema, prop), link_query)) views.append((inhviewname(schema, prop), link_query)) return views, exclusive_props async def _execute_block( conn: asyncpg.Connection, block: dbops.SQLBlock, ) -> None: await _execute_sql_script(conn, block.to_string()) async def _execute_sql_script( conn: asyncpg.Connection, sql_text: str, ) -> None: if debug.flags.bootstrap: debug.header('Bootstrap Script') if len(sql_text) > 102400: # Make sure we don't hog CPU by attempting to highlight # huge scripts. print(sql_text) else: debug.dump_code(sql_text, lexer='sql') try: await conn.execute(sql_text) except Exception as e: position = getattr(e, 'position', None) internal_position = getattr(e, 'internal_position', None) context = getattr(e, 'context', '') pl_func_line: Optional[int] if context: pl_func_line_m = re.search( r'^PL/pgSQL function inline_code_block line (\d+).*', context, re.M) if pl_func_line_m: pl_func_line = int(pl_func_line_m.group(1)) else: pl_func_line = None point = None if position is not None: point = int(position) text = getattr(e, 'query', None) if text is None: # Parse errors text = sql_text elif internal_position is not None: point = int(internal_position) text = getattr(e, 'internal_query', None) elif pl_func_line: point = _edgeql_rust.offset_of_line(sql_text, pl_func_line) text = sql_text if point is not None: context = parser_context.ParserContext( 'query', text, start=point, end=point) exceptions.replace_context(e, context) raise
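# A minimal Python sketch (not part of this module) of the key derivation
# that _build_key_expr expresses in SQL above: a config object's stable id
# is a version-5 UUID of its exclusive-property values joined with ';',
# namespaced by the DATABASE_ID_NAMESPACE constant. The namespace value
# below is a placeholder; the real constant is defined elsewhere in this
# module, and the authoritative computation is the SQL uuid_generate_v5 call.
import uuid

_EXAMPLE_NAMESPACE = uuid.UUID('00000000-0000-0000-0000-000000000000')


def _example_config_object_id(key_components):
    # Mirrors the SQL CASE: any NULL component yields a NULL key.
    if any(c is None for c in key_components):
        return None
    return uuid.uuid5(_EXAMPLE_NAMESPACE, ';'.join(key_components))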
# -*- coding: utf-8 -*- """ Functionality to generate and work with the directory structure of a project """ from __future__ import absolute_import, print_function import os from os.path import exists as path_exists from os.path import join as join_path from . import templates, utils from .contrib.six import string_types from .log import logger class FileOp(object): """Namespace for file operations during an update""" NO_OVERWRITE = 0 """Do not overwrite an existing file during update (still created if not exists) """ NO_CREATE = 1 """Do not create the file during an update""" def define_structure(_, opts): """Creates the project structure as dictionary of dictionaries Args: struct (dict): previous directory structure (ignored) opts (dict): options of the project Returns: tuple(dict, dict): structure as dictionary of dictionaries and input options """ struct = {opts['project']: { '.gitignore': (templates.gitignore(opts), FileOp.NO_OVERWRITE), 'src': { opts['package']: {'__init__.py': templates.init(opts), 'skeleton.py': (templates.skeleton(opts), FileOp.NO_CREATE)}, }, 'tests': {'conftest.py': (templates.conftest_py(opts), FileOp.NO_OVERWRITE), 'test_skeleton.py': (templates.test_skeleton(opts), FileOp.NO_CREATE)}, 'docs': {'conf.py': templates.sphinx_conf(opts), 'authors.rst': templates.sphinx_authors(opts), 'index.rst': (templates.sphinx_index(opts), FileOp.NO_OVERWRITE), 'license.rst': templates.sphinx_license(opts), 'changelog.rst': templates.sphinx_changelog(opts), 'Makefile': templates.sphinx_makefile(opts), '_static': { '.gitignore': templates.gitignore_empty(opts)}}, 'README.rst': (templates.readme(opts), FileOp.NO_OVERWRITE), 'AUTHORS.rst': (templates.authors(opts), FileOp.NO_OVERWRITE), 'LICENSE.txt': (templates.license(opts), FileOp.NO_OVERWRITE), 'CHANGELOG.rst': (templates.changelog(opts), FileOp.NO_OVERWRITE), 'setup.py': templates.setup_py(opts), 'setup.cfg': (templates.setup_cfg(opts), FileOp.NO_OVERWRITE), 'requirements.txt': (templates.requirements(opts), FileOp.NO_OVERWRITE), '.coveragerc': (templates.coveragerc(opts), FileOp.NO_OVERWRITE)}} return struct, opts def create_structure(struct, opts, prefix=None): """Manifests a directory structure in the filesystem Args: struct (dict): directory structure as dictionary of dictionaries opts (dict): options of the project prefix (str): prefix path for the structure Returns: tuple(dict, dict): directory structure as dictionary of dictionaries (similar to input, but only containing the files that actually changed) and input options Raises: :obj:`RuntimeError`: raised if content type in struct is unknown """ update = opts.get('update') or opts.get('force') pretend = opts.get('pretend') if prefix is None: prefix = os.getcwd() changed = {} for name, content in struct.items(): if isinstance(content, string_types): utils.create_file(join_path(prefix, name), content, pretend) changed[name] = content elif isinstance(content, dict): utils.create_directory(join_path(prefix, name), update, pretend) changed[name], _ = create_structure( struct[name], opts, prefix=join_path(prefix, name)) elif content is None: pass else: raise RuntimeError("Don't know what to do with content type " "{type}.".format(type=type(content))) return changed, opts def apply_update_rules(struct, opts, prefix=None): """Apply update rules using :obj:`~.FileOp` to a directory structure. As a result the filtered structure keeps only the files that actually will be written. 
Args: opts (dict): options of the project, containing the following flags: - **update**: when the project already exists and should be updated - **force**: overwrite all the files that already exist struct (dict): directory structure as dictionary of dictionaries (in this tree representation, each leaf can be just a string or a tuple also containing an update rule) prefix (str): prefix path for the structure Returns: tuple(dict, dict): directory structure with keys removed according to the rules (in this tree representation, all the leaves are strings) and input options """ if prefix is None: prefix = os.getcwd() filtered = {} for k, v in struct.items(): if isinstance(v, dict): v, _ = apply_update_rules(v, opts, join_path(prefix, k)) else: path = join_path(prefix, k) v = apply_update_rule_to_file(path, v, opts) if v: filtered[k] = v return filtered, opts def apply_update_rule_to_file(path, value, opts): """Applies the update rule to a given file path Args: path (str): file path value (tuple or str): content (and update rule) opts (dict): options of the project, containing the following flags: - **update**: when the project already exists and should be updated - **force**: overwrite all the files that already exist Returns: content of the file if it should be generated or None otherwise. """ if isinstance(value, (tuple, list)): content, rule = value else: content, rule = value, None update = opts.get('update') force = opts.get('force') skip = update and not force and ( rule == FileOp.NO_CREATE or path_exists(path) and rule == FileOp.NO_OVERWRITE) if skip: logger.report('skip', path) return None return content
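# A hypothetical end-to-end sketch (not part of this module) showing how the
# functions above compose: build the structure dict, filter it through the
# FileOp update rules, then materialise it on disk. The `opts` values are
# illustrative only; the real templates may require additional keys (author,
# description, etc.).
def _example_generate_project():
    opts = {
        'project': 'my_project',
        'package': 'my_package',
        'update': False,
        'force': False,
        'pretend': False,
    }
    struct, opts = define_structure(None, opts)
    struct, opts = apply_update_rules(struct, opts)
    changed, opts = create_structure(struct, opts)
    return changed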
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for the fake file system implementation.""" import unittest from dfvfs.path import fake_path_spec from dfvfs.resolver import context from dfvfs.vfs import fake_file_system from tests import test_lib as shared_test_lib class FakeFileSystemTest(shared_test_lib.BaseTestCase): """Tests for the fake file system.""" def setUp(self): """Sets up the needed objects used throughout the test.""" self._resolver_context = context.Context() self._fake_path_spec = fake_path_spec.FakePathSpec(location='/') def tearDown(self): """Cleans up the needed objects used throughout the test.""" self._resolver_context.Empty() def testOpenAndClose(self): """Test the open and close functionality.""" file_system = fake_file_system.FakeFileSystem(self._resolver_context) self.assertIsNotNone(file_system) file_system.Open(self._fake_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): """Test the file entry exists by path specification functionality.""" file_system = fake_file_system.FakeFileSystem(self._resolver_context) self.assertIsNotNone(file_system) file_system.AddFileEntry( '/test_data/testdir_fake/file1.txt', file_data=b'FILE1') file_system.Open(self._fake_path_spec) path_spec = fake_path_spec.FakePathSpec( location='/test_data/testdir_fake/file1.txt') self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec)) path_spec = fake_path_spec.FakePathSpec( location='/test_data/testdir_fake/file6.txt') self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): """Tests the GetFileEntryByPathSpec function.""" file_system = fake_file_system.FakeFileSystem(self._resolver_context) self.assertIsNotNone(file_system) file_system.AddFileEntry( '/test_data/testdir_fake/file1.txt', file_data=b'FILE1') file_system.Open(self._fake_path_spec) path_spec = fake_path_spec.FakePathSpec( location='/test_data/testdir_fake/file1.txt') file_entry = file_system.GetFileEntryByPathSpec(path_spec) self.assertIsNotNone(file_entry) self.assertEqual(file_entry.name, 'file1.txt') path_spec = fake_path_spec.FakePathSpec( location='/test_data/testdir_fake/file6.txt') file_entry = file_system.GetFileEntryByPathSpec(path_spec) self.assertIsNone(file_entry) file_system.Close() def testGetRootFileEntry(self): """Test the get root file entry functionality.""" file_system = fake_file_system.FakeFileSystem(self._resolver_context) self.assertIsNotNone(file_system) file_system.Open(self._fake_path_spec) file_entry = file_system.GetRootFileEntry() self.assertIsNotNone(file_entry) self.assertEqual(file_entry.name, '') file_system.Close() if __name__ == '__main__': unittest.main()
import matplotlib.pyplot as plt from numpy import pi, exp from ....Classes.Arc1 import Arc1 from ....Classes.LamSlot import LamSlot from ....Classes.Segment import Segment from ....definitions import config_dict from ....Functions.Plot import ( ARROW_COLOR, ARROW_WIDTH, MAIN_LINE_COLOR, MAIN_LINE_STYLE, MAIN_LINE_WIDTH, P_FONT_SIZE, SC_FONT_SIZE, SC_LINE_COLOR, SC_LINE_STYLE, SC_LINE_WIDTH, TEXT_BOX, plot_quote, ) from ....Methods import ParentMissingError MAGNET_COLOR = config_dict["PLOT"]["COLOR_DICT"]["MAGNET_COLOR"] def plot_schematics( self, is_default=False, is_add_point_label=False, is_add_schematics=True, is_add_main_line=True, type_add_active=True, save_path=None, is_show_fig=True, ): """Plot the schematics of the slot Parameters ---------- self : SlotM16 A SlotM16 object is_default : bool True: plot default schematics, else use current slot values is_add_point_label : bool True to display the name of the points (Z1, Z2....) is_add_schematics : bool True to display the schematics information (W0, H0...) is_add_main_line : bool True to display "main lines" (slot opening and 0x axis) type_add_active : int 0: No active surface, 1: active surface as winding, 2: active surface as magnet save_path : str full path including folder, name and extension of the file to save if save_path is not None is_show_fig : bool To call show at the end of the method """ # Use some default parameter if is_default: slot = type(self)(Zs=4, W0=0.02, H0=0.01, H1=0.06, W1=0.04) lam = LamSlot( Rint=80e-3, Rext=240e-3, is_internal=True, is_stator=False, slot=slot ) slot.plot_schematics( is_default=False, is_add_point_label=is_add_point_label, is_add_schematics=is_add_schematics, is_add_main_line=is_add_main_line, type_add_active=type_add_active, save_path=save_path, is_show_fig=is_show_fig, ) else: # Getting the main plot if self.parent is None: raise ParentMissingError("Error: The slot is not inside a Lamination") lam = self.parent lam.plot(alpha=pi / self.Zs, is_show_fig=False) # center slot on Ox axis fig = plt.gcf() ax = plt.gca() point_dict = self._comp_point_coordinate() if self.is_outwards(): sign = +1 else: sign = -1 # Adding point label if is_add_point_label: for name, Z in point_dict.items(): ax.text( Z.real, Z.imag, name, fontsize=P_FONT_SIZE, bbox=TEXT_BOX, ) # Adding schematics if is_add_schematics: # W0 line = Segment(point_dict["Z7"], point_dict["Z2"]) line.plot( fig=fig, ax=ax, color=ARROW_COLOR, linewidth=ARROW_WIDTH, label="W0", offset_label=self.H0 * 0.2, is_arrow=True, fontsize=SC_FONT_SIZE, ) # W1 line = Segment(point_dict["Z5"], point_dict["Z4"]) line.plot( fig=fig, ax=ax, color=ARROW_COLOR, linewidth=ARROW_WIDTH, label="W1", offset_label=self.H0 * 0.2, is_arrow=True, fontsize=SC_FONT_SIZE, ) # H0 plot_quote( Z1=point_dict["Z1"], Zlim1=point_dict["Z1"].real + 1j * point_dict["Z3"].imag, Zlim2=point_dict["Z3"], Z2=point_dict["Z2"], offset_label=1j * 0.1 * self.W0, fig=fig, ax=ax, label="H0", ) # H1 line = Segment(point_dict["Z5"], point_dict["Z6"]) line.plot( fig=fig, ax=ax, color=ARROW_COLOR, linewidth=ARROW_WIDTH, label="H1", offset_label=1j * self.W0 * 0.1, is_arrow=True, fontsize=SC_FONT_SIZE, ) if is_add_main_line: # Ox axis line = Segment(0, lam.Rext * 1.5) line.plot( fig=fig, ax=ax, color=MAIN_LINE_COLOR, linestyle=MAIN_LINE_STYLE, linewidth=MAIN_LINE_WIDTH, ) # Top arc line = Arc1( begin=point_dict["Z1"], end=point_dict["Z8"], radius=self.get_Rbo(), is_trigo_direction=True, ) line.plot( fig=fig, ax=ax, color=MAIN_LINE_COLOR, linestyle=MAIN_LINE_STYLE, linewidth=MAIN_LINE_WIDTH, ) if 
type_add_active == 1: self.plot_active(fig=fig, is_show_fig=False) elif type_add_active == 2: self.plot_active( fig=fig, is_show_fig=False, enforced_default_color=MAGNET_COLOR ) # Zooming and cleaning W = self.W1 / 2 * 1.3 Rint, Rext = self.comp_radius() plt.axis("equal") ax.set_xlim(Rint, Rext) ax.set_ylim(-W, W) fig.canvas.set_window_title(type(self).__name__ + " Schematics") ax.set_title("") ax.get_legend().remove() ax.set_axis_off() # Save / Show if save_path is not None: fig.savefig(save_path) plt.close() if is_show_fig: fig.show()
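# A short usage sketch, not part of the method above. Assumptions: SlotM16 is
# importable as pyleecan.Classes.SlotM16 (consistent with the relative imports
# in this file) and the dimensions are the same illustrative values used by
# the is_default branch.
if __name__ == "__main__":
    from pyleecan.Classes.SlotM16 import SlotM16

    slot = SlotM16(Zs=4, W0=0.02, H0=0.01, H1=0.06, W1=0.04)
    # is_default=True builds a default lamination internally, so the slot does
    # not need to be attached to a LamSlot first.
    slot.plot_schematics(
        is_default=True,
        is_add_schematics=True,
        save_path="slot_m16_schematics.png",
        is_show_fig=False,
    )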
# -*- coding: utf-8 -*- from __future__ import unicode_literals, division import logging from contextlib import closing from functools import partial from ratelimiter import RateLimiter from requests import Session from .query import SpaceTrackQueryBuilder, SUPPORTABLE_ENTITIES class SpaceTrackApi(object): def __init__(self, login, password, session=None, **kwargs): self.logger = kwargs.pop('logger', logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))) self.credentials = dict(identity=login, password=password) self.session = session if isinstance(session, Session) else Session() self.url = kwargs.pop('url', 'https://www.space-track.org') self.query_url = kwargs.pop('query_url', 'basicspacedata/query') self.login_url = kwargs.pop('login_url', 'ajaxauth/login') self.logout_url = kwargs.pop('logout_url', 'ajaxauth/logout') @RateLimiter(max_calls=20, period=60) def query(self, **kwargs): qb = SpaceTrackQueryBuilder(**kwargs) url = '{url}/{query}'.format(url=self.url, query=qb) self.logger.info('Send request to %s', url) with closing(self.session.get(url)) as resp: try: m = getattr(resp, self.get_response_method(qb.format)) return m() if callable(m) else m except Exception as e: self.logger.exception(e) return resp.text def login(self): with closing(self.session.post('{}/{}'.format(self.url, self.login_url), data=self.credentials)) as resp: if resp.reason == 'OK': self.logger.info('"Successfully logged in"') return self.session def logout(self): with closing(self.session.get('{}/{}'.format(self.url, self.logout_url))) as resp: if resp.reason == 'OK': self.logger.info(resp.text) def close(self): self.session.close() @staticmethod def get_response_method(fmt): method_mapping = { 'json': 'json', 'xml': 'text', 'html': 'text', 'csv': 'text', 'tle': 'text', '3le': 'text', 'kvn': 'text', 'stream': 'iter_content' } return method_mapping.get(fmt, 'content') def __call__(self, **kwargs): return self.query(**kwargs) def __getattr__(self, item): if item not in SUPPORTABLE_ENTITIES: raise AttributeError('`{!r}` object has no attribute "{}"'.format(self, item)) return partial(self.query, entity=item) def __enter__(self): self.login() return self def __exit__(self, *args): self.logout() self.close()
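# A hypothetical usage sketch (not part of the original module). The entity
# name and query keyword arguments below are illustrative; the real set of
# supported entities and predicates comes from query.SUPPORTABLE_ENTITIES and
# SpaceTrackQueryBuilder, which are not shown here.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    with SpaceTrackApi('user@example.com', 'secret') as api:
        # __getattr__ turns any supported entity name into a query shortcut,
        # so api.tle(...) is equivalent to api.query(entity='tle', ...).
        result = api.tle(NORAD_CAT_ID=25544, format='json')
        print(result)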
import pandas as pd

from my_lambdata.ds_utilities import enlarge, get_business_info


def test_business_info():
    test_df = get_business_info('fast food', 'denver', 'FL')
    assert len(test_df.iloc[0]['Phone_No']) >= 10


def test_enlarge():
    assert enlarge(3) == 300