| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if not A:
return 0
a_len = len(A)
l = 0
s = None
for i in range(0, a_len):
e = A[i]
if s != e:
s = e
A[l] = s
l += 1
        # rebinding A (e.g. A = A[a_len:]) has no effect on the caller's list;
        # truncate the duplicate tail in place instead
        del A[l:]
return l
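# A minimal usage sketch (hypothetical values, not part of the original submission):
#   nums = [1, 1, 2, 2, 3]
#   Solution().removeDuplicates(nums)  # -> 3, and nums becomes [1, 2, 3]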
| pikeszfish/Leetcode.py | leetcode.py/RemoveDuplicatesfromSortedArray.py | Python | mit | 403 |
from ptc_socket import Socket
from constants import SHUT_RD, SHUT_WR, SHUT_RDWR, WAIT, NO_WAIT, ABORT
__all__ = ['Socket', 'SHUT_RD', 'SHUT_WR', 'SHUT_RDWR', 'WAIT', 'NO_WAIT', 'ABORT']  # __all__ must hold names as strings
| lukius/ptc | ptc/__init__.py | Python | mit | 172 |
# -*- coding: utf-8 -*-
CAFFE_ROOT = 'distribute/'
CAFFE_MODEL_PATH = 'model/deploy.prototxt'
CAFFE_WEIGHTS_PATH = 'model/fer_alexnet_weights.caffemodel'
GPU_MODE = False
DEFAULT_BOOST = 1.4
STRONG_BOOST = 1.9
NEGATIVE_BOOST = 0.715
EMOTIONS = ["neutral","happy","sad","angry/disgusted","(unused #4)","surprised/afraid","(unused #6)"]
# Ignore the first 30 frames so the webcam can synchronize and adjust to light
RAMP_FRAMES = 30
MAX_CONTENT = 50
WEIGHTED_RECOMMENDATION = True
| nshaud/content-recommendation | config.py | Python | mit | 486 |
# Copyright (C) 2012 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class ADS(Signature):
name = "persistence_ads"
description = "Creates an Alternate Data Stream (ADS)"
severity = 3
categories = ["persistence", "ads"]
authors = ["nex"]
minimum = "0.5"
def run(self):
result = False
for file_path in self.results["behavior"]["summary"]["files"]:
if len(file_path) <= 3:
continue
if ":" in file_path.replace("/", "\\").split("\\")[-1]:
self.data.append({"file" : file_path})
result = True
return result
| 0x00ach/zer0m0n | signatures/persistence_ads.py | Python | gpl-3.0 | 1,307 |
import os
import feedparser
import wget
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
url = feedparser.parse('http://heldeepradio.spinninpodcasts.com')
folder = "/mnt/Media/Muziek/Heldeep/"
print(EasyID3.valid_keys.keys())
for post in url.entries:
print (post.title + ": " + post.link)
download = wget.download(post.link, folder + post.title + ".mp3")
audio = MP3(download, EasyID3)
audio["title"] = u"" + post.title
audio["artist"] = u"Oliver Heldens"
audio["album"] = u"Heldeep Radio"
    print(audio.pprint())  # pprint() returns the tag summary string; print it explicitly
audio.save()
| JoooostB/heldeepgrabber | run.py | Python | mit | 564 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
with self.cached_session(use_gpu=True):
left_tensor = constant_op.constant(l, shape=left_shape)
right_tensor = constant_op.constant(r, shape=right_shape)
output = math_ops.squared_difference(left_tensor, right_tensor)
left_err = gradient_checker.compute_gradient_error(
left_tensor, left_shape, output, output_shape, x_init_value=l)
right_err = gradient_checker.compute_gradient_error(
right_tensor, right_shape, output, output_shape, x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
@test_util.run_deprecated_v1
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (dtypes.complex64, dtypes.complex128):
value = math_ops.complex(
self._biasedRandN(
shape, bias=bias, sigma=sigma),
self._biasedRandN(
shape, bias=bias, sigma=sigma))
else:
value = ops.convert_to_tensor(
self._biasedRandN(
shape, bias=bias), dtype=dtype)
with self.cached_session(use_gpu=True):
output = math_ops.abs(value)
error = gradient_checker.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
@test_util.run_deprecated_v1
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
class MaximumOrMinimumGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testMaximumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.maximum(inputs, 3.0)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testMinimumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.minimum(inputs, 2.0)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
class ProdGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testProdGradient(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientForNegativeAxis(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs, -1)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]],
dtype=dtype)
outputs = math_ops.reduce_prod(inputs)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientForNegativeAxisComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]],
dtype=dtype)
outputs = math_ops.reduce_prod(inputs, -1)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_all_in_graph_and_eager_modes
class EuclideanNormGradientTest(test.TestCase):
def testBasic(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testNegative(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([-3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testKeepdims(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
      grad = gradient_checker_v2.compute_gradient(
          # keepdims=True is what distinguishes this test from testBasic
          lambda x: math_ops.reduce_euclidean_norm(x, keepdims=True), [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testGradientChain(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x) * 5, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testTwoElements(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3, -4], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testNegativeZero(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([1.0, -0.0], dtype=dtype)
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x)
dx = tape.gradient(y, x)
dx_answer = constant_op.constant([1.0, -0.0], dtype=dtype)
self.assertAllClose(dx, dx_answer)
self.assertAllClose(1.0 / dx, 1.0 / dx_answer)
def testZeros(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([0.0, -0.0], dtype=dtype)
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x)
dx = tape.gradient(y, x)
dx_answer = constant_op.constant(
[float("NaN"), float("NaN")], dtype=dtype)
self.assertAllClose(dx, dx_answer)
def test2D_1(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 1e-3)
def test2D_2(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 0), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 1e-3)
def test2D_3(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 1e-3)
def test2D_4(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[3], [4]], dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 1e-3)
def test3D_1(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 2e-3)
def test3D_2(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 0), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 2e-3)
def test3D_3(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 3e-3)
def test3D_4(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
dtype=dtype)
grads = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x, 2), [x])
err = gradient_checker_v2.max_error(*grads)
self.assertLess(err, 2e-3)
class SegmentMinOrMaxGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testSegmentMinGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_min,
[2])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMaxGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_max,
[2])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMinGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_min,
[1])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMaxGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_max,
[1])
self.assertLess(error, 1e-4)
class FloorModGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testFloorModGradient(self):
# Making sure the input is not near the discontinuity point where
# x/y == floor(x/y)
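    # Worked example: 131/17 ~ 7.71 and floor(131/17) = 7, so
    # floormod(131, 17) = 131 - 7*17 = 12; away from the discontinuity the
    # gradient w.r.t. the numerator is simply 1.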
ns = constant_op.constant([17.], dtype=dtypes.float32)
inputs = constant_op.constant([131.], dtype=dtypes.float32)
floor_mod = math_ops.floormod(inputs, ns)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1],
floor_mod, [1])
self.assertLess(error, 1e-4)
class DivNoNanGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasicGradient(self):
inputs = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(inputs, 1 + math_ops.abs(inputs))
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.get_shape().as_list(), outputs,
outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithDenominatorIsZero(self):
x = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
y = array_ops.zeros_like(x,
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
self.assertAllClose(dy.eval(), np.zeros(y.shape.as_list()))
class MulNoNanGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasicGradient(self):
inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
outputs = math_ops.mul_no_nan(inputs, 1 + math_ops.abs(inputs))
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.get_shape().as_list(), outputs,
outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithRhsIsZero(self):
x_vals = [0, 1.0, np.nan, np.inf, np.NINF]
x = constant_op.constant(x_vals, dtype=dtypes.float32)
y = array_ops.zeros_like(x, dtype=dtypes.float32)
outputs = math_ops.mul_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
self.assertAllClose(dy.eval(), x_vals)
class XlogyTest(test.TestCase):
def _xlogy_gradients(self, x, y):
xlogy_xgrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0])
xlogy_ygrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0])
return xlogy_xgrad, xlogy_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
xlogy_expected_xgrad = self.evaluate(math_ops.log(y))
xlogy_expected_ygrad = self.evaluate(x / y)
self.assertAllClose(xlogy_expected_xgrad, xlogy_xgrad)
self.assertAllClose(xlogy_expected_ygrad, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
self.assertAllClose(-np.inf, xlogy_xgrad)
self.assertAllClose(np.inf, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
class XdivyTest(test.TestCase):
def _xdivy_gradients(self, x, y):
xdivy_xgrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), x)[0])
xdivy_ygrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), y)[0])
return xdivy_xgrad, xdivy_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
xdivy_expected_xgrad = self.evaluate(1 / y)
xdivy_expected_ygrad = self.evaluate(-x / y**2)
self.assertAllClose(xdivy_expected_xgrad, xdivy_xgrad)
self.assertAllClose(xdivy_expected_ygrad, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xdivy_xgrad)
self.assertAllClose(zero, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
self.assertAllClose(np.inf, xdivy_xgrad)
self.assertAllClose(-np.inf, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroXYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xdivy_xgrad)
self.assertAllClose(zero, xdivy_ygrad)
@test_util.run_all_in_graph_and_eager_modes
class PowGradTest(test.TestCase):
def test_zero_grad_tf_gradients(self):
if context.executing_eagerly():
self.skipTest("tf.gradients not supported in eager.")
x = constant_op.constant([-1., 0., 1.])
g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0])
self.assertAllClose([-2., 0., 2.], g)
def test_zero_grad_tape(self):
try:
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([-1, 0., 1.])
with backprop.GradientTape() as tape:
tape.watch(x)
g = tape.gradient(math_ops.pow(x, 2), x)
g = self.evaluate(g)
self.assertAllClose([-2., 0., 2.], g)
finally:
check_numerics_callback.disable_check_numerics()
@test_util.run_all_in_graph_and_eager_modes
class NextAfterTest(test.TestCase):
def _nextafter_gradient(self, x1, x2):
with backprop.GradientTape() as tape:
tape.watch(x1)
tape.watch(x2)
y = math_ops.nextafter(x1, x2)
return tape.gradient(y, [x1, x2])
def testBasic(self):
for dtype in [dtypes.float32, dtypes.float64]:
x1 = constant_op.constant(0.1, dtype=dtype)
x2 = constant_op.constant(3.1, dtype=dtype)
dx1, dx2 = self._nextafter_gradient(x1, x2)
expected_dx1 = constant_op.constant(1, dtype=dtype)
expected_dx2 = constant_op.constant(0, dtype=dtype)
self.assertAllClose(expected_dx1, dx1)
self.assertAllClose(expected_dx2, dx2)
def testDynamicShapes(self):
for dtype in [dtypes.float32, dtypes.float64]:
default_x1 = constant_op.constant(0.1, dtype=dtype)
default_x2 = constant_op.constant(3.1, dtype=dtype)
x1 = array_ops.placeholder_with_default(default_x1, shape=None)
x2 = array_ops.placeholder_with_default(default_x2, shape=None)
dx1, dx2 = self._nextafter_gradient(x1, x2)
expected_dx1 = constant_op.constant(1, dtype=dtype)
expected_dx2 = constant_op.constant(0, dtype=dtype)
self.assertAllClose(expected_dx1, dx1)
self.assertAllClose(expected_dx2, dx2)
def testWithGradientChecker(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
x2 = np.array([2, 2, 2, 2, 2], dtype=dtype.as_numpy_dtype)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop
self.assertLess(err, 1e-3)
def testBroadcastingWithGradientChecker(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
x2 = np.array([2], dtype=dtype.as_numpy_dtype)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
| chemelnucfin/tensorflow | tensorflow/python/ops/math_grad_test.py | Python | apache-2.0 | 24,477 |
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from nova import context
from nova.tests.unit import fake_instance
class MockConf(mock.Mock):
def __init__(self, lxd_args=(), lxd_kwargs={}, *args, **kwargs):
default = {
'config_drive_format': None,
'instances_path': '/fake/instances/path',
'image_cache_subdirectory_name': '/fake/image/cache',
'vif_plugging_timeout': 10,
'my_ip': '1.2.3.4',
'vlan_interface': 'vlanif',
'flat_interface': 'flatif',
}
default.update(kwargs)
super(MockConf, self).__init__(*args, **default)
lxd_default = {
'root_dir': '/fake/lxd/root',
'timeout': 20,
'retry_interval': 2
}
lxd_default.update(lxd_kwargs)
self.lxd = mock.Mock(lxd_args, **lxd_default)
class MockInstance(mock.Mock):
def __init__(self, name='fake-uuid', uuid='fake-uuid',
image_ref='mock_image', ephemeral_gb=0, memory_mb=-1,
vcpus=0, *args, **kwargs):
super(MockInstance, self).__init__(
uuid=uuid,
image_ref=image_ref,
ephemeral_gb=ephemeral_gb,
*args, **kwargs)
self.uuid = uuid
self.name = name
self.flavor = mock.Mock(memory_mb=memory_mb, vcpus=vcpus)
def lxd_mock(*args, **kwargs):
default = {
'profile_list.return_value': ['fake_profile'],
'container_list.return_value': ['mock-instance-1', 'mock-instance-2'],
'host_ping.return_value': True,
}
default.update(kwargs)
return mock.Mock(*args, **default)
def annotated_data(*args):
class List(list):
pass
class Dict(dict):
pass
new_args = []
for arg in args:
if isinstance(arg, (list, tuple)):
new_arg = List(arg)
new_arg.__name__ = arg[0]
elif isinstance(arg, dict):
new_arg = Dict(arg)
new_arg.__name__ = arg['tag']
else:
raise TypeError('annotate_data can only handle dicts, '
'lists and tuples')
new_args.append(new_arg)
return lambda func: ddt.data(*new_args)(ddt.unpack(func))
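# A hedged usage sketch (the names and values below are hypothetical): with
# ddt, each annotated dataset carries a readable __name__ that ends up in the
# generated test method names, e.g.:
#
#   @ddt.ddt
#   class FlavorTest(unittest.TestCase):
#       @annotated_data(('small_flavor', 512, 1), ('big_flavor', 4096, 4))
#       def test_flavor(self, tag, memory_mb, vcpus):
#           ...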
def _fake_instance():
ctxt = context.get_admin_context()
_instance_values = {
'display_name': 'fake_display_name',
'name': 'fake_name',
'uuid': 'fake_uuid',
'image_ref': 'fake_image',
'vcpus': 1,
'memory_mb': 512,
'root_gb': 10,
'host': 'fake_host',
'expected_attrs': ['system_metadata'],
}
return fake_instance.fake_instance_obj(
ctxt, **_instance_values)
| tpouyer/nova-lxd | nova_lxd/tests/stubs.py | Python | apache-2.0 | 3,293 |
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 16:45:51 2015
@author: hugo
"""
from beampy.document import document
from bs4 import BeautifulSoup
import re
from beampy.scour import scour
import glob
import os
import sys
from subprocess import check_call, check_output
import tempfile
import time
import hashlib  # To create unique ids for elements
import logging
_log = logging.getLogger(__name__)
# Lib to check the source code
import inspect
# Create REGEX pattern
find_svg_tags = re.compile('id="(.*)"')
# Regex to collapse tabs and newlines
remove_tabnewline = re.compile(r'\s+')
def unit_operation(value, to=0):
    """
    Perform '+'/'-' operations on unit values and return the result in px.
    Examples: value = '3px+4cm' -> the sum of both lengths in px
              value = '+5cm', to=450 -> 450px + 5cm
    """
    if '+' in value:
        vsplited = value.split('+')
        for v in vsplited:
            if v:  # a leading '+' leaves an empty first part; skip it
                to += float(convert_unit(v))
    elif '-' in value:
        vsplited = value.split('-')
        if vsplited[0]:  # the part before '-' is a base value to add
            to += float(convert_unit(vsplited[0]))
        for v in vsplited[1:]:
            if v:
                to -= float(convert_unit(v))
    return "%0.1f" % to
def convert_unit(value, ppi=72):
"""
Function to convert size given in some unit to pixels, following the
https://www.w3.org/TR/2008/REC-CSS2-20080411/syndata.html#length-units
Parameters:
-----------
value, str or int:
        The given size followed by its unit. Fixed units are (in, cm,
mm, pt, pc). Relative units are (em, ex, %)
ppi, int, optional:
        The number of pixels per inch (LaTeX uses 72)
"""
value = str(value)
# px to px
if 'px' in value:
value = '%0.1f' % (float(value.replace('px', '')))
# mm to cm
if 'mm' in value:
value = "%fcm" % (float(value.replace('mm',''))*10**-1)
# cm to inch
if "cm" in value:
value = "%fin" % (float(value.replace('cm', ''))*(1/2.54))
    # pc to inch (1pc = 12pt = 1/6 inch)
    if 'pc' in value:
        value = '%fin' % (float(value.replace('pc', '')) / 6.0)
# pt to inch
if "pt" in value:
value = "%fin" % (float(value.replace('pt', ''))*(1/72.0))
# inch to px
if "in" in value:
# 1 inch = 72px
out = float(value.replace('in', ''))*ppi
else:
out = float(value)
return out
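# A few worked conversions (a sketch, assuming the default ppi=72):
#   convert_unit('10pt') -> ~10.0   (10/72 in, then *72 px/in)
#   convert_unit('2cm')  -> ~56.69  (2/2.54 in, then *72 px/in)
#   convert_unit('12px') -> 12.0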
def pre_cache_svg_image(svg_frames):
"""
    Function to extract raster images from the svg so they are defined only
    once on the slide.
"""
all_images = []
out_svg_frames = []
findimage = re.compile(r'<image.*?>')
for frame in svg_frames:
svgimages = findimage.findall(frame)
all_images += svgimages
        #add the cleaned frame to the output
out_svg_frames += [findimage.sub('\n',frame)]
return out_svg_frames, all_images
def make_global_svg_defs_new_but_buggy(svg_soup):
"""
    Function to rename svg ids and refs with a global counter
    to avoid clashing element references between slides
Input -> svg_soup: beautifulsoup object of the svg
"""
    # Test if a svg_id global counter exists
if 'svg_id' not in document._global_counter:
document._global_counter['svg_id'] = 0 # init the counter
# Get all id from defined object in <defs>
for defs in svg_soup.find_all('defs'):
tags_to_replace = find_svg_tags.findall(str(defs))
base_name = "beampy"
for cpt, tag in enumerate(tags_to_replace):
#print(tag)
#print({'xlink:href': '#%s'%tag})
#Some use of this defs
new_tag = "%s_%i"%(base_name, document._global_counter['svg_id'])
for elem in svg_soup.find_all(attrs={'xlink:href': '#%s'%tag}):
elem['xlink:href'] = "#%s"%new_tag
#Inside defs get the good one to change
for elem in svg_soup.find_all(attrs={'id': tag}):
elem['id'] = new_tag
document._global_counter['svg_id'] += 1
#print('Svg refs changed in %0.4fs'%(time.time() - tps))
return svg_soup
def make_global_svg_defs(svg_soup):
"""
    Function to make ids in svg defs globally unique using a global counter.
    svg_soup: a BeautifulSoup object of the svg file
"""
    # Test if a svg_id global counter exists
if 'svg_id' not in document._global_counter:
document._global_counter['svg_id'] = 0 #init the counter
#str_svg to replace modified id in all the svg content
strsvg = svg_soup.decode('utf8')
#Find defs
svgdefs = svg_soup.find('defs')
#change_tags = ['path','clipPath','symbol','image', 'mask']
#change_tags = ['clipPath','mask','symbol','image']
#print(strsvg, svgdefs)
#Create unique_id_ with time
text_id = ("%0.4f"%time.time()).split('.')[-1]
if svgdefs is not None:
for tag in svgdefs.findAll(lambda x: x is not None and x.has_attr('id')):
oldid = tag['id']
newid = "%s_%i"%(text_id, document._global_counter['svg_id'])
strsvg = re.sub(oldid+'"', newid+'"', strsvg)
if tag.name in ['clipPath','linearGradient']:
strsvg = re.sub('(#'+oldid+')', '#'+newid, strsvg)
# print(oldid, newid)
document._global_counter['svg_id'] += 1
#Reparse the new svg
soup = BeautifulSoup(strsvg, 'xml')
#print('Svg refs changed in %0.4fs'%(time.time() - tps))
return soup
def horizontal_centering(object_width, xinit=0, page_width=None):
"""
    Function to center an object on the page_width
xinit: is the initial position
final position:
xinit + available_space/2
"""
    if page_width is None:
page_width = document._width
if page_width > object_width:
available_space = (page_width - object_width)
#print available_space, object_width
xnew = xinit + (available_space/2)
else:
xnew = xinit
return xnew
def optimize_svg(svgfile_in):
"""
    Use python scour to optimize the svg; gains roughly 50% in size
options (default):
{'strip_ids': False,
'shorten_ids': False,
'simple_colors': True,
'strip_comments': False,
'remove_metadata': False,
'outfilename': None,
'group_create': False,
'protect_ids_noninkscape': False,
'indent_type': 'space',
'keep_editor_data': False,
'shorten_ids_prefix': '',
'keep_defs': False,
'renderer_workaround': True,
'style_to_xml': True,
'protect_ids_prefix': None,
'enable_viewboxing': False,
'digits': 5,
'embed_rasters': True,
'infilename': 'none',
'strip_xml_prolog': False,
'group_collapse': True,
'quiet': False,
'protect_ids_list': None}
"""
#get default option
opts = scour.generateDefaultOptions()
options = opts.__dict__
#custom options for beampy
#TODO: add this option to a configuration file
options['indent_type'] = None
options['strip_comments'] = True
#options['group_create'] = True #create group with identical element defs
#run scour
#print('optimize svg...')
t = time.time()
#svgout = scour.scourString(svgfile_in, opts).encode("UTF-8")
svgout = scour.scourString(svgfile_in, opts)
print('optimize svg run in %f'%(time.time()-t))
#print('done')
return svgout
def latex2svg(latexstring, write_tmpsvg=False):
"""
Command to render latex -> dvi -> svg
Parameters
==========
write_tmpsvg: true or false optional,
Write the svg produced by dvisvgm to a file (if True)
otherwise the output is read from stdout
"""
_log.debug('Run latex2svg function')
_log.debug(latexstring)
dvisvgmcmd = document._external_cmd['dvisvgm']
# Create variable to store name of the created temp file
tmpname = None
tex_outputs = None
# Get the temporary dir location
tmppath = tempfile.gettempdir()
with tempfile.NamedTemporaryFile(mode='w', prefix='beampytmp', suffix='.tex') as f:
# Get the name of the file
tmpname, tmpextension = os.path.splitext(f.name)
# Write latex commands to the file
f.write( latexstring )
# Flush file content, so that it is available for latex command
f.file.flush()
#Run Latex
#t = time.time()
tex = os.popen("cd "+tmppath+" && latex -interaction=nonstopmode "+f.name)
#print('latex run in %f'%(time.time()-t))
#print tex.read() #To print tex output
"""
This is a test to get the base line from latex output
\\newlength\\x
\\newlength\\y
\\x=1em
\\y=1ex
\\showthe\\x
\\showthe\\y
"""
#find_size = re.compile(r'> \d.*?.pt.')
#tex_em, tex_ex = find_size.findall(tex.read())
        #Convert latex pt to cm (1cm = 28.4pt)
#tex_em = "%0.5fcm"%(float(tex_em[2:-3]) * 1/28.4)
#convert to pixel
tex_outputs = tex.read()
tex.close() # close the os.popen
#Run dvi2svgm
if tex_outputs is None or 'error' in tex_outputs or '!' in tex_outputs:
print('Latex compilation error')
print(tex_outputs)
#Remove temp files
for f in glob.glob(tmpname+'*'):
os.remove(f)
# Stop beampy compilation
sys.exit(1)
else:
#dvisvgm to convert dvi to svg [old -e option not compatible with linkmark]
if write_tmpsvg:
_log.debug('Write dvisvgm output as an svg file')
cmd = dvisvgmcmd
            cmd += ' -n -a --linkmark=none -o {filename}.svg --verbosity=0 {filename}.dvi'
cmd = cmd.format(filename=tmpname)
res = os.popen(cmd)
resp = res.read()
res.close()
with open(tmpname + '.svg') as svgf:
outsvg = svgf.read()
else:
cmd = dvisvgmcmd+' -n -s -a --linkmark=none -v0 {filename}.dvi'
cmd = cmd.format(filename=tmpname)
outsvg = check_output(cmd, shell=True).decode('utf8')
#Remove temp files
for f in glob.glob(tmpname+'*'):
os.remove(f)
outsvg = clean_ghostscript_warnings(outsvg)
_log.debug(outsvg)
_log.debug(type(outsvg))
return outsvg
def clean_ghostscript_warnings(rawsvg):
"""
    Function to remove warnings that appear in stdout.
    The beginning of the file is something like:
*** WARNING - you have selected SAFER, indicating you want Ghostscript
to execute in a safer environment, but at the same time
have selected WRITESYSTEMDICT. Unless you use this option with
care and specifically, remember to execute code like:
"systemdict readonly pop"
it is possible that malicious <?xml version='1.0'?>
<svg [...]/>
"""
if isinstance(rawsvg, list):
svg_lines = rawsvg
else:
svg_lines = rawsvg.splitlines()
start_svg = 0
for i, line in enumerate(svg_lines):
if line.startswith('<svg') or line.startswith('<?xml'):
start_svg = i
break
if isinstance(rawsvg, list):
good_svg = svg_lines[start_svg:]
else:
good_svg = '\n'.join(svg_lines[start_svg:])
if start_svg > 2:
        _log.debug('SVG has been cleaned of GS warnings, here is the original:')
_log.debug(rawsvg)
return good_svg
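# Example (a sketch): everything before the first line starting with '<?xml'
# or '<svg' is dropped.
#   raw = "*** WARNING - ...\n<?xml version='1.0'?>\n<svg/>"
#   clean_ghostscript_warnings(raw) -> "<?xml version='1.0'?>\n<svg/>"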
def getsvgwidth( svgfile ):
"""
get svgfile width using inkscape
"""
inkscapecmd = document._external_cmd['inkscape']
cmd = inkscapecmd + ' -z -W %s'%svgfile
req = os.popen(cmd)
res = req.read()
req.close()
return res
def getsvgheight( svgfile ):
"""
get svgfile height using inkscape
"""
inkscapecmd = document._external_cmd['inkscape']
cmd = inkscapecmd + ' -z -H %s'%svgfile
req = os.popen(cmd)
res = req.read()
req.close()
return res
def gcs():
"""
    Function to get the current slide of the document
"""
return document._curentslide
def set_curentslide(slide_id):
"""
    Set the current slide to the given slide_id
"""
document._curentslide = slide_id
def set_lastslide():
'''
    Set the current slide to the last slide added to the presentation
'''
last_slide_id = 'slide_%i' % (document._global_counter['slide'])
document._curentslide = last_slide_id
def gce(doc=document):
"""
Function to get the current element number
"""
return doc._global_counter['element']
def epstopdf(eps_input_file, pdf_output_file):
'''
    Runs epstopdf in shell:
    epstopdf eps_input_file pdf_output_file
'''
return check_call([document._external_cmd['epstopdf'],
eps_input_file, pdf_output_file])
def pdf2svg(pdf_input_file, svg_output_file):
'''
Runs pdf2svg in shell:
pdf2svg pdf_input_file svg_output_file
'''
return check_call([document._external_cmd['pdf2svg'],
pdf_input_file, svg_output_file])
def convert_eps_to_svg(eps_input_file, temp_directory='local'):
'''
    Open eps_input_file, convert to pdf with epstopdf, then to svg using pdf2svg.
'''
local_directory, filename_pdf = os.path.split(eps_input_file)
filename = os.path.splitext(filename_pdf)[0]
if temp_directory == 'local':
temp_directory = local_directory
if len(temp_directory) > 0:
svg_output_file = temp_directory + '/' + filename + '.svg'
pdf_output_file = temp_directory + '/' + filename + '.pdf'
else:
svg_output_file = filename + '.svg'
pdf_output_file = filename + '.pdf'
try:
epstopdf(eps_input_file, pdf_output_file)
pdf2svg(pdf_output_file,svg_output_file)
with open(svg_output_file, 'r') as f:
svg_figure = f.read()
check_call(['rm', svg_output_file])
check_call(['rm', pdf_output_file])
return svg_figure
except ValueError:
return None
def convert_pdf_to_svg(pdf_input_file, temp_directory='local'):
'''
Open pdf_input_file, convert to svg using pdf2svg.
'''
local_directory, filename_pdf = os.path.split(pdf_input_file)
filename = os.path.splitext(filename_pdf)[0]
if temp_directory == 'local':
temp_directory = local_directory
if len(temp_directory) > 0:
svg_output_file = temp_directory + '/' + filename + '.svg'
else:
svg_output_file = filename + '.svg'
try:
pdf2svg(pdf_input_file, svg_output_file)
with open(svg_output_file, 'r') as f:
svg_figure = f.read()
check_call(['rm', svg_output_file])
return svg_figure
except ValueError:
return None
def load_args_from_theme(function_name, args):
"""
Function to set args of a given element
"""
for key in args:
if args[key] == "" or args[key] is None:
try:
args[key] = document._theme[function_name][key]
except KeyError:
print("[Beampy] No theme propertie for %s in %s" % (key, element_id))
def check_function_args( function, arg_values_dict, lenient = False ):
"""
Function to check input function args.
    Function args are defined in default_theme.py;
    if a theme is added, its value is taken rather than the default one.
"""
function_name = function.__name__
default_dict = document._theme[function_name]
outdict = {}
for key, value in arg_values_dict.items():
        #Check if this argument exists for this function
if key in default_dict:
outdict[key] = value
else:
if not lenient :
print("Error the key %s is not defined for %s module"%(key, function_name))
print_function_args( function_name )
sys.exit(1)
    #Check if there are omitted arguments that need to be loaded by default
for key, value in default_dict.items():
if key not in outdict:
outdict[key] = value
return outdict
def print_function_args(function_name):
#Pretty print of function arguments with default values
print("Allowed arguments for %s"%(function_name))
for key, value in document._theme[function_name].items():
print("%s: [%s] %s"%(key, str(value), type(value)))
def inherit_function_args(function_name, args_dict):
    #Add args defined for another function to the args_dict
for key, value in document._theme[function_name].items():
if key not in args_dict:
args_dict[key] = value
return args_dict
def color_text( textin, color ):
'''
Adds Latex color to a string.
'''
if "#" in color:
textin = r'{\color[HTML]{%s} %s }' % (color.replace('#', '').upper(),
textin)
else:
textin = r'{\color{%s} %s }' % (color, textin)
return textin
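# Example output (a sketch with made-up inputs):
#   color_text('beampy', '#1a2b3c') -> '{\color[HTML]{1A2B3C} beampy }'
#   color_text('beampy', 'red')     -> '{\color{red} beampy }'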
def dict_deep_update(original, update):
"""
Recursively update a dict.
    Subdicts won't be overwritten but updated as well.
from http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression/44512#44512
"""
for key, value in original.items():
if not key in update:
update[key] = value
elif isinstance(value, dict):
dict_deep_update( value, update[key] )
return update
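# Example (a sketch): nested dicts are merged rather than replaced, and the
# (mutated) update dict is returned.
#   original = {'a': 1, 'b': {'x': 1, 'y': 2}}
#   update = {'b': {'y': 20}, 'c': 3}
#   dict_deep_update(original, update)
#   -> {'a': 1, 'b': {'x': 1, 'y': 20}, 'c': 3}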
def create_element_id(bpmod, use_args=True, use_name=True,
use_content=True, add_slide=True, slide_position=True):
"""
create a unique id for the beampy_module using bpmod.content
and bpmod.args.keys() and bpmod.name
"""
ct_to_hash = ''
if add_slide:
ct_to_hash += bpmod.slide_id
if use_args and bpmod.args is not None:
ct_to_hash += ''.join(['%s:%s' % (k, v) for k, v in bpmod.args.items()])
if use_name and bpmod.name is not None:
ct_to_hash += bpmod.name
if use_content and bpmod.content is not None:
ct_to_hash += str(bpmod.content)
if slide_position:
ct_to_hash += str(len(document._slides[bpmod.slide_id].element_keys))
outid = None
if ct_to_hash != '':
# print(ct_to_hash)
try:
outid = hashlib.md5( ct_to_hash ).hexdigest()
except:
outid = hashlib.md5( ct_to_hash.encode('utf8') ).hexdigest()
if outid in document._slides[bpmod.slide_id].element_keys:
print("Id for this element already exist!")
sys.exit(0)
outid = None
#print(outid)
return outid
# TODO: Improve this function
def get_command_line(func_name):
"""
Function to print the line of the command in the source code file
frame,filename,nline,function_name,lines,index = inspect.stack()[-1]
"""
frame, filename, nline, function_name, lines, index = inspect.stack()[-1]
# print(nline, func_name)
if not isinstance(func_name, str):
# func_name = func_name.func_name
func_name = func_name.__name__
# print(frame,filename,nline,function_name,lines,index)
start = None
src = document._source_code.source(stop=nline).split('\n')
# print(src)
for cpt, line in enumerate(src[::-1]):
if func_name+'(' in line:
# print(line)
start = (nline) - (cpt + 1)
break
# print start
if start is not None:
stop = nline-1
source = document._source_code.source(start+1, nline).replace('\n', '')
else:
start = 0
stop = 0
source = func_name
# Remove tab and space from source
source = remove_tabnewline.sub(' ', source)
return (start, nline-1, source)
def guess_file_type(file_name, file_type=None):
"""
Guess the type of a file name
"""
file_extensions = {
'svg': ['svg'],
'pdf': ['pdf'],
'png': ['png'],
'jpeg': ['jpg', 'jpeg'],
'gif': ['gif'],
'eps': ['eps']
}
if file_type is None:
ext = os.path.splitext(file_name)[1][1:]
found = False
for file_type in file_extensions:
if ext in file_extensions[file_type]:
found = True
break
if not found:
raise TypeError('Unknown file type '+ext+' for file name: ' + file_name + '.' )
return file_type
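# Example (a sketch): the file extension picks the type when file_type is not
# given explicitly.
#   guess_file_type('figures/plot.jpg') -> 'jpeg'
#   guess_file_type('slide.eps')        -> 'eps'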
# Function to render texts in document
def render_texts(elements_to_render=None, extra_packages=None):
r"""
Function to merge all text in the document to run latex only once
    This function builds the .tex file and then calls two external programs
.tex -> latex -> .dvi -> dvisvgm -> svgfile
Parameters:
-----------
elements_to_render, list of beampy_module (optional):
List of beampy_module object to render (the default is None,
which render all text module in all slides).
extra_packages, list of string (optional):
Give a list of extra latex packages to use in the latex
template. Latex packages should be given as follow:
[r'\usepackage{utf8x}{inputenc}']
"""
if elements_to_render is None:
elements_to_render = []
if extra_packages is None:
extra_packages = []
print('Render texts of slides with latex')
latex_header = r"""
\documentclass[crop=true, multi=varwidth]{standalone}
\usepackage[utf8x]{inputenc}
\usepackage{fix-cm}
\usepackage[hypertex]{hyperref}
\usepackage[svgnames]{xcolor}
\renewcommand{\familydefault}{\sfdefault}
\usepackage{varwidth}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
%s
\begin{document}
""" % ('\n'.join(extra_packages + document._latex_packages))
latex_pages = []
latex_footer = r"\end{document}"
# logging.debug(latex_header)
#Loop over slide
t = time.time()
cpt_page = 1
elements_pages = []
if elements_to_render == []:
for islide in range(len(document._slides)-1):
            # don't loop over document._slides keys directly as they will be ordered differently in py2 and py3
sid = 'slide_%i' % (islide+1)
#Loop over content in the slide
for cid in document._slides[sid].element_keys:
e = document._slides[sid].contents[cid]
                #If it's a text element that hasn't been rendered yet, queue it for latex rendering
if e.type == 'text' and e.usetex and not e.rendered:
elements_to_render += [e]
for e in elements_to_render:
if e.cache and document._cache is not None:
_log.debug('Render_texts test cache for element %s(id=%s) on slide: %s' % (e.name, e.id, e.slide_id))
ct_cache = document._cache.is_cached(e.slide_id, e)
if ct_cache is False:
# Run the pre_rendering
e.pre_render()
try:
latex_pages += [e.latex_text]
elements_pages += [{"element": e, "page": cpt_page}]
cpt_page += 1
except Exception as e:
print(e)
else:
e.pre_render()
try:
latex_pages += [e.latex_text]
elements_pages += [{"element": e, "page": cpt_page}]
cpt_page += 1
except Exception as e:
print(e)
_log.debug(latex_header+'\n \\newpage \n'.join(latex_pages)+latex_footer)
# Write the file to latex
if len(latex_pages) > 0:
# get the location of tempdir
tmppath = tempfile.gettempdir()
# Create a None tempory filename
tmpname = None
# Create a variable to store the latex output
tex_outputs = None
# Use tempfile.NamedTemporaryFile to create a text file with .tex suffix and beampytmp prefix
        # NamedTemporaryFile automatically closes the file at the end of the context by default
with tempfile.NamedTemporaryFile(mode='w', suffix='.tex', prefix='beampytmp') as f:
# Get the name of the file
tmpname, extension = os.path.splitext(f.name)
# Write down the latex code to this file
f.write(latex_header)
f.write('\n \\newpage \n'.join(latex_pages))
f.write(latex_footer)
            print('Latex file written in %f' % (time.time()-t))
# Flush the file content so that latex can see it
f.file.flush()
#Run Latex using subprocess
#t = time.time()
cmd = "cd "+tmppath+" && latex -interaction=nonstopmode --halt-on-error "+f.name
_log.debug(cmd)
tex = os.popen(cmd)
#print('Latex run in %f'%(time.time()-t))
tex_outputs = tex.read()
_log.debug(tex_outputs)
tex.close() # close os.popen
# to test the output of latex file
"""
with open('test_text_py2.tex', 'w') as f:
f.write(latex_header)
f.write('\n \\newpage \n'.join(latex_pages))
f.write(latex_footer)
"""
if tex_outputs is None or 'error' in tex_outputs or '!' in tex_outputs:
print(tex_outputs)
print('Latex compilation error')
#Remove temp files generated by latex
for f in glob.glob(tmpname+'*'):
os.remove(f)
# Stop Beampy compilation
sys.exit(1)
#Upload svg to each elements
dvisvgmcmd = document._external_cmd['dvisvgm']
t = time.time()
if tmpname is not None:
cmd = dvisvgmcmd+' -n -s -p1- --linkmark=none -v0 '+tmpname+'.dvi'
allsvgs = check_output(cmd, shell=True).decode('utf8', errors='replace')
allsvgs = allsvgs.splitlines()
#To split the data get the xml syntax <? xml ....?>
schema = get_xml_tag(allsvgs)
_log.debug('Schema to cut svg %s'%(str(schema)))
assert schema is not None
            # Check if there are warnings emitted by dvisvgm inside the svg file
allsvgs = clean_ghostscript_warnings(allsvgs)
#Join all svg lines and split them each time you find the schema
svg_list = ''.join(allsvgs).split(schema)
if svg_list[0] == '':
svg_list = svg_list[1:]
_log.debug('Size of svg %i and size of latex pages %i'%(len(svg_list), len(elements_pages)))
assert len(svg_list) == len(elements_pages)
#Process all pages to svg
for i, ep in enumerate(elements_pages):
#Loop over content in the slide
ep['element'].svgtext = schema + svg_list[i]
print('DVI -> SVG in %f'%(time.time()-t))
#Remove temp files generated by latex
for f in glob.glob(tmpname+'*'):
os.remove(f)
PYTHON_XMLFIND_REGEX = re.compile(r'<\?xml[^>]+>')
def get_xml_tag(rawsvg):
"""
    Function to find the xml tag in a file; this tag could be
<?xml version='1.0'?>
or
<?xml version='1.0' encoding='UTF-8'?>
    or other (depends on the dvisvgm version)
"""
if isinstance(rawsvg, list):
svg_lines = rawsvg
else:
svg_lines = rawsvg.splitlines()
xmltag = None
for l in svg_lines:
search_re = PYTHON_XMLFIND_REGEX.search(l)
if search_re:
xmltag = search_re.group(0)
break
return xmltag
# How do we split inputs paragraphs (all type of python strings)
PYTHON_COMMENT_REGEX = re.compile('"{3}?|"|\'{3}?|\'', re.MULTILINE)
def small_comment_parser(src):
"""
Find comments inside a python source code.
    Return a list of parsed comments.
Parameters
----------
src : str
        The source code to parse.
"""
# print(src)
cur_marker_pos = 0
cur_marker_type = ''
marker_open = False
text_parts = []
for part in PYTHON_COMMENT_REGEX.finditer(src):
start, stop = part.start(), part.end()
# Init
if cur_marker_pos == 0:
cur_marker_type = src[start:stop].strip()
cur_marker_pos = stop
marker_open = True
else:
if marker_open:
if cur_marker_type == src[start:stop].strip():
# print("end of marker %s" % cur_marker_type)
# Store the text
# comments = cur_marker_type
comments = src[cur_marker_pos:stop-len(cur_marker_type)]
text_parts += [comments]
cur_marker_pos = stop
cur_marker_type = ''
marker_open = False
else:
cur_marker_pos = stop
cur_marker_type = src[start:stop].strip()
marker_open = True
return text_parts
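# Example (a sketch): both triple- and single-quoted strings are treated as
# comment markers by the regex above.
#   src = 'x = 1\n"""first block"""\ny = 2\n\'inline\'\n'
#   small_comment_parser(src) -> ['first block', 'inline']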
| hchauvet/beampy | beampy/functions.py | Python | gpl-3.0 | 28,913 |
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class CreateRoutersAction(BaseAction):
action = 'CreateRouters'
command = 'create-routers'
usage = '%(prog)s [-c <count>] [-N <router_name>] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-c', '--count', dest='count',
action='store', type=int, default=1,
help='the number of routers to create.')
parser.add_argument('-N', '--router_name', dest='router_name',
action='store', type=str, default='',
help='the short name of routers')
parser.add_argument('-s', '--security_group', dest='security_group',
action='store', type=str, default='',
help='ID of the security group you want to apply to router, use default security group if not specified')
parser.add_argument('-n', '--vpc_network', dest='vpc_network',
action='store', type=str, default=None,
help='VPC IP addresses range, currently support "192.168.0.0/16" or "172.16.0.0/16", required in zone pek3a')
parser.add_argument('-t', '--router_type', dest='router_type',
action='store', type=int, default=1,
                            help='0 - medium, 1 - small, 2 - large, 3 - extra-large')
@classmethod
def build_directive(cls, options):
required_params = {
'router_name': options.router_name,
}
for param in required_params:
if required_params[param] is None or required_params[param] == '':
print('error: [%s] should be specified' % param)
return None
return {
'count' : options.count,
'router_name' : options.router_name,
'security_group': options.security_group,
'vpc_network': options.vpc_network,
'router_type': options.router_type,
}
| yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/router/create_routers.py | Python | apache-2.0 | 2,807 |
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import logging
import os
import time
import hashlib,urllib
from waveapi import simplejson as json
from pushy import utils
from pushy import pushy
class PushHandler(webapp.RequestHandler):
def __init__(self):
self._pushy = pushy.Pushy()
def post(self):
if self._pushy.handle_push(self.request.path, self.request.body):
self.response.out.write("OK")
else:
self.response.out.write("Malformed URL")
def main():
application = webapp.WSGIApplication([('/push.*', PushHandler)], debug=True)
run_wsgi_app(application)
if __name__ == '__main__':
main()
| chrismdp/pushy | pushy/receive.py | Python | mit | 701 |
'''
_____ _ __ __ _ _
| __ \| | | \/ | | (_)
| | | | |__ __ _ _ __ ___ __ _ _ __ | \ / |_ _| |_ __ _
| | | | '_ \ / _` | '_ ` _ \ / _` | '__| | |\/| | | | | | |/ _` |
| |__| | | | | (_| | | | | | | (_| | | | | | | |_| | | | (_| |
|_____/|_| |_|\__,_|_| |_| |_|\__,_|_| |_| |_|\__,_|_|_|\__,_|
Python Snake by Dhamar Mulia
'''
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_DOWN, KEY_UP
from random import randint
WIDTH = 120
HEIGHT = 30
MAX_X = WIDTH - 2
MAX_Y = HEIGHT - 2
SNAKE_LENGTH = 5
SNAKE_X = SNAKE_LENGTH + 1
SNAKE_Y = 3
TIMEOUT = 100
class Snake(object):
REV_DIR_MAP = {
KEY_UP: KEY_DOWN, KEY_DOWN: KEY_UP,
KEY_LEFT: KEY_RIGHT, KEY_RIGHT: KEY_LEFT,
}
def __init__(self, x, y, window):
self.body_list = []
self.hit_score = 0
self.timeout = TIMEOUT
        # build the snake body
for i in range(SNAKE_LENGTH, 0, -1):
self.body_list.append(Body(x - i, y))
        # build the snake head
self.body_list.append(Body(x, y, '@'))
self.window = window
self.direction = KEY_RIGHT
self.last_head_coor = (x, y)
self.direction_map = {
KEY_UP: self.move_up,
KEY_DOWN: self.move_down,
KEY_LEFT: self.move_left,
KEY_RIGHT: self.move_right
}
@property
def score(self):
return 'Score : {}'.format(self.hit_score)
def add_body(self, body_list):
self.body_list.extend(body_list)
def eat_food(self, food):
food.reset()
body = Body(self.last_head_coor[0], self.last_head_coor[1])
self.body_list.insert(-1, body)
self.hit_score += 1
if self.hit_score % 3 == 0:
self.timeout -= 5
self.window.timeout(self.timeout)
@property
def nabrak(self):
return any([body.coor == self.kepala.coor
for body in self.body_list[:-1]])
def update(self):
        # shift each snake body segment to the position of the one in
        # front of it, starting from the tail
# for idx, body in enumerate(self.body_list[:-1]):
# body.x = self.body_list[idx+1].x
# body.y = self.body_list[idx+1].y
last_body = self.body_list.pop(0)
last_body.x = self.body_list[-1].x
last_body.y = self.body_list[-1].y
self.body_list.insert(-1, last_body)
self.last_head_coor = (self.kepala.x, self.kepala.y)
self.direction_map[self.direction]()
def change_direction(self, direction):
if direction != Snake.REV_DIR_MAP[self.direction]:
self.direction = direction
def render(self):
for body in self.body_list:
self.window.addstr(body.y, body.x, body.char)
@property
def kepala(self):
return self.body_list[-1]
@property
def coor(self):
return self.kepala.x, self.kepala.y
def move_up(self):
self.kepala.y -= 1
if self.kepala.y < 1:
self.kepala.y = MAX_Y
def move_down(self):
self.kepala.y += 1
if self.kepala.y > MAX_Y:
self.kepala.y = 1
def move_left(self):
self.kepala.x -= 1
if self.kepala.x < 1:
self.kepala.x = MAX_X
def move_right(self):
self.kepala.x += 1
if self.kepala.x > MAX_X:
self.kepala.x = 1
class Body(object):
def __init__(self, x, y, char='#'):
self.x = x
self.y = y
self.char = char
@property
def coor(self):
return self.x, self.y
class Food(object):
def __init__(self, window, char='*'):
self.x = randint(1, MAX_X)
self.y = randint(1, MAX_Y)
self.char = char
self.window = window
def render(self):
self.window.addstr(self.y, self.x, self.char)
def reset(self):
self.x = randint(1, MAX_X)
self.y = randint(1, MAX_Y)
if __name__ == '__main__':
curses.initscr()
window = curses.newwin(HEIGHT, WIDTH, 0, 0)
window.timeout(TIMEOUT)
window.keypad(1)
curses.noecho()
curses.curs_set(0)
window.border(0)
snake = Snake(SNAKE_X, SNAKE_Y, window)
food = Food(window, '&')
while True:
window.clear()
window.border(0)
snake.render()
food.render()
window.addstr(0, 5, snake.score)
event = window.getch()
if event == 27:
break
if event in [KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT]:
snake.change_direction(event)
if snake.kepala.x == food.x and snake.kepala.y == food.y:
snake.eat_food(food)
if event == 32:
key = -1
while key != 32:
key = window.getch()
snake.update()
if snake.nabrak:
break
curses.endwin()
|
dhamarmulia/dm-snake
|
main.py
|
Python
|
mit
| 4,911
|
'''
This script was created to aggregate the downlink, uplink, and signal data from the broadband drive test points.
- It aggregates fields from the points to roads segments based on proximity.
- The script was designed to run as a stand alone arcpy program but it could easily be adapted to a script tool interface.
- It was designed to run with ArcGIS 10.2
Created on Sep 19, 2014
@author: kwalker
'''
import arcpy, os, csv
class Fields (object):
def __init__(self):
self._fieldList = []
def getI(self, field):
return self._fieldList.index(field)
def getFieldList(self):
return self._fieldList
class DataPointFields(Fields):
def __init__(self, dataPoints, directionFieldName, speedFieldName):
self.objectId = arcpy.Describe(dataPoints).OIDFieldName
self.direction = directionFieldName
self.speed = speedFieldName
self._fieldList = [self.objectId, self.direction, self.speed]
class SignalPointFields(Fields):
def __init__(self, signalPoints, signalFieldName):
self.objectId = arcpy.Describe(signalPoints).OIDFieldName
self.signal = signalFieldName
self._fieldList = [self.objectId, self.signal]
class NearTableFields(Fields):
def __init__(self):
self.inputId = 'IN_FID'
self.nearId = 'NEAR_FID'
self._fieldList = [self.inputId, self.nearId]
class DataStatsFields(Fields):
def __init__(self, dataFields, nearFields):
self.segmentId = nearFields.nearId
self.maxSpeed = 'MAX_{}'.format(dataFields.speed)
self.direction = dataFields.direction
self._fieldList = [self.segmentId, self.maxSpeed, self.direction]
class SignalStatsFields(Fields):
def __init__(self, signalFields, nearFields):
self.segmentId = nearFields.nearId
self.maxSignal= 'MAX_{}'.format(signalFields.signal)
self._fieldList = [self.segmentId, self.maxSignal]
class SegmentResult(object):
"""Stores the results of a single segment. Also contains static methods for writing a list
SegmentResults to CSV format."""
outputCsvFields = []
def __init__(self, segmentId):
self._segmentId = segmentId
self.signal = ""
self.uplink = ""
self.downlink = ""
if SegmentResult.outputCsvFields == []:#Just in case outputCsvFields doesn't get set, use class fields instead
SegmentResult.outputCsvFields = self.__dict__.keys()
def getRowList (self):
return [self._segmentId, self.signal, self.uplink, self.downlink]
    def __str__(self):
        return str(self.getRowList())
@staticmethod
def appendResultCSV(segResult, outputFilePath):
with open(outputFilePath, "a") as outCSV:
outCSV.write("\n" + str(segResult))
@staticmethod
def createResultCSV(segResultList, outputFilePath):
"""Replaced by appending method"""
with open(outputFilePath, "wb") as outCSV:
writer = csv.writer(outCSV)
writer.writerow(SegmentResult.outputCsvFields)
writer.writerows(list(segRes.getRowList() for segRes in segResultList))
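# Illustrative usage sketch (not part of the original script); the ID, values
# and output path below are hypothetical:
#
#   SegmentResult.outputCsvFields = ["segmentID", "signal", "uplink", "downlink"]
#   res = SegmentResult(42)
#   res.signal, res.uplink, res.downlink = "-65", "5.2", "18.4"
#   SegmentResult.createResultCSV([res], r"C:\temp\segments.csv")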
if __name__ == '__main__':
### Set variables in this section
#Path to gdb that contains points and segment lines
gdbPath = r'C:\Users\kwalker\Documents\Aptana Studio 3 Workspace\BroadbandDriveTestStats\data\TestData.gdb'
#Segment line layer
    roads = '{}'.format(os.path.join(gdbPath, 'MultipleRoadSegments'))#Change the segment layer name here
#Point layer names and field names
dataPoints = '{}'.format(os.path.join(gdbPath,'DataPointFeatures'))#Change the data point layer name here
    dataFields = DataPointFields(dataPoints, 'direction', 'speed')#Change direction and speed field names here
    signalPoints = '{}'.format(os.path.join(gdbPath,'SignalPointFeatures'))#Change the signal point layer name here
    signalFields = SignalPointFields(signalPoints, 'signal')#Change signal field name here
    #Output CSV file path. This CSV will be created by the program.
outputCsvPath =r'C:\Users\kwalker\Documents\Aptana Studio 3 Workspace\BroadbandDriveTestStats\data\OutTest2.csv'
SegmentResult.outputCsvFields = ["segmentID", "signal", "uplink", "downlink"]#Number of CSV fields should match class fields in SegmentResult
    #Distance that determines if points belong to a segment
bufferRadius = '0.11 Miles'#Unit text is required by GenerateNearTable_analysis
### End set section
tempGdb = 'in_memory'
dataNearTable = '{}'.format(os.path.join(tempGdb,'DataNear'))
nearFields = NearTableFields()
dataStats = '{}'.format(os.path.join(tempGdb, 'dataStats'))
dStatsFields = DataStatsFields(dataFields, nearFields)
signalNearTable = '{}'.format(os.path.join(tempGdb,'SignalNear'))
nearFields = NearTableFields()
signalStats = '{}'.format(os.path.join(tempGdb, 'signalStats'))
sStatsFields = SignalStatsFields(signalFields, nearFields)
results = {}
### Data Points
print "Begin data points near and stats analysis"
arcpy.GenerateNearTable_analysis (dataPoints, roads, dataNearTable,
search_radius = bufferRadius, closest = 'ALL', method = 'GEODESIC')
arcpy.JoinField_management (dataNearTable, nearFields.inputId, dataPoints,
dataFields.objectId, [dataFields.speed, dataFields.direction])
arcpy.Statistics_analysis (dataNearTable, dataStats,
[[dataFields.speed, 'MAX']], [dataFields.direction, nearFields.nearId])
#Create result objects from stats table
with arcpy.da.SearchCursor(dataStats, dStatsFields.getFieldList()) as dataCursor:
for row in dataCursor:
segmentId = int(row[dStatsFields.getI(dStatsFields.segmentId)])
direction = str(row[dStatsFields.getI(dStatsFields.direction)])
maxSpeed = str(row[dStatsFields.getI(dStatsFields.maxSpeed)])
if segmentId not in results:
results[segmentId] = SegmentResult(segmentId)
segResult = results[segmentId]
if direction == 'D':
segResult.downlink = maxSpeed
elif direction == 'U':
segResult.uplink = maxSpeed
### Signal points
print "Begin signal points near and stats analysis"
arcpy.GenerateNearTable_analysis (signalPoints, roads, signalNearTable,
search_radius = bufferRadius, closest = 'ALL', method = 'GEODESIC')
arcpy.JoinField_management (signalNearTable, nearFields.inputId, signalPoints,
signalFields.objectId, [signalFields.signal])
arcpy.Statistics_analysis (signalNearTable, signalStats,
[[signalFields.signal, 'MAX']], [nearFields.nearId])
#Create result objects from stats table
with arcpy.da.SearchCursor(signalStats, sStatsFields.getFieldList()) as signalCursor:
for row in signalCursor:
segmentId = int(row[sStatsFields.getI(sStatsFields.segmentId)])
maxSignal = str(row[sStatsFields.getI(sStatsFields.maxSignal)])
if segmentId not in results:
results[segmentId] = SegmentResult(segmentId)
segResult = results[segmentId]
segResult.signal = maxSignal
### Write results to a CSV file
SegmentResult.createResultCSV(results.values(), outputCsvPath)
arcpy.Delete_management(tempGdb)
|
agrc/Broadband-DriveTest-Stats
|
StatsAggregator.py
|
Python
|
mit
| 7,812
|
import json
import os
import logging
import sys
from pyssdb import pyssdb
CONFIG_FILE = 1
ARGUMENT_LIST = [
[CONFIG_FILE, "-f", "<config_file_name>", True],
]
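# Example invocation (config file name is illustrative):
#
#   python ssdbtest.py -f agent.json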
def PrintUsage():
print("\n", __file__, "\n")
for argItem in ARGUMENT_LIST:
if argItem[3] == True:
print(" ", argItem[1], argItem[2])
else:
print(" ", argItem[1], argItem[2], "[OPTIONAL]")
print("")
def GetArguments(argv):
arguments = dict()
idx, argc = 0, len(argv)
while idx < argc:
for argItem in ARGUMENT_LIST:
if (argv[idx] == argItem[1]) and (idx < argc - 1):
idx = idx + 1
arguments[argItem[0]] = argv[idx]
idx = idx + 1
for argItem in ARGUMENT_LIST:
if (argItem[3] == True) and (argItem[0] not in arguments):
PrintUsage()
exit(0)
return arguments
def LoadConfig(filename):
fd = open(filename)
data = fd.read()
fd.close()
return data
if __name__ == '__main__':
args = GetArguments(sys.argv)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s-%(thread)06d-%(levelname)s: %(message)s", datefmt="%Y%m%d-%H%M%S")
confData = LoadConfig(args[CONFIG_FILE])
agentConf = json.loads(confData)
    confSSDB = None
    try:
logging.info("Connecting to %s:%i(%i) ...", agentConf["SSDB"]["Host"], agentConf["SSDB"]["Port"], agentConf["SSDB"]["Timeout"])
confSSDB = pyssdb.Client(host = agentConf["SSDB"]["Host"], port = agentConf["SSDB"]["Port"], socket_timeout = agentConf["SSDB"]["Timeout"])
logging.info("Connection established -- OK")
if ("Passcode" in agentConf["SSDB"]):
logging.info("Sending credential ...")
confSSDB.auth(agentConf["SSDB"]["Passcode"])
logging.info("Credential authenticated -- OK")
except Exception as ex:
print(ex)
finally:
        if confSSDB is not None:
logging.info("Disconnecting from SSDB ...")
confSSDB.disconnect()
logging.info("Finish -- OK")
|
waynechu/PythonProject
|
ssdbtest.py
|
Python
|
mit
| 2,065
|
import json, requests, getpass
from messages import messages_provider
messages = messages_provider.get()
#Need to implement this using OAuth.
class Bootstrap:
APP_NAME = "Git-events"
AUTHORIZATIONS_ENDPOINT = "https://api.github.com/authorizations"
def __init__(self, config):
self.config = config
def setup(self):
user = input(messages.INPUT_USERNAME)
password = getpass.getpass(messages.INPUT_PASSWORD)
if self.using_existing_token(user, password):
print(messages.SETUP_SUCCESS)
return True
if self.create_token(user, password):
print(messages.SETUP_SUCCESS)
return True
return False
def create_token(self, user, password):
token_request = dict()
token_request['note'] = Bootstrap.APP_NAME
token_request['scopes'] = ["notifications" , "repo"]
request = requests.post(Bootstrap.AUTHORIZATIONS_ENDPOINT, auth=(user, password), data=json.dumps(token_request))
        if request.status_code != requests.codes.created:  # POST /authorizations returns 201 on success
            raise Exception(messages.GITHUB_LOGIN_ERROR)
data = request.json()
self.config.set_value('Account', 'username', user)
        self.config.set_value('Account', 'accesstoken', data["token"])
return True
def using_existing_token(self, user, password):
request = requests.get(Bootstrap.AUTHORIZATIONS_ENDPOINT, auth=(user, password))
if request.status_code != requests.codes.ok:
print(messages.GITHUB_LOGIN_ERROR)
return False
data = request.json()
existing_tokens = list(filter(lambda token: token["note"] == Bootstrap.APP_NAME, data))
if not len(existing_tokens):
return False
self.config.set_value('Account', 'username', user)
self.config.set_value('Account', 'accesstoken', existing_tokens[0]["token"])
return True
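# Hypothetical usage sketch: ``config`` can be any object exposing
# set_value(section, key, value), which is all the methods above require.
#
#   bootstrap = Bootstrap(config)
#   bootstrap.setup()   # prompts for GitHub credentials on stdin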
|
iddl/git-events
|
bootstrap.py
|
Python
|
apache-2.0
| 1,909
|
# This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>.
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ca.settings")
app = Celery("ca")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
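# Illustrative: with this module in place, a worker can be started with the
# standard Celery CLI from the project root:
#
#   celery -A ca worker -l info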
|
mathiasertl/django-ca
|
ca/ca/celery.py
|
Python
|
gpl-3.0
| 981
|
from matrix_client.client import MatrixClient, Room, User
import pytest
def test_create_client():
MatrixClient("http://example.com")
def test_sync_token():
client = MatrixClient("http://example.com")
assert client.get_sync_token() is None
client.set_sync_token("FAKE_TOKEN")
    assert client.get_sync_token() == "FAKE_TOKEN"
def test__mkroom():
client = MatrixClient("http://example.com")
roomId = "!UcYsUzyxTGDxLBEvLz:matrix.org"
goodRoom = client._mkroom(roomId)
assert isinstance(goodRoom, Room)
    assert goodRoom.room_id == roomId
with pytest.raises(ValueError):
client._mkroom("BAD_ROOM:matrix.org")
client._mkroom("!BAD_ROOMmatrix.org")
client._mkroom("!BAD_ROOM::matrix.org")
def test_get_rooms():
client = MatrixClient("http://example.com")
rooms = client.get_rooms()
assert isinstance(rooms, dict)
assert len(rooms) == 0
client = MatrixClient("http://example.com")
client._mkroom("!abc:matrix.org")
client._mkroom("!def:matrix.org")
client._mkroom("!ghi:matrix.org")
rooms = client.get_rooms()
assert isinstance(rooms, dict)
assert len(rooms) == 3
def test_bad_state_events():
client = MatrixClient("http://example.com")
room = client._mkroom("!abc:matrix.org")
ev = {
"tomato": False
}
client._process_state_event(ev, room)
def test_state_event():
client = MatrixClient("http://example.com")
room = client._mkroom("!abc:matrix.org")
room.name = False
room.topic = False
room.aliases = False
ev = {
"type": "m.room.name",
"content": {}
}
client._process_state_event(ev, room)
assert room.name is None
ev["content"]["name"] = "TestName"
client._process_state_event(ev, room)
    assert room.name == "TestName"
ev["type"] = "m.room.topic"
client._process_state_event(ev, room)
assert room.topic is None
ev["content"]["topic"] = "TestTopic"
client._process_state_event(ev, room)
    assert room.topic == "TestTopic"
ev["type"] = "m.room.aliases"
client._process_state_event(ev, room)
assert room.aliases is None
aliases = ["#foo:matrix.org", "#bar:matrix.org"]
ev["content"]["aliases"] = aliases
client._process_state_event(ev, room)
    assert room.aliases == aliases
def test_get_user():
client = MatrixClient("http://example.com")
assert isinstance(client.get_user("@foobar:matrix.org"), User)
with pytest.raises(ValueError):
client.get_user("badfoobar:matrix.org")
client.get_user("@badfoobarmatrix.org")
client.get_user("@badfoobar:::matrix.org")
def test_get_download_url():
client = MatrixClient("http://example.com")
real_url = "http://example.com/_matrix/media/r0/download/foobar"
assert client.api.get_download_url("mxc://foobar") == real_url
with pytest.raises(ValueError):
client.api.get_download_url("http://foobar")
|
dovf/matrix-python-sdk
|
test/client_test.py
|
Python
|
apache-2.0
| 2,970
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Volume Type interface."""
from cinderclient import base
class VolumeType(base.Resource):
"""A Volume Type is the type of volume to be created."""
def __repr__(self):
return "<VolumeType: %s>" % self.name
def get_keys(self):
"""Get extra specs from a volume type.
:param vol_type: The :class:`VolumeType` to get extra specs from
"""
_resp, body = self.manager.api.client.get(
"/types/%s/extra_specs" %
base.getid(self))
return body["extra_specs"]
def set_keys(self, metadata):
"""Set extra specs on a volume type.
:param type : The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
"""
body = {'extra_specs': metadata}
return self.manager._create(
"/types/%s/extra_specs" % base.getid(self),
body,
"extra_specs",
return_raw=True)
def unset_keys(self, keys):
"""Unset extra specs on a volue type.
:param type_id: The :class:`VolumeType` to unset extra spec on
:param keys: A list of keys to be unset
"""
        # NOTE(jdg): This wasn't actually doing all of the keys before;
        # the return in the loop resulted in only ONE key being unset.
# since on success the return was NONE, we'll only interrupt the loop
# and return if there's an error
for k in keys:
resp = self.manager._delete(
"/types/%s/extra_specs/%s" % (
base.getid(self), k))
if resp is not None:
return resp
class VolumeTypeManager(base.ManagerWithFind):
"""Manage :class:`VolumeType` resources."""
resource_class = VolumeType
def list(self):
"""Get a list of all volume types.
:rtype: list of :class:`VolumeType`.
"""
return self._list("/types", "volume_types")
def get(self, volume_type):
"""Get a specific volume type.
:param volume_type: The ID of the :class:`VolumeType` to get.
:rtype: :class:`VolumeType`
"""
return self._get("/types/%s" % base.getid(volume_type), "volume_type")
def delete(self, volume_type):
"""Delete a specific volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get.
"""
self._delete("/types/%s" % base.getid(volume_type))
def create(self, name):
"""Create a volume type.
:param name: Descriptive name of the volume type
:rtype: :class:`VolumeType`
"""
body = {
"volume_type": {
"name": name,
}
}
return self._create("/types", body, "volume_type")
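# Hypothetical usage sketch (not part of the original module). It assumes an
# authenticated v2 cinderclient ``Client`` instance; the type name and
# extra-spec key below are illustrative.
def _volume_type_example(client):
    """Round-trip a volume type through the extra-specs API."""
    vtype = client.volume_types.create("gold")            # POST /types
    vtype.set_keys({"volume_backend_name": "lvm-gold"})   # add extra specs
    specs = vtype.get_keys()                              # read them back
    vtype.unset_keys(list(specs))                         # remove each key
    client.volume_types.delete(vtype)                     # DELETE /types/<id>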
|
ntt-sic/python-cinderclient
|
cinderclient/v2/volume_types.py
|
Python
|
apache-2.0
| 3,363
|
"""
GravityWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messages that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
import math
class GravityWalker(DirectObject.DirectObject):
notify = directNotify.newCategory("GravityWalker")
wantDebugIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
wantFloorSphere = base.config.GetBool('want-floor-sphere', 0)
earlyEventSphere = base.config.GetBool('early-event-sphere', 0)
DiagonalFactor = math.sqrt(2.) / 2.
# special methods
def __init__(self, gravity = 64.348, standableGround=0.707,
hardLandingForce=16.0, legacyLifter=False):
assert self.notify.debugStateCall(self)
DirectObject.DirectObject.__init__(self)
self.__gravity=gravity
self.__standableGround=standableGround
self.__hardLandingForce=hardLandingForce
self._legacyLifter = legacyLifter
self.mayJump = 1
self.jumpDelayTask = None
self.controlsTask = None
self.indicatorTask = None
self.falling = 0
self.needToDeltaPos = 0
self.physVelocityIndicator=None
self.avatarControlForwardSpeed=0
self.avatarControlJumpForce=0
self.avatarControlReverseSpeed=0
self.avatarControlRotateSpeed=0
self.getAirborneHeight=None
self.priorParent=Vec3(0)
self.__oldPosDelta=Vec3(0)
self.__oldDt=0
self.moving=0
self.speed=0.0
self.rotationSpeed=0.0
self.slideSpeed=0.0
self.vel=Vec3(0.0)
self.collisionsActive = 0
self.isAirborne = 0
self.highMark = 0
"""
def spawnTest(self):
assert self.notify.debugStateCall(self)
if not self.wantDebugIndicator:
return
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.coghq import MovingPlatform
if hasattr(self, "platform"):
# Remove the prior instantiation:
self.moveIval.pause()
del self.moveIval
self.platform.destroy()
del self.platform
self.platform2.destroy()
del self.platform2
model = loader.loadModel('phase_9/models/cogHQ/platform1')
fakeId = id(self)
self.platform = MovingPlatform.MovingPlatform()
self.platform.setupCopyModel(fakeId, model, 'platformcollision')
self.platformRoot = render.attachNewNode("GravityWalker-spawnTest-%s"%fakeId)
self.platformRoot.setPos(base.localAvatar, Vec3(0.0, 0.0, 1.0))
self.platformRoot.setHpr(base.localAvatar, Vec3.zero())
self.platform.reparentTo(self.platformRoot)
self.platform2 = MovingPlatform.MovingPlatform()
self.platform2.setupCopyModel(1+fakeId, model, 'platformcollision')
self.platform2Root = render.attachNewNode("GravityWalker-spawnTest2-%s"%fakeId)
self.platform2Root.setPos(base.localAvatar, Vec3(-16.0, 30.0, 1.0))
self.platform2Root.setHpr(base.localAvatar, Vec3.zero())
self.platform2.reparentTo(self.platform2Root)
duration = 5
self.moveIval = Parallel(
Sequence(
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
Vec3(0.0, 30.0, 0.0),
name='platformOut%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
Vec3(0.0, 0.0, 0.0),
name='platformBack%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
Vec3(0.0, 0.0, 30.0),
name='platformUp%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
Vec3(0.0, 0.0, 0.0),
name='platformDown%s' % fakeId,
fluid = 1),
),
Sequence(
WaitInterval(0.3),
LerpPosInterval(self.platform2, duration,
Vec3(0.0, -30.0, 0.0),
name='platform2Out%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform2, duration,
Vec3(0.0, 30.0, 30.0),
name='platform2Back%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform2, duration,
Vec3(0.0, -30.0, 0.0),
name='platform2Up%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform2, duration,
Vec3(0.0, 0.0, 0.0),
name='platformDown%s' % fakeId,
fluid = 1),
),
name='platformIval%s' % fakeId,
)
self.moveIval.loop()
"""
def setWalkSpeed(self, forward, jump, reverse, rotate):
assert self.notify.debugStateCall(self)
self.avatarControlForwardSpeed=forward
self.avatarControlJumpForce=jump
self.avatarControlReverseSpeed=reverse
self.avatarControlRotateSpeed=rotate
def getSpeeds(self):
#assert self.debugPrint("getSpeeds()")
return (self.speed, self.rotationSpeed, self.slideSpeed)
def getIsAirborne(self):
return self.isAirborne
def setAvatar(self, avatar):
self.avatar = avatar
if avatar is not None:
pass # setup the avatar
def setupRay(self, bitmask, floorOffset, reach):
assert self.notify.debugStateCall(self)
# This is a ray cast from your head down to detect floor polygons.
# This ray start is arbitrarily high in the air. Feel free to use
# a higher or lower value depending on whether you want an avatar
# that is outside of the world to step up to the floor when they
# get under valid floor:
cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
cRayNode = CollisionNode('GW.cRayNode')
cRayNode.addSolid(cRay)
self.cRayNodePath = self.avatarNodePath.attachNewNode(cRayNode)
cRayNode.setFromCollideMask(bitmask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
# set up floor collision mechanism
self.lifter = CollisionHandlerGravity()
#self.lifter = CollisionHandlerHighestEvent()
self.lifter.setLegacyMode(self._legacyLifter)
self.lifter.setGravity(self.__gravity)
self.lifter.addInPattern("enter%in")
self.lifter.addAgainPattern("again%in")
self.lifter.addOutPattern("exit%in")
self.lifter.setOffset(floorOffset)
self.lifter.setReach(reach)
# Limit our rate-of-fall with the lifter.
# If this is too low, we actually "fall" off steep stairs
# and float above them as we go down. I increased this
# from 8.0 to 16.0 to prevent this
#self.lifter.setMaxVelocity(16.0)
self.lifter.addCollider(self.cRayNodePath, self.avatarNodePath)
def setupWallSphere(self, bitmask, avatarRadius):
"""
Set up the collision sphere
"""
assert self.notify.debugStateCall(self)
# This is a sphere on the ground to detect collisions with
# walls, but not the floor.
self.avatarRadius = avatarRadius
cSphere = CollisionSphere(0.0, 0.0, avatarRadius, avatarRadius)
cSphereNode = CollisionNode('GW.cWallSphereNode')
cSphereNode.addSolid(cSphere)
cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
cSphereNode.setFromCollideMask(bitmask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# set up collision mechanism
if config.GetBool('want-fluid-pusher', 0):
self.pusher = CollisionHandlerFluidPusher()
else:
self.pusher = CollisionHandlerPusher()
self.pusher.addCollider(cSphereNodePath, self.avatarNodePath)
self.cWallSphereNodePath = cSphereNodePath
def setupEventSphere(self, bitmask, avatarRadius):
"""
Set up the collision sphere
"""
assert self.notify.debugStateCall(self)
# This is a sphere a little larger than the wall sphere to
# trigger events.
self.avatarRadius = avatarRadius
cSphere = CollisionSphere(0.0, 0.0, avatarRadius-0.1, avatarRadius*1.04)
# Mark it intangible just to emphasize its non-physical purpose.
cSphere.setTangible(0)
cSphereNode = CollisionNode('GW.cEventSphereNode')
cSphereNode.addSolid(cSphere)
cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
cSphereNode.setFromCollideMask(bitmask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# set up collision mechanism
self.event = CollisionHandlerEvent()
self.event.addInPattern("enter%in")
self.event.addOutPattern("exit%in")
self.cEventSphereNodePath = cSphereNodePath
def setupFloorSphere(self, bitmask, avatarRadius):
"""
Set up the collision sphere
"""
assert self.notify.debugStateCall(self)
# This is a tiny sphere concentric with the wallSphere to keep
# us from slipping through floors.
self.avatarRadius = avatarRadius
cSphere = CollisionSphere(0.0, 0.0, avatarRadius, 0.01)
cSphereNode = CollisionNode('GW.cFloorSphereNode')
cSphereNode.addSolid(cSphere)
cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
cSphereNode.setFromCollideMask(bitmask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# set up collision mechanism
        self.pusherFloor = CollisionHandlerPusher()
        self.pusherFloor.addCollider(cSphereNodePath, self.avatarNodePath)
self.cFloorSphereNodePath = cSphereNodePath
def setWallBitMask(self, bitMask):
self.wallBitmask = bitMask
def setFloorBitMask(self, bitMask):
self.floorBitmask = bitMask
def swapFloorBitMask(self, oldMask, newMask):
self.floorBitmask = self.floorBitmask &~ oldMask
self.floorBitmask |= newMask
if self.cRayNodePath and not self.cRayNodePath.isEmpty():
self.cRayNodePath.node().setFromCollideMask(self.floorBitmask)
def setGravity(self, gravity):
self.__gravity = gravity
self.lifter.setGravity(self.__gravity)
    def getGravity(self):
return self.__gravity
def initializeCollisions(self, collisionTraverser, avatarNodePath,
avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
"""
floorOffset is how high the avatar can reach. I.e. if the avatar
walks under a ledge that is <= floorOffset above the ground (a
double floor situation), the avatar will step up on to the
ledge (instantly).
Set up the avatar collisions
"""
assert self.notify.debugStateCall(self)
assert not avatarNodePath.isEmpty()
self.avatarNodePath = avatarNodePath
self.cTrav = collisionTraverser
self.setupRay(self.floorBitmask, floorOffset, reach)
self.setupWallSphere(self.wallBitmask, avatarRadius)
self.setupEventSphere(self.wallBitmask, avatarRadius)
if self.wantFloorSphere:
self.setupFloorSphere(self.floorBitmask, avatarRadius)
self.setCollisionsActive(1)
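    # Hypothetical end-to-end setup sketch (bitmasks, node path and speeds
    # are illustrative):
    #
    #   walker = GravityWalker()
    #   walker.setWallBitMask(BitMask32.bit(1))
    #   walker.setFloorBitMask(BitMask32.bit(2))
    #   walker.initializeCollisions(base.cTrav, avatarNP, avatarRadius=1.4)
    #   walker.setWalkSpeed(16.0, 24.0, 8.0, 80.0)
    #   walker.enableAvatarControls()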
def setTag(self, key, value):
self.cEventSphereNodePath.setTag(key, value)
def setAirborneHeightFunc(self, unused_parameter):
assert self.notify.debugStateCall(self)
self.getAirborneHeight = self.lifter.getAirborneHeight
    def getAirborneHeight(self):
        assert self.notify.debugStateCall(self)
        return self.lifter.getAirborneHeight()
def setAvatarPhysicsIndicator(self, indicator):
"""
indicator is a NodePath
"""
assert self.notify.debugStateCall(self)
self.cWallSphereNodePath.show()
def deleteCollisions(self):
assert self.notify.debugStateCall(self)
del self.cTrav
self.cWallSphereNodePath.removeNode()
del self.cWallSphereNodePath
if self.wantFloorSphere:
self.cFloorSphereNodePath.removeNode()
del self.cFloorSphereNodePath
del self.pusher
# del self.pusherFloor
del self.event
del self.lifter
del self.getAirborneHeight
def setCollisionsActive(self, active = 1):
assert self.notify.debugStateCall(self)
if self.collisionsActive != active:
self.collisionsActive = active
# Each time we change the collision geometry, make one
# more pass to ensure we aren't standing in a wall.
self.oneTimeCollide()
# make sure we have a shadow traverser
base.initShadowTrav()
if active:
if 1:
# Please let skyler or drose know if this is causing a problem
# This is a bit of a hack fix:
self.avatarNodePath.setP(0.0)
self.avatarNodePath.setR(0.0)
self.cTrav.addCollider(self.cWallSphereNodePath, self.pusher)
if self.wantFloorSphere:
self.cTrav.addCollider(self.cFloorSphereNodePath, self.pusherFloor)
# Add the lifter to the shadow traverser, which runs after
# our traverser. This prevents the "fall through wall and
# off ledge" bug. The problem was that we couldn't control
# which collided first, the wall pusher or the lifter, if
# they're in the same collision traverser. If the lifter
# collided first, we'd start falling before getting pushed
# back behind the wall.
base.shadowTrav.addCollider(self.cRayNodePath, self.lifter)
if self.earlyEventSphere:
# If we want to trigger the events at the same
# time as we intersect walls (e.g. Toontown, for
# backward compatibility issues), add the event
# sphere to the main traverser. This allows us to
# hit door triggers that are just slightly behind
# the door itself.
self.cTrav.addCollider(self.cEventSphereNodePath, self.event)
else:
# Normally, we'd rather trigger the events after
# the pusher has had a chance to fix up our
# position, so we never trigger things that are
# behind other polygons.
base.shadowTrav.addCollider(self.cEventSphereNodePath, self.event)
else:
if hasattr(self, 'cTrav'):
self.cTrav.removeCollider(self.cWallSphereNodePath)
if self.wantFloorSphere:
self.cTrav.removeCollider(self.cFloorSphereNodePath)
self.cTrav.removeCollider(self.cEventSphereNodePath)
base.shadowTrav.removeCollider(self.cEventSphereNodePath)
base.shadowTrav.removeCollider(self.cRayNodePath)
def getCollisionsActive(self):
assert self.debugPrint("getCollisionsActive() returning=%s"%(
self.collisionsActive,))
return self.collisionsActive
def placeOnFloor(self):
"""
        Make a reasonable effort to place the avatar on the ground.
For example, this is useful when switching away from the
current walker.
"""
assert self.notify.debugStateCall(self)
self.oneTimeCollide()
self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.lifter.getAirborneHeight())
def oneTimeCollide(self):
"""
Makes one quick collision pass for the avatar, for instance as
a one-time straighten-things-up operation after collisions
have been disabled.
"""
assert self.notify.debugStateCall(self)
if not hasattr(self, 'cWallSphereNodePath'):
return
self.isAirborne = 0
self.mayJump = 1
tempCTrav = CollisionTraverser("oneTimeCollide")
tempCTrav.addCollider(self.cWallSphereNodePath, self.pusher)
if self.wantFloorSphere:
tempCTrav.addCollider(self.cFloorSphereNodePath, self.event)
tempCTrav.addCollider(self.cRayNodePath, self.lifter)
tempCTrav.traverse(render)
def setMayJump(self, task):
"""
This function's use is internal to this class (maybe I'll add
the __ someday). Anyway, if you want to enable or disable
jumping in a general way see the ControlManager (don't use this).
"""
assert self.notify.debugStateCall(self)
self.mayJump = 1
return Task.done
def startJumpDelay(self, delay):
assert self.notify.debugStateCall(self)
if self.jumpDelayTask:
self.jumpDelayTask.remove()
self.mayJump = 0
self.jumpDelayTask=taskMgr.doMethodLater(
delay,
self.setMayJump,
"jumpDelay-%s"%id(self))
def addBlastForce(self, vector):
self.lifter.addVelocity(vector.length())
def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("w controls", "GravityWalker")
onScreenDebug.add("w airborneHeight", self.lifter.getAirborneHeight())
onScreenDebug.add("w falling", self.falling)
onScreenDebug.add("w isOnGround", self.lifter.isOnGround())
#onScreenDebug.add("w gravity", self.lifter.getGravity())
#onScreenDebug.add("w jumpForce", self.avatarControlJumpForce)
onScreenDebug.add("w contact normal", self.lifter.getContactNormal().pPrintValues())
onScreenDebug.add("w mayJump", self.mayJump)
onScreenDebug.add("w impact", self.lifter.getImpactVelocity())
onScreenDebug.add("w velocity", self.lifter.getVelocity())
onScreenDebug.add("w isAirborne", self.isAirborne)
onScreenDebug.add("w hasContact", self.lifter.hasContact())
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
# get the button states:
run = inputState.isSet("run")
forward = inputState.isSet("forward")
reverse = inputState.isSet("reverse")
turnLeft = inputState.isSet("turnLeft")
turnRight = inputState.isSet("turnRight")
slideLeft = inputState.isSet("slideLeft")
slideRight = inputState.isSet("slideRight")
jump = inputState.isSet("jump")
# Check for Auto-Run
if 'localAvatar' in __builtins__:
if base.localAvatar and base.localAvatar.getAutoRun():
forward = 1
reverse = 0
# Determine what the speeds are based on the buttons:
self.speed=(forward and self.avatarControlForwardSpeed or
reverse and -self.avatarControlReverseSpeed)
# Slide speed is a scaled down version of forward speed
# Note: you can multiply a factor in here if you want slide to
# be slower than normal walk/run. Let's try full speed.
#self.slideSpeed=(slideLeft and -self.avatarControlForwardSpeed*0.75 or
# slideRight and self.avatarControlForwardSpeed*0.75)
self.slideSpeed=(reverse and slideLeft and -self.avatarControlReverseSpeed*0.75 or
reverse and slideRight and self.avatarControlReverseSpeed*0.75 or
slideLeft and -self.avatarControlForwardSpeed*0.75 or
slideRight and self.avatarControlForwardSpeed*0.75)
self.rotationSpeed=not (slideLeft or slideRight) and (
(turnLeft and self.avatarControlRotateSpeed) or
(turnRight and -self.avatarControlRotateSpeed))
if self.speed and self.slideSpeed:
self.speed *= GravityWalker.DiagonalFactor
self.slideSpeed *= GravityWalker.DiagonalFactor
debugRunning = inputState.isSet("debugRunning")
if(debugRunning):
self.speed*=base.debugRunningMultiplier
self.slideSpeed*=base.debugRunningMultiplier
self.rotationSpeed*=1.25
if self.needToDeltaPos:
self.setPriorParentVector()
self.needToDeltaPos = 0
if self.wantDebugIndicator:
self.displayDebugInfo()
if self.lifter.isOnGround():
if self.isAirborne:
self.isAirborne = 0
assert self.debugPrint("isAirborne 0 due to isOnGround() true")
impact = self.lifter.getImpactVelocity()
if impact < -30.0:
messenger.send("jumpHardLand")
self.startJumpDelay(0.3)
else:
messenger.send("jumpLand")
if impact < -5.0:
self.startJumpDelay(0.2)
# else, ignore the little potholes.
assert self.isAirborne == 0
self.priorParent = Vec3.zero()
if jump and self.mayJump:
# The jump button is down and we're close
# enough to the ground to jump.
self.lifter.addVelocity(self.avatarControlJumpForce)
messenger.send("jumpStart")
self.isAirborne = 1
assert self.debugPrint("isAirborne 1 due to jump")
else:
if self.isAirborne == 0:
assert self.debugPrint("isAirborne 1 due to isOnGround() false")
self.isAirborne = 1
self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
# How far did we move based on the amount of time elapsed?
self.__oldDt = ClockObject.getGlobalClock().getDt()
dt=self.__oldDt
# Check to see if we're moving at all:
self.moving = self.speed or self.slideSpeed or self.rotationSpeed or (self.priorParent!=Vec3.zero())
if self.moving:
distance = dt * self.speed
slideDistance = dt * self.slideSpeed
rotation = dt * self.rotationSpeed
# Take a step in the direction of our previous heading.
if distance or slideDistance or self.priorParent != Vec3.zero():
# rotMat is the rotation matrix corresponding to
# our previous heading.
rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
if self.isAirborne:
forward = Vec3.forward()
else:
contact = self.lifter.getContactNormal()
forward = contact.cross(Vec3.right())
                # Consider commenting out this normalize. If you do so
                # then going up and down slopes is a touch slower and
                # steeper terrain can cut the movement in half. Without
                # the normalize the movement is slowed by the cosine of
                # the slope (i.e. it is multiplied by the sine as a
                # side effect of the cross product above).
forward.normalize()
self.vel=Vec3(forward * distance)
if slideDistance:
if self.isAirborne:
right = Vec3.right()
else:
right = forward.cross(contact)
# See note above for forward.normalize()
right.normalize()
self.vel=Vec3(self.vel + (right * slideDistance))
self.vel=Vec3(rotMat.xform(self.vel))
step=self.vel + (self.priorParent * dt)
self.avatarNodePath.setFluidPos(Point3(
self.avatarNodePath.getPos()+step))
self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
else:
self.vel.set(0.0, 0.0, 0.0)
if self.moving or jump:
messenger.send("avatarMoving")
return Task.cont
def doDeltaPos(self):
assert self.notify.debugStateCall(self)
self.needToDeltaPos = 1
def setPriorParentVector(self):
assert self.notify.debugStateCall(self)
if __debug__:
onScreenDebug.add("__oldDt", "% 10.4f"%self.__oldDt)
onScreenDebug.add("self.__oldPosDelta",
self.__oldPosDelta.pPrintValues())
# avoid divide by zero crash - grw
if self.__oldDt == 0:
velocity = 0
else:
velocity = self.__oldPosDelta*(1.0/self.__oldDt)
self.priorParent = Vec3(velocity)
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.add("priorParent", self.priorParent.pPrintValues())
def reset(self):
assert self.notify.debugStateCall(self)
self.lifter.setVelocity(0.0)
self.priorParent=Vec3.zero()
def getVelocity(self):
return self.vel
def enableAvatarControls(self):
"""
Activate the arrow keys, etc.
"""
assert self.notify.debugStateCall(self)
assert self.collisionsActive
#*#if __debug__:
#*# self.accept("control-f3", self.spawnTest) #*#
# remove any old
if self.controlsTask:
self.controlsTask.remove()
# spawn the new task
taskName = "AvatarControls-%s"%(id(self),)
self.controlsTask = taskMgr.add(self.handleAvatarControls, taskName, 25)
self.isAirborne = 0
self.mayJump = 1
if self.physVelocityIndicator:
if self.indicatorTask:
self.indicatorTask.remove()
self.indicatorTask = taskMgr.add(
self.avatarPhysicsIndicator,
"AvatarControlsIndicator-%s"%(id(self),), 35)
def disableAvatarControls(self):
"""
Ignore the arrow keys, etc.
"""
assert self.notify.debugStateCall(self)
if self.controlsTask:
self.controlsTask.remove()
self.controlsTask = None
if self.indicatorTask:
self.indicatorTask.remove()
self.indicatorTask = None
if self.jumpDelayTask:
self.jumpDelayTask.remove()
self.jumpDelayTask = None
if __debug__:
self.ignore("control-f3") #*#
def flushEventHandlers(self):
if hasattr(self, 'cTrav'):
self.pusher.flush()
if self.wantFloorSphere:
                self.pusherFloor.flush()
self.event.flush()
        #self.lifter.flush() # not currently defined or needed
if __debug__:
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug(
str(id(self))+' '+message)
# There are sometimes issues if the collision ray height is
# so tall that it collides with multiple levels of floors.
def setCollisionRayHeight(self, height):
oldNode = self.avatarNodePath.getNode(0)
cRayNode = oldNode.getChild(2)
cRayNode.removeSolid(0)
cRay = CollisionRay(0.0, 0.0, height, 0.0, 0.0, -1.0)
cRayNode.addSolid(cRay)
|
jjkoletar/panda3d
|
direct/src/controls/GravityWalker.py
|
Python
|
bsd-3-clause
| 28,692
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import cyclone.web
from twisted.python import log
from twisted.internet import reactor
# Helper function to convert bytes to human readable strings
humanreadable = lambda s: [(s % 1024 ** i and "%.1f" % (s / 1024.0 ** i) or \
str(s / 1024 ** i)) + x.strip() + "B" \
for i, x in enumerate(' KMGTPEZY') \
if s < 1024 ** (i + 1) or i == 8][0]
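# Worked example: humanreadable(123456789) -> "117.7MB" (123456789 / 1024**2).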
class Application(cyclone.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
]
settings = dict(
debug=True,
template_path="./template",
repository_path="./uploaded_files",
)
if not os.path.exists(settings["repository_path"]):
try:
os.mkdir(settings["repository_path"])
except Exception as e:
print("mkdir failed: %s" % str(e))
sys.exit(1)
cyclone.web.Application.__init__(self, handlers, **settings)
class IndexHandler(cyclone.web.RequestHandler):
def get(self):
self.render("index.html", missing=[], info=None)
def post(self):
name = self.get_argument("fullname", None)
if name is None:
self.render("index.html", missing=["fullname"], info=None)
return
picture = self.request.files.get("picture")
if picture is None:
self.render("index.html", missing=["picture"], info=None)
return
else:
picture = picture[0]
# File properties
filename = picture["filename"]
content_type = picture["content_type"]
body = picture["body"] # bytes!
try:
fn = os.path.join(self.settings.repository_path, filename)
            fp = open(os.path.abspath(fn), "wb")  # picture body is bytes
fp.write(body)
fp.close()
except Exception as e:
log.msg("Could not write file: %s" % str(e))
raise cyclone.web.HTTPError(500)
self.render("index.html", missing=[], info={
"name": name,
"file": "%s, type=%s, size=%s" % \
(filename, content_type, humanreadable(len(body)))})
def main():
log.startLogging(sys.stdout)
reactor.listenTCP(8888, Application(), interface="127.0.0.1")
reactor.run()
if __name__ == "__main__":
main()
|
fiorix/cyclone
|
demos/upload/uploaddemo.py
|
Python
|
apache-2.0
| 3,092
|
from nba_py import game
def test():
gid = '0041400122'
assert game.BoxscoreSummary(gid)
assert game.Boxscore(gid)
assert game.BoxscoreScoring(gid)
assert game.BoxscoreUsage(gid)
assert game.BoxscoreMisc(gid)
assert game.BoxscoreAdvanced(gid)
assert game.BoxscoreFourFactors(gid)
assert game.PlayByPlay(gid)
|
mcdallas/nba_py
|
tests/test_nba_py_game.py
|
Python
|
bsd-3-clause
| 344
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Application-wide functionality.
Applications
------------
Most applications need only call :func:`run` after creating one or more
windows to begin processing events. For example, a simple application
consisting of one window is::
import pyglet
win = pyglet.window.Window()
pyglet.app.run()
Events
======
To handle events on the main event loop, instantiate it manually. The
following example exits the application as soon as any window is closed (the
default policy is to wait until all windows are closed)::
event_loop = pyglet.app.EventLoop()
@event_loop.event
def on_window_close(window):
event_loop.exit()
:since: pyglet 1.1
:attr:`event_loop` is the global event loop. Applications can replace this
with their own subclass of :class:`EventLoop` before calling
:meth:`EventLoop.run`.
:attr:`platform_event_loop` is the platform-dependent event loop.
Applications must not subclass or replace this :class:`PlatformEventLoop`
object.
"""
import weakref
class AppException(Exception):
pass
displays = weakref.WeakSet()
'''Set of all open displays. Instances of :class:`pyglet.canvas.Display`
are automatically added to this set upon construction. The set uses weak
references, so displays are removed from the set when they are no longer
referenced.
:deprecated: Use :func:`pyglet.canvas.get_display`.
:type: :class:`weakref.WeakSet`
'''
windows = weakref.WeakSet()
"""Set of all open windows (including invisible windows). Instances of
:class:`pyglet.window.Window` are automatically added to this set upon
construction. The set uses weak references, so windows are removed from
the set when they are no longer referenced or are closed explicitly.
"""
def run():
"""Begin processing events, scheduled functions and window updates.
This is a convenience function, equivalent to::
pyglet.app.event_loop.run()
"""
event_loop.run()
def exit():
"""Exit the application event loop.
Causes the application event loop to finish, if an event loop is currently
running. The application may not necessarily exit (for example, there may
be additional code following the `run` invocation).
This is a convenience function, equivalent to::
event_loop.exit()
"""
event_loop.exit()
from pyglet.app.base import EventLoop
from pyglet import compat_platform
if compat_platform == 'darwin':
from pyglet.app.cocoa import CocoaEventLoop as PlatformEventLoop
elif compat_platform in ('win32', 'cygwin'):
from pyglet.app.win32 import Win32EventLoop as PlatformEventLoop
else:
from pyglet.app.xlib import XlibEventLoop as PlatformEventLoop
event_loop = EventLoop()
platform_event_loop = PlatformEventLoop()
|
bitcraft/pyglet
|
pyglet/app/__init__.py
|
Python
|
bsd-3-clause
| 4,453
|
#!/usr/bin/env python
import paramiko
import time
from getpass import getpass
def prevent_paging(remote_conn):
''' stop pagination '''
remote_conn.send("\n")
remote_conn.send("term len 0\n")
time.sleep(1)
''' clear output buffer '''
output = remote_conn.recv(1000)
return output
def close_connection(remote_conn):
''' close SSH connection '''
remote_conn.close()
def start_config_mode(remote_conn):
''' get into configuration mode on Cisco gear '''
remote_conn.send("\n")
remote_conn.send("config t\n")
time.sleep(1)
def exit_config_mode(remote_conn):
''' leave config mode '''
remote_conn.send("\n")
remote_conn.send("end\n")
if __name__ == '__main__':
''' set static variables '''
device = '184.105.247.71'
username = 'pyclass'
password = getpass()
''' initialize variables '''
remote_conn_pre = paramiko.SSHClient()
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
''' connect to device '''
remote_conn_pre.connect(device, username=username, password=password, look_for_keys=False, allow_agent=False)
remote_conn = remote_conn_pre.invoke_shell()
''' go into configuration mode '''
start_config_mode(remote_conn)
''' send config change commands to device '''
remote_conn.send("\n")
remote_conn.send("logging buffered 99999\n")
time.sleep(1)
''' exit configuration mode '''
exit_config_mode(remote_conn)
''' disable paging using function '''
prevent_paging(remote_conn)
''' send command to device and print results '''
remote_conn.send("\n")
remote_conn.send("sho run | inc buffered\n")
time.sleep(1)
output = remote_conn.recv(50000)
print output
''' close connection using function '''
close_connection(remote_conn)
|
daveg999/automation_class
|
class4/class4_ex2.py
|
Python
|
apache-2.0
| 1,853
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import uuid
import mock
from oslo.config import cfg
from sqlalchemy.orm import query
from neutron.common import constants
from neutron.common import topics
from neutron import context as q_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import timeutils
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
HOST = 'my_l3_host'
FIRST_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_2 = 'my_l3_host_2'
SECOND_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_2,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_3 = 'my_l3_host_3'
THIRD_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_3,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_4 = 'my_l3_host_4'
FOURTH_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_4,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_DVR = 'my_l3_host_dvr'
DVR_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_DVR,
'topic': topics.L3_AGENT,
'configurations': {'agent_mode': 'dvr'},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
DVR_SNAT_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_DVR_SNAT,
'topic': topics.L3_AGENT,
'configurations': {'agent_mode': 'dvr_snat'},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
def schedule(self):
pass
def _choose_router_agent(self):
pass
def _choose_router_agents_for_ha(self):
pass
class L3SchedulerBaseTestCase(base.BaseTestCase):
def setUp(self):
super(L3SchedulerBaseTestCase, self).setUp()
self.scheduler = FakeL3Scheduler()
self.plugin = mock.Mock()
def test_auto_schedule_routers(self):
self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
with contextlib.nested(
mock.patch.object(self.scheduler, 'get_routers_to_schedule'),
mock.patch.object(self.scheduler, 'get_routers_can_schedule')) as (
gs, gr):
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertTrue(result)
self.assertTrue(gs.called)
self.assertTrue(gr.called)
def test_auto_schedule_routers_no_agents(self):
self.plugin.get_enabled_agent_on_host.return_value = None
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test_auto_schedule_routers_no_unscheduled_routers(self):
type(self.plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=[]))
with mock.patch.object(self.scheduler,
'get_routers_to_schedule') as mock_routers:
mock_routers.return_value = []
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test_auto_schedule_routers_no_target_routers(self):
self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
with contextlib.nested(
mock.patch.object(self.scheduler, 'get_routers_to_schedule'),
mock.patch.object(self.scheduler, 'get_routers_can_schedule')) as (
mock_unscheduled_routers, mock_target_routers):
mock_unscheduled_routers.return_value = mock.ANY
mock_target_routers.return_value = None
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test_get_routers_to_schedule_with_router_ids(self):
router_ids = ['foo_router_1', 'foo_router_2']
expected_routers = [
{'id': 'foo_router1'}, {'id': 'foo_router_2'}
]
self.plugin.get_routers.return_value = expected_routers
with mock.patch.object(self.scheduler,
'filter_unscheduled_routers') as mock_filter:
mock_filter.return_value = expected_routers
unscheduled_routers = self.scheduler.get_routers_to_schedule(
mock.ANY, self.plugin, router_ids)
mock_filter.assert_called_once_with(
mock.ANY, self.plugin, expected_routers)
self.assertEqual(expected_routers, unscheduled_routers)
def test_get_routers_to_schedule_without_router_ids(self):
expected_routers = [
{'id': 'foo_router1'}, {'id': 'foo_router_2'}
]
with mock.patch.object(self.scheduler,
'get_unscheduled_routers') as mock_get:
mock_get.return_value = expected_routers
unscheduled_routers = self.scheduler.get_routers_to_schedule(
mock.ANY, self.plugin)
mock_get.assert_called_once_with(mock.ANY, self.plugin)
self.assertEqual(expected_routers, unscheduled_routers)
def test_get_routers_to_schedule_exclude_distributed(self):
routers = [
{'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
]
expected_routers = [{'id': 'foo_router_2'}]
with mock.patch.object(self.scheduler,
'get_unscheduled_routers') as mock_get:
mock_get.return_value = routers
unscheduled_routers = self.scheduler.get_routers_to_schedule(
mock.ANY, self.plugin,
router_ids=None, exclude_distributed=True)
mock_get.assert_called_once_with(mock.ANY, self.plugin)
self.assertEqual(expected_routers, unscheduled_routers)
def _test_get_routers_can_schedule(self, routers, agent, target_routers):
self.plugin.get_l3_agent_candidates.return_value = agent
result = self.scheduler.get_routers_can_schedule(
mock.ANY, self.plugin, routers, mock.ANY)
self.assertEqual(target_routers, result)
def _test_filter_unscheduled_routers(self, routers, agents, expected):
self.plugin.get_l3_agents_hosting_routers.return_value = agents
unscheduled_routers = self.scheduler.filter_unscheduled_routers(
mock.ANY, self.plugin, routers)
self.assertEqual(expected, unscheduled_routers)
def test_filter_unscheduled_routers_already_scheduled(self):
self._test_filter_unscheduled_routers(
[{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
[{'id': 'foo_agent_id'}], [])
def test_filter_unscheduled_routers_non_scheduled(self):
self._test_filter_unscheduled_routers(
[{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])
def test_get_routers_can_schedule_with_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test_get_routers_can_schedule(routers, mock.ANY, routers)
def test_get_routers_can_schedule_with_no_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test_get_routers_can_schedule(routers, None, [])
def test_bind_routers_centralized(self):
routers = [{'id': 'foo_router'}]
with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
self.scheduler.bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)
def _test_bind_routers_ha(self, has_binding):
routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
agent = agents_db.Agent(id='foo_agent')
with contextlib.nested(
mock.patch.object(self.scheduler, 'router_has_binding',
return_value=has_binding),
mock.patch.object(self.scheduler, 'create_ha_router_binding')) as (
mock_has_binding, mock_bind):
self.scheduler.bind_routers(mock.ANY, mock.ANY, routers, agent)
mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
'foo_agent')
self.assertEqual(not has_binding, mock_bind.called)
def test_bind_routers_ha_has_binding(self):
self._test_bind_routers_ha(has_binding=True)
def test_bind_routers_ha_no_binding(self):
self._test_bind_routers_ha(has_binding=False)
class L3SchedulerBaseMixin(object):
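    """Helpers shared by the scheduler test cases for registering L3 agents
    and toggling their admin/alive state; relies on the FIRST_L3_AGENT,
    SECOND_L3_AGENT and DVR agent fixtures defined earlier in this module.
    """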
def _register_l3_agent(self, agent, plugin=None):
if not plugin:
plugin = self.plugin
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent},
time=timeutils.strtime())
agent_db = plugin.get_agents_db(self.adminContext,
filters={'host': [agent['host']]})
return agent_db[0]
def _register_l3_agents(self, plugin=None):
self.agent1 = self._register_l3_agent(FIRST_L3_AGENT, plugin)
self.agent_id1 = self.agent1.id
self.agent2 = self._register_l3_agent(SECOND_L3_AGENT, plugin)
self.agent_id2 = self.agent2.id
def _register_l3_dvr_agents(self):
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': DVR_L3_AGENT},
time=timeutils.strtime())
agent_db = self.plugin.get_agents_db(self.adminContext,
filters={'host': [HOST_DVR]})
self.l3_dvr_agent = agent_db[0]
callback.report_state(self.adminContext,
agent_state={'agent_state': DVR_SNAT_L3_AGENT},
time=timeutils.strtime())
agent_db = self.plugin.get_agents_db(self.adminContext,
filters={'host': [HOST_DVR_SNAT]})
self.l3_dvr_snat_id = agent_db[0].id
self.l3_dvr_snat_agent = agent_db[0]
def _set_l3_agent_admin_state(self, context, agent_id, state=True):
update = {'agent': {'admin_state_up': state}}
self.plugin.update_agent(context, agent_id, update)
def _set_l3_agent_dead(self, agent_id):
update = {
'agent': {
'heartbeat_timestamp':
timeutils.utcnow() - datetime.timedelta(hours=1)}}
self.plugin.update_agent(self.adminContext, agent_id, update)
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
fmt=None, tenant_id=str(uuid.uuid4()),
external_gateway_info=None,
subnet=None, set_context=False,
**kwargs):
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
set_context, **kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
yield router
self._remove_external_gateway_from_router(
router['router']['id'], subnet['subnet']['network_id'])
self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
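    """Scheduler behaviour tests shared by the concrete test cases below.

    Expects the host class to expose the scheduler plugin API (e.g.
    add_router_to_l3_agent, get_l3_agent_candidates) plus the agent
    fixtures registered by L3SchedulerBaseMixin.
    """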
def _test_add_router_to_l3_agent(self,
distributed=False,
already_scheduled=False):
agent_id = self.agent_id1
agent = self.agent1
if distributed:
self._register_l3_dvr_agents()
agent_id = self.l3_dvr_snat_id
agent = self.l3_dvr_snat_agent
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
router['router']['distributed'] = distributed
router['router']['external_gateway_info'] = None
if already_scheduled:
self._test_schedule_bind_router(agent, router)
with contextlib.nested(
mock.patch.object(self, "validate_agent_router_combination"),
mock.patch.object(self, "create_router_to_agent_binding"),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router'])
) as (valid, auto_s, gr):
self.add_router_to_l3_agent(self.adminContext, agent_id,
router['router']['id'])
self.assertNotEqual(already_scheduled, auto_s.called)
def test_add_router_to_l3_agent(self):
self._test_add_router_to_l3_agent(distributed=False,
already_scheduled=False)
def test_add_distributed_router_to_l3_agent(self):
self._test_add_router_to_l3_agent(distributed=True,
already_scheduled=False)
def test_add_router_to_l3_agent_already_scheduled(self):
self._test_add_router_to_l3_agent(distributed=False,
already_scheduled=True)
def test_add_distributed_router_to_l3_agent_already_scheduled(self):
self._test_add_router_to_l3_agent(distributed=True,
already_scheduled=True)
def _prepare_schedule_dvr_tests(self):
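        """Build a ChanceScheduler and a mocked plugin with one live agent."""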
scheduler = l3_agent_scheduler.ChanceScheduler()
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
plugin = mock.Mock()
plugin.get_l3_agents_hosting_routers.return_value = []
plugin.get_l3_agents.return_value = [agent]
plugin.get_l3_agent_candidates.return_value = [agent]
return scheduler, agent, plugin
def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True
}
plugin.get_router.return_value = sync_router
with contextlib.nested(
mock.patch.object(scheduler, 'bind_router'),
mock.patch.object(
plugin, 'get_snat_bindings', return_value=False)
):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
]
plugin.assert_has_calls(expected_calls)
def test_schedule_dvr_router_with_snatbinding_no_gw(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {'id': 'foo_router_id',
'distributed': True}
plugin.get_router.return_value = sync_router
with contextlib.nested(
mock.patch.object(scheduler, 'bind_router'),
mock.patch.object(plugin, 'get_snat_bindings', return_value=True)):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
]
plugin.assert_has_calls(expected_calls)
def test_schedule_router_distributed(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': str(uuid.uuid4()),
'enable_snat': True
}
}
plugin.get_router.return_value = sync_router
with contextlib.nested(
mock.patch.object(scheduler, 'bind_router'),
mock.patch.object(
plugin, 'get_snat_bindings', return_value=False)
):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.schedule_snat_router(
mock.ANY, 'foo_router_id', sync_router),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
]
plugin.assert_has_calls(expected_calls)
def _test_schedule_bind_router(self, agent, router):
ctx = self.adminContext
session = ctx.session
db = l3_agentschedulers_db.RouterL3AgentBinding
scheduler = l3_agent_scheduler.ChanceScheduler()
rid = router['router']['id']
scheduler.bind_router(ctx, rid, agent)
results = (session.query(db).filter_by(router_id=rid).all())
self.assertTrue(len(results) > 0)
self.assertIn(agent.id, [bind.l3_agent_id for bind in results])
def test_bind_new_router(self):
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('is scheduled', args[0])
def test_bind_existing_router(self):
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
self._test_schedule_bind_router(self.agent1, router)
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('has already been scheduled', args[0])
def _check_get_l3_agent_candidates(
self, router, agent_list, exp_host, count=1):
candidates = self.get_l3_agent_candidates(self.adminContext,
router, agent_list)
self.assertEqual(len(candidates), count)
if count:
self.assertEqual(candidates[0]['host'], exp_host)
def test_get_l3_agent_candidates_legacy(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
# test legacy agent_mode case: only legacy agent should be candidate
router['distributed'] = False
exp_host = FIRST_L3_AGENT.get('host')
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
        # test dvr agent_mode case: only dvr agent should be candidate
router['distributed'] = True
exp_host = DVR_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
exp_host = DVR_L3_AGENT.get('host')
router['distributed'] = True
# Test no VMs present case
self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
self._check_get_l3_agent_candidates(
router, agent_list, exp_host, count=0)
def test_get_l3_agent_candidates_dvr_snat(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
exp_host = DVR_SNAT_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
exp_host = DVR_SNAT_L3_AGENT.get('host')
        # Test no VMs present case
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
self._check_get_l3_agent_candidates(
router, agent_list, exp_host, count=0)
def test_get_l3_agent_candidates_centralized(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
# check centralized test case
router['distributed'] = False
exp_host = DVR_SNAT_L3_AGENT.get('host')
agent_list = [self.l3_dvr_snat_agent]
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def _prepare_check_ports_exist_tests(self):
l3_agent = agents_db.Agent()
l3_agent.admin_state_up = True
l3_agent.host = HOST
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
self.plugin.get_ports = mock.Mock(return_value=[])
self.get_subnet_ids_on_router = mock.Mock(return_value=[])
return l3_agent, router
def test_check_ports_exist_on_l3agent_no_subnets(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
with mock.patch.object(manager.NeutronManager,
'get_plugin') as getp:
getp.return_value = self.plugin
# no subnets
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_no_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
with mock.patch.object(manager.NeutronManager,
'get_plugin') as getp:
getp.return_value = self.plugin
# no matching subnet
self.get_subnet_ids_on_router.return_value = [str(uuid.uuid4())]
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
with mock.patch.object(manager.NeutronManager,
'get_plugin') as getp:
getp.return_value = self.plugin
# matching subnet
port = {'subnet_id': str(uuid.uuid4()),
'binding:host_id': HOST,
'device_owner': 'compute:',
'id': 1234}
self.plugin.get_ports.return_value = [port]
self.plugin.get_subnet_ids_on_router = mock.Mock(
return_value=[port['subnet_id']])
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertTrue(val)
class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
l3_db.L3_NAT_db_mixin,
common_db_mixin.CommonDbMixin,
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
L3SchedulerBaseMixin,
L3SchedulerTestBaseMixin):
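    """Concrete scheduler test case: wires the mixins above to a real
    in-memory plugin and installs the ChanceScheduler in setUp().
    """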
def setUp(self):
self.mock_rescheduling = False
ext_mgr = test_l3_plugin.L3TestExtensionManager()
plugin_str = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatIntAgentSchedulingPlugin')
super(L3SchedulerTestCase, self).setUp(plugin=plugin_str,
ext_mgr=ext_mgr)
self.adminContext = q_context.get_admin_context()
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase):
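    """Tests specific to the random (chance) scheduling strategy."""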
def test_random_scheduling(self):
random_patch = mock.patch('random.choice')
random_mock = random_patch.start()
def side_effect(seq):
return seq[0]
random_mock.side_effect = side_effect
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
self.assertEqual(random_mock.call_count, 1)
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
self.assertEqual(random_mock.call_count, 2)
random_patch.stop()
def test_scheduler_auto_schedule_when_agent_added(self):
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(0, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, True)
self.plugin.auto_schedule_routers(self.adminContext,
FIRST_L3_AGENT['host'],
[r1['router']['id']])
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(FIRST_L3_AGENT['host'], agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase):
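    """Tests for LeastRoutersScheduler, which picks the least-loaded agent."""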
def setUp(self):
super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
        # disable one agent to force scheduling onto the only remaining one.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id1 = agents[0]['id']
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id2 = agents[0]['id']
self.assertEqual(agent_id1, agent_id2)
                    # re-enable the second agent to check that the next
                    # router created is scheduled on it.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, True)
with self.router_with_ext_gw(name='r3',
subnet=subnet) as r3:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r3['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id3 = agents[0]['id']
self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
l3_dvrscheduler_db.L3_DVRsch_db_mixin):
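    """Composite device-under-test: base L3 DB logic plus the DVR
    scheduler mixin."""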
pass
class L3DvrSchedulerTestCase(testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
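    """Unit tests for the DVR scheduling helpers, driven entirely by mocks."""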
def setUp(self):
plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
self.setup_coreplugin(plugin)
super(L3DvrSchedulerTestCase, self).setUp()
self.adminContext = q_context.get_admin_context()
self.dut = L3DvrScheduler()
def test_dvr_update_router_addvm(self):
port = {
'device_id': 'abcd',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.3'
}
]
}
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
self.dut.dvr_update_router_addvm(self.adminContext, port)
def test_get_dvr_routers_by_portid(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_port', return_value=dvr_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
router_id = self.dut.get_dvr_routers_by_portid(self.adminContext,
dvr_port['id'])
self.assertEqual(router_id.pop(), r1['id'])
def test_get_subnet_ids_on_router(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
def test_check_ports_active_on_host_and_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost', 'dvr_port1',
sub_ids)
self.assertFalse(result)
def _test_dvr_serviced_port_exists_on_subnet(self, port):
with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', return_value=[port]):
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost',
'dvr1-intf-id',
'my-subnet-id')
self.assertTrue(result)
def test_dvr_serviced_vip_port_exists_on_subnet(self):
vip_port = {
'id': 'lbaas-vip-port1',
'device_id': 'vip-pool-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.1'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=vip_port)
def test_dvr_serviced_dhcp_port_exists_on_subnet(self):
dhcp_port = {
'id': 'dhcp-port1',
'device_id': 'dhcp-net-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.2'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port)
def _prepare_schedule_snat_tests(self):
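        """Return a live agent and a distributed router dict with a gateway."""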
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': str(uuid.uuid4()),
'enable_snat': True
}
}
return agent, router
def test_schedule_router_unbind_snat_servicenode_negativetest(self):
router = {
'id': 'foo_router_id',
'distributed': True
}
with contextlib.nested(
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'get_snat_bindings'),
mock.patch.object(self.dut, 'unbind_snat_servicenode')
) as (mock_rd, mock_snat_bind, mock_unbind):
mock_rd.return_value = router
mock_snat_bind.return_value = False
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', router)
self.assertFalse(mock_unbind.called)
def test_schedule_snat_router_with_snat_candidates(self):
agent, router = self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(query.Query, 'first'),
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),
mock.patch.object(self.dut, 'bind_snat_servicenode')) as (
mock_query, mock_agents,
mock_candidates, mock_rd, mock_dvr, mock_bind):
mock_rd.return_value = router
mock_query.return_value = []
mock_agents.return_value = [agent]
mock_candidates.return_value = [agent]
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', mock.ANY)
mock_bind.assert_called_once_with(
self.adminContext, 'foo_router_id', [agent])
def test_unbind_snat_servicenode(self):
router_id = 'foo_router_id'
core_plugin = mock.PropertyMock()
type(self.dut)._core_plugin = core_plugin
(self.dut._core_plugin.get_ports_on_host_by_subnet.
return_value) = []
core_plugin.reset_mock()
l3_notifier = mock.PropertyMock()
type(self.dut).l3_rpc_notifier = l3_notifier
binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
router_id=router_id, l3_agent_id='foo_l3_agent_id',
l3_agent=agents_db.Agent())
with contextlib.nested(
mock.patch.object(query.Query, 'one'),
mock.patch.object(self.adminContext.session, 'delete'),
mock.patch.object(query.Query, 'delete'),
mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as (
mock_query, mock_session, mock_delete, mock_get_subnets):
mock_query.return_value = binding
mock_get_subnets.return_value = ['foo_subnet_id']
self.dut.unbind_snat_servicenode(self.adminContext, router_id)
mock_get_subnets.assert_called_with(self.adminContext, router_id)
self.assertTrue(mock_session.call_count)
self.assertTrue(mock_delete.call_count)
core_plugin.assert_called_once_with()
l3_notifier.assert_called_once_with()
class L3HAPlugin(db_v2.NeutronDbPluginV2,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_hascheduler_db.L3_HA_scheduler_db_mixin):
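    """Minimal plugin exposing the l3-ha extension for the HA scheduler
    tests below."""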
supported_extension_aliases = ["l3-ha"]
class L3HATestCaseMixin(testlib_api.SqlTestCase,
L3SchedulerBaseMixin,
testlib_plugin.PluginSetupHelper):
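    """Common setUp for the HA scheduler tests: loads the ML2 core plugin,
    creates an L3HAPlugin with HA interface notifications mocked out, and
    overrides max_l3_agents_per_router to 0 (which here effectively removes
    the per-router agent cap, so HA routers can bind to every agent).
    """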
def setUp(self):
super(L3HATestCaseMixin, self).setUp()
self.adminContext = q_context.get_admin_context()
self.plugin = L3HAPlugin()
self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_ha_interfaces_updated').start()
cfg.CONF.set_override('max_l3_agents_per_router', 0)
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
def _create_ha_router(self, ha=True, tenant_id='tenant1'):
self.adminContext.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True}
if ha is not None:
router['ha'] = ha
return self.plugin.create_router(self.adminContext,
{'router': router})
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3_HA_scheduler_db_mixinTestCase,
self)._register_l3_agents(plugin=plugin)
self.agent3 = self._register_l3_agent(THIRD_L3_AGENT, plugin)
self.agent_id3 = self.agent3.id
self.agent4 = self._register_l3_agent(FOURTH_L3_AGENT, plugin)
self.agent_id4 = self.agent4.id
def test_get_ha_routers_l3_agents_count(self):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router1['id'])
self.plugin.schedule_router(self.adminContext, router2['id'])
self.plugin.schedule_router(self.adminContext, router3['id'])
result = self.plugin.get_ha_routers_l3_agents_count(
self.adminContext).all()
self.assertEqual(2, len(result))
self.assertIn((router1['id'], router1['tenant_id'], 4), result)
self.assertIn((router2['id'], router2['tenant_id'], 4), result)
self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY),
result)
def test_get_ordered_l3_agents_by_num_routers(self):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
router4 = self._create_ha_router(ha=False)
# Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
# host 2, and agent 4 will host 3.
self.plugin.schedule_router(self.adminContext, router1['id'],
candidates=[self.agent2, self.agent4])
self.plugin.schedule_router(self.adminContext, router2['id'],
candidates=[self.agent3, self.agent4])
self.plugin.schedule_router(self.adminContext, router3['id'],
candidates=[self.agent3])
self.plugin.schedule_router(self.adminContext, router4['id'],
candidates=[self.agent4])
agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
self.agent_id4]
result = self.plugin.get_l3_agents_ordered_by_num_routers(
self.adminContext, agent_ids)
self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
def test_reschedule_ha_routers_from_down_agents(self):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
self._set_l3_agent_dead(self.agent_id1)
with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
self.plugin.reschedule_routers_from_down_agents()
self.assertFalse(reschedule.called)
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
def test_scheduler_with_ha_enabled(self):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
for agent in agents:
sync_data = self.plugin.get_ha_sync_data_for_host(
self.adminContext, router_ids=[router['id']],
host=agent.host)
self.assertEqual(1, len(sync_data))
interface = sync_data[0][constants.HA_INTERFACE_KEY]
self.assertIsNotNone(interface)
def test_auto_schedule(self):
router = self._create_ha_router()
self.plugin.auto_schedule_routers(
self.adminContext, self.agent1.host, None)
self.plugin.auto_schedule_routers(
self.adminContext, self.agent2.host, None)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']])
self.assertEqual(2, len(agents))
def test_auto_schedule_specific_router_when_agent_added(self):
self._auto_schedule_when_agent_added(True)
def test_auto_schedule_all_routers_when_agent_added(self):
self._auto_schedule_when_agent_added(False)
def _auto_schedule_when_agent_added(self, specific_router):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
agent = self._register_l3_agent(THIRD_L3_AGENT)
self.agent_id3 = agent.id
routers_to_auto_schedule = [router['id']] if specific_router else []
self.plugin.auto_schedule_routers(self.adminContext,
THIRD_L3_AGENT['host'],
routers_to_auto_schedule)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(3, len(agents))
# Simulate agent restart to make sure we don't try to re-bind
self.plugin.auto_schedule_routers(self.adminContext,
THIRD_L3_AGENT['host'],
routers_to_auto_schedule)
def test_scheduler_with_ha_enabled_not_enough_agent(self):
r1 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
r2 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r2['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(0, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, True)
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3HALeastRoutersSchedulerTestCase,
self)._register_l3_agents(plugin=plugin)
agent = self._register_l3_agent(THIRD_L3_AGENT, plugin)
self.agent_id3 = agent.id
agent = self._register_l3_agent(FOURTH_L3_AGENT, plugin)
self.agent_id4 = agent.id
def setUp(self):
super(L3HALeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
        # disable the third and fourth agents to be sure that the router
        # will be scheduled on one of the first two
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, False)
r1 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, True)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, True)
r2 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r2['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id3, agent_ids)
self.assertIn(self.agent_id4, agent_ids)
|
cernops/neutron
|
neutron/tests/unit/test_l3_schedulers.py
|
Python
|
apache-2.0
| 53,905
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ResourceGroupPaged(Paged):
"""
    A paging container for iterating over a list of :class:`ResourceGroup <azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroup>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ResourceGroup]'}
}
def __init__(self, *args, **kwargs):
super(ResourceGroupPaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/resource_group_paged.py
|
Python
|
mit
| 968
|
#!/usr/bin/env python
# coding: utf8
"""Example of a spaCy v2.0 pipeline component that requests all countries via
the REST Countries API, merges country names into one token, assigns entity
labels and sets attributes on country tokens, e.g. the capital and lat/lng
coordinates. Can be extended with more details from the API.
* REST Countries API: https://restcountries.eu (Mozilla Public License MPL 2.0)
* Custom pipeline components: https://spacy.io/usage/processing-pipelines#custom-components
Compatible with: spaCy v2.0.0+
Prerequisites: pip install requests
"""
from __future__ import unicode_literals, print_function
import requests
import plac
from spacy.lang.en import English
from spacy.matcher import PhraseMatcher
from spacy.tokens import Doc, Span, Token
def main():
# For simplicity, we start off with only the blank English Language class
# and no model or pre-defined pipeline loaded.
nlp = English()
rest_countries = RESTCountriesComponent(nlp) # initialise component
nlp.add_pipe(rest_countries) # add it to the pipeline
doc = nlp(u"Some text about Colombia and the Czech Republic")
print('Pipeline', nlp.pipe_names) # pipeline contains component name
print('Doc has countries', doc._.has_country) # Doc contains countries
for token in doc:
if token._.is_country:
print(token.text, token._.country_capital, token._.country_latlng,
token._.country_flag) # country data
print('Entities', [(e.text, e.label_) for e in doc.ents]) # entities
class RESTCountriesComponent(object):
"""spaCy v2.0 pipeline component that requests all countries via
the REST Countries API, merges country names into one token, assigns entity
labels and sets attributes on country tokens.
"""
name = 'rest_countries' # component name, will show up in the pipeline
def __init__(self, nlp, label='GPE'):
"""Initialise the pipeline component. The shared nlp instance is used
to initialise the matcher with the shared vocab, get the label ID and
generate Doc objects as phrase match patterns.
"""
# Make request once on initialisation and store the data
r = requests.get('https://restcountries.eu/rest/v2/all')
r.raise_for_status() # make sure requests raises an error if it fails
countries = r.json()
# Convert API response to dict keyed by country name for easy lookup
# This could also be extended using the alternative and foreign language
# names provided by the API
self.countries = {c['name']: c for c in countries}
self.label = nlp.vocab.strings[label] # get entity label ID
# Set up the PhraseMatcher with Doc patterns for each country name
patterns = [nlp(c) for c in self.countries.keys()]
self.matcher = PhraseMatcher(nlp.vocab)
self.matcher.add('COUNTRIES', None, *patterns)
# Register attribute on the Token. We'll be overwriting this based on
# the matches, so we're only setting a default value, not a getter.
# If no default value is set, it defaults to None.
Token.set_extension('is_country', default=False)
Token.set_extension('country_capital', default=False)
Token.set_extension('country_latlng', default=False)
Token.set_extension('country_flag', default=False)
# Register attributes on Doc and Span via a getter that checks if one of
# the contained tokens is set to is_country == True.
Doc.set_extension('has_country', getter=self.has_country)
Span.set_extension('has_country', getter=self.has_country)
def __call__(self, doc):
"""Apply the pipeline component on a Doc object and modify it if matches
are found. Return the Doc, so it can be processed by the next component
in the pipeline, if available.
"""
matches = self.matcher(doc)
spans = [] # keep the spans for later so we can merge them afterwards
for _, start, end in matches:
# Generate Span representing the entity & set label
entity = Span(doc, start, end, label=self.label)
spans.append(entity)
# Set custom attribute on each token of the entity
# Can be extended with other data returned by the API, like
# currencies, country code, flag, calling code etc.
for token in entity:
token._.set('is_country', True)
token._.set('country_capital', self.countries[entity.text]['capital'])
token._.set('country_latlng', self.countries[entity.text]['latlng'])
token._.set('country_flag', self.countries[entity.text]['flag'])
# Overwrite doc.ents and add entity – be careful not to replace!
doc.ents = list(doc.ents) + [entity]
for span in spans:
# Iterate over all spans and merge them into one token. This is done
# after setting the entities – otherwise, it would cause mismatched
# indices!
span.merge()
return doc # don't forget to return the Doc!
def has_country(self, tokens):
"""Getter for Doc and Span attributes. Returns True if one of the tokens
is a country. Since the getter is only called when we access the
attribute, we can refer to the Token's 'is_country' attribute here,
which is already set in the processing step."""
return any([t._.get('is_country') for t in tokens])
if __name__ == '__main__':
plac.call(main)
# Expected output:
# Pipeline ['rest_countries']
# Doc has countries True
# Colombia Bogotá [4.0, -72.0] https://restcountries.eu/data/col.svg
# Czech Republic Prague [49.75, 15.5] https://restcountries.eu/data/cze.svg
# Entities [('Colombia', 'GPE'), ('Czech Republic', 'GPE')]
|
aikramer2/spaCy
|
examples/pipeline/custom_component_countries_api.py
|
Python
|
mit
| 5,903
|
#!/usr/bin/env python
''' Plot a cdf from a csv file
File: plot_CDF_from_file.py
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"
def plot_cdf_from_file(filename):
"""Open file, store cdf to .pdf and .png"""
import numpy as np
import matplotlib.pyplot as plt
import pylab as P
data = np.genfromtxt(filename, delimiter=',')
# SINR data is best presented in dB
from utils import utils
data = utils.WTodB(data)
import cdf_plot
label = [ "Iteration %d" %i for i in np.arange(data.shape[0])+1]
cdf_plot.cdf_plot(data, '-', label=label)
# plt.xlabel(xlabel)
# plt.ylabel(ylabel)
# plt.title(title)
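    # draw a horizontal arrow starting at (0, 50) and extending 40 units in x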
P.arrow( 0, 50, 40, 0, fc="k", ec="k",
head_width=3, head_length=5 )
plt.savefig(filename+'.pdf', format='pdf')
plt.savefig(filename+'.png', format='png')
if __name__ == '__main__':
import sys
filename = sys.argv[1]
plot_cdf_from_file(filename)
|
ryklith/pyltesim
|
plotting/plot_CDF_from_file.py
|
Python
|
gpl-2.0
| 1,103
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('redirects', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='redirect',
name='regular_expression',
            field=models.BooleanField(default=False, help_text=b"This will allow using regular expressions to match and replace patterns in URLs. See the <a href='https://docs.python.org/2/library/re.html' target='_blank'>Python regular expression documentation</a> for details."),
),
migrations.AddField(
model_name='redirect',
name='test_path',
field=models.CharField(help_text=b'You will need to specify a test path to ensure your regular expression is valid.', max_length=200, null=True, blank=True),
),
]
|
onespacemedia/cms-redirects
|
redirects/migrations/0002_auto_20160805_1654.py
|
Python
|
mit
| 907
|
# -*- coding: UTF-8 -*-
r"""\
Duct is a library for running child processes. Duct makes it easy to build
pipelines and redirect IO like a shell. At the same time, Duct helps you write
correct, portable code: whitespace is never significant, errors from child
processes get reported by default, and a variety of `gotchas, bugs, and
platform inconsistencies
<https://github.com/oconnor663/duct.py/blob/master/gotchas.md>`_ are handled
for you the Right Way™.
- `GitHub repo <https://github.com/oconnor663/duct.rs>`_
- `PyPI package <https://pypi.python.org/pypi/duct>`_
- `the same library, in Rust <https://github.com/oconnor663/duct.rs>`_
Examples
--------
Run a command without capturing any output. Here "hi" is printed directly to
the terminal:
>>> from duct import cmd
>>> cmd("echo", "hi").run() # doctest: +SKIP
hi
Output(status=0, stdout=None, stderr=None)
Capture the standard output of a command. Here "hi" is returned as a string:
>>> cmd("echo", "hi").read()
'hi'
Capture the standard output of a pipeline:
>>> cmd("echo", "hi").pipe(cmd("sed", "s/i/o/")).read()
'ho'
Merge standard error into standard output and read both incrementally:
>>> big_cmd = cmd("bash", "-c", "echo out && echo err 1>&2")
>>> reader = big_cmd.stderr_to_stdout().reader()
>>> with reader:
... reader.readlines()
[b'out\n', b'err\n']
Children that exit with a non-zero status raise an exception by default:
>>> cmd("false").run()
Traceback (most recent call last):
...
duct.StatusError: Expression cmd('false') returned non-zero exit status: Output(status=1, stdout=None, stderr=None)
>>> cmd("false").unchecked().run()
Output(status=1, stdout=None, stderr=None)
""" # noqa: E501
from collections import namedtuple
from contextlib import contextmanager
import io
import os
import shutil
import signal
import subprocess
import threading
try:
from pathlib import PurePath
except ImportError:
# a dummy class that nothing will ever be an instance of
class PurePath:
pass
try:
# not defined in Python 2
PIPE_CLOSED_ERROR = BrokenPipeError
except NameError:
PIPE_CLOSED_ERROR = IOError
HAS_WAITID = "waitid" in dir(os)
# Expression and handle types.
# TODO: Replace this with enum when we no longer support Python 2.
CMD = 0
PIPE = 1
STDIN_BYTES = 2
STDIN_PATH = 3
STDIN_FILE = 4
STDIN_NULL = 5
STDOUT_PATH = 6
STDOUT_FILE = 7
STDOUT_NULL = 8
STDOUT_CAPTURE = 9
STDOUT_TO_STDERR = 10
STDERR_PATH = 11
STDERR_FILE = 12
STDERR_NULL = 13
STDERR_CAPTURE = 14
STDERR_TO_STDOUT = 15
STDOUT_STDERR_SWAP = 16
DIR = 17
ENV = 18
ENV_REMOVE = 19
FULL_ENV = 20
UNCHECKED = 21
BEFORE_SPAWN = 22
NAMES = {
CMD: "cmd",
PIPE: "pipe",
STDIN_BYTES: "stdin_bytes",
STDIN_PATH: "stdin_path",
STDIN_FILE: "stdin_file",
STDIN_NULL: "stdin_null",
STDOUT_PATH: "stdout_path",
STDOUT_FILE: "stdout_file",
STDOUT_NULL: "stdout_null",
STDOUT_CAPTURE: "stdout_capture",
STDOUT_TO_STDERR: "stdout_to_stderr",
STDERR_PATH: "stderr_path",
STDERR_FILE: "stderr_file",
STDERR_NULL: "stderr_null",
STDERR_CAPTURE: "stderr_capture",
STDERR_TO_STDOUT: "stderr_to_stdout",
STDOUT_STDERR_SWAP: "stdout_stderr_swap",
DIR: "dir",
ENV: "env",
ENV_REMOVE: "env_remove",
FULL_ENV: "full_env",
UNCHECKED: "unchecked",
BEFORE_SPAWN: "before_spawn",
}
def cmd(prog, *args):
r"""Build a command :class:`Expression` from a program name and any number
of arguments.
This is the sole entry point to Duct. All the types below are built with
methods on the :class:`Expression` returned by this function.
>>> cmd("echo", "hi").read()
'hi'
"""
return Expression(CMD, None, (prog, args))
class Expression:
r"""An expression object representing a command or a pipeline of commands.
Build command expressions with the :func:`cmd` function. Build pipelines
with the :func:`pipe` method. Methods like :func:`stdout_path` and
:func:`env` also return new expressions representing the modified execution
environment. Execute expressions with :func:`run`, :func:`read`,
:func:`start`, or :func:`reader`.
"""
def __init__(self, _type, inner, payload=None):
self._type = _type
self._inner = inner
self._payload = payload
def __repr__(self):
return repr_expression(self)
def run(self):
r"""Execute the expression and return an :class:`Output`, which includes
the exit status and any captured output. Raise an exception if the
status is non-zero.
>>> cmd("true").run()
Output(status=0, stdout=None, stderr=None)
"""
return self.start().wait()
def read(self):
r"""Execute the expression and capture its output, similar to backticks
or $() in the shell.
This is a wrapper around reader() which reads to EOF, decodes UTF-8,
trims newlines, and returns the resulting string.
>>> cmd("echo", "hi").read()
'hi'
"""
stdout_bytes = self.reader().read()
stdout_str = decode_with_universal_newlines(stdout_bytes)
return stdout_str.rstrip('\n')
def start(self):
r"""Start executing the expression and return a :class:`Handle`.
Calling :func:`start` followed by :func:`Handle.wait` is equivalent to
:func:`run`.
>>> handle = cmd("echo", "hi").stdout_capture().start()
>>> # Do some other stuff.
>>> handle.wait()
Output(status=0, stdout=b'hi\n', stderr=None)
Note that leaking a :class:`Handle` without calling :func:`Handle.wait`
will turn the children into zombie processes. In a long-running
        program, that could be a serious resource leak.
"""
with new_iocontext() as context:
handle = start_expression(self, context)
context.stdout_capture_context.start_thread_if_needed()
context.stderr_capture_context.start_thread_if_needed()
return handle
def reader(self):
r"""Start executing the expression with its stdout captured, and return
a :class:`ReaderHandle` wrapping the capture pipe.
Note that while :func:`start` uses background threads to do IO,
:func:`reader` does not, and it's the caller's responsibility to read
the child's output promptly. Otherwise the child's stdout pipe buffer
can fill up, causing the child to block and potentially leading to
performance issues or deadlocks.
>>> reader = cmd("echo", "hi").reader()
>>> with reader:
... reader.read()
b'hi\n'
"""
with new_iocontext() as context:
handle = start_expression(self.stdout_capture(), context)
read_pipe = context.stdout_capture_context.get_read_pipe()
context.stderr_capture_context.start_thread_if_needed()
return ReaderHandle(handle, read_pipe)
def pipe(self, right_side):
r"""Combine two expressions to form a pipeline.
>>> cmd("echo", "hi").pipe(cmd("sed", "s/i/o/")).read()
'ho'
During execution, if one side of the pipe returns a non-zero exit
status, that becomes the status of the whole pipe, similar to Bash's
``pipefail`` option. If both sides return non-zero, and one of them is
:func:`unchecked`, then the checked side wins. Otherwise the right side
wins.
During spawning, if the left side of the pipe spawns successfully, but
the right side fails to spawn, the left side will be killed and
awaited. That's necessary to return the spawn errors immediately,
without leaking the left side as a zombie.
"""
return Expression(PIPE, None, (self, right_side))
def stdin_bytes(self, buf):
r"""Redirect the standard input of the expression to a pipe, and write
the supplied bytes to the pipe using a background thread.
This also accepts a string, in which case it converts any ``\n``
characters to ``os.linesep`` and encodes the result as UTF-8.
>>> cmd("cat").stdin_bytes(b"foo").read()
'foo'
"""
return Expression(STDIN_BYTES, self, buf)
def stdin_path(self, path):
r"""Redirect the standard input of the expression to a file opened from
the supplied filepath.
This works with strings, bytes, and pathlib :class:`Path` objects.
>>> cmd("head", "-c10").stdin_path("/dev/zero").read()
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
"""
return Expression(STDIN_PATH, self, path)
def stdin_file(self, file_):
r"""Redirect the standard input of the expression to the supplied file.
This works with any file-like object accepted by :class:`Popen`,
including raw file descriptors.
>>> f = open("/dev/zero")
>>> cmd("head", "-c10").stdin_file(f).read()
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
"""
return Expression(STDIN_FILE, self, file_)
def stdin_null(self):
r"""Redirect the standard input of the expression to ``/dev/null``.
>>> cmd("cat").stdin_null().read()
''
"""
return Expression(STDIN_NULL, self)
def stdout_path(self, path):
r"""Redirect the standard output of the expression to a file opened
from the supplied filepath.
This works with strings, bytes, and pathlib :class:`Path` objects.
>>> cmd("echo", "hi").stdout_path("/tmp/outfile").run()
Output(status=0, stdout=None, stderr=None)
>>> open("/tmp/outfile").read()
'hi\n'
"""
return Expression(STDOUT_PATH, self, path)
def stdout_file(self, file_):
r"""Redirect the standard output of the expression to the supplied file.
This works with any file-like object accepted by :class:`Popen`,
including raw file descriptors.
>>> f = open("/dev/null", "w")
>>> cmd("echo", "hi").stdout_file(f).run()
Output(status=0, stdout=None, stderr=None)
"""
return Expression(STDOUT_FILE, self, file_)
def stdout_null(self):
r"""Redirect the standard output of the expression to ``/dev/null``.
>>> cmd("echo", "hi").stdout_null().run()
Output(status=0, stdout=None, stderr=None)
"""
return Expression(STDOUT_NULL, self)
def stdout_capture(self):
r"""Capture the standard output of the expression. The captured bytes
become the ``stdout`` field of the returned :class:`Output`.
>>> cmd("echo", "hi").stdout_capture().run()
Output(status=0, stdout=b'hi\n', stderr=None)
"""
return Expression(STDOUT_CAPTURE, self)
def stdout_to_stderr(self):
r"""Merge the standard output of the expression with its stderr.
>>> bash_cmd = cmd("bash", "-c", "echo out && echo err 1>&2")
>>> bash_cmd.stdout_to_stderr().stdout_capture().stderr_capture().run()
Output(status=0, stdout=b'', stderr=b'out\nerr\n')
"""
return Expression(STDOUT_TO_STDERR, self)
def stderr_path(self, path):
r"""Redirect the standard error of the expression to a file opened from
the supplied filepath.
This works with strings, bytes, and pathlib :class:`Path` objects.
>>> cmd("bash", "-c", "echo hi 1>&2").stderr_path("/tmp/outfile").run()
Output(status=0, stdout=None, stderr=None)
>>> open("/tmp/outfile").read()
'hi\n'
"""
return Expression(STDERR_PATH, self, path)
def stderr_file(self, file_):
r"""Redirect the standard error of the expression to the supplied file.
This works with any file-like object accepted by :class:`Popen`,
including raw file descriptors.
>>> f = open("/dev/null", "w")
>>> cmd("bash", "-c", "echo hi 1>&2").stderr_file(f).run()
Output(status=0, stdout=None, stderr=None)
"""
return Expression(STDERR_FILE, self, file_)
def stderr_null(self):
r"""Redirect the standard error of the expression to ``/dev/null``.
>>> cmd("bash", "-c", "echo hi 1>&2").stderr_null().run()
Output(status=0, stdout=None, stderr=None)
"""
return Expression(STDERR_NULL, self)
def stderr_capture(self):
r"""Capture the standard error of the expression. The captured bytes
become the ``stderr`` field of the returned :class:`Output`.
>>> cmd("bash", "-c", "echo hi 1>&2").stderr_capture().run()
Output(status=0, stdout=None, stderr=b'hi\n')
"""
return Expression(STDERR_CAPTURE, self)
def stderr_to_stdout(self):
r"""Merge the standard error of the expression with its stdout.
>>> bash_cmd = cmd("bash", "-c", "echo out && echo err 1>&2")
>>> bash_cmd.stderr_to_stdout().stdout_capture().stderr_capture().run()
Output(status=0, stdout=b'out\nerr\n', stderr=b'')
"""
return Expression(STDERR_TO_STDOUT, self)
def stdout_stderr_swap(self):
r"""Swap the standard output and standard error of the expression.
>>> bash_cmd = cmd("bash", "-c", "echo out && echo err 1>&2")
>>> swapped_cmd = bash_cmd.stdout_stderr_swap()
>>> swapped_cmd.stdout_capture().stderr_capture().run()
Output(status=0, stdout=b'err\n', stderr=b'out\n')
"""
return Expression(STDOUT_STDERR_SWAP, self)
def dir(self, path):
r"""Set the working directory for the expression.
>>> cmd("pwd").dir("/").read()
'/'
Note that :func:`dir` does *not* affect the meaning of relative exe
paths. For example in the expression ``cmd("./foo.sh").dir("bar")``,
the script ``./foo.sh`` will execute, *not* the script
``./bar/foo.sh``. However, it usually *does* affect how the child
process interprets relative paths in command arguments.
"""
return Expression(DIR, self, path)
def env(self, name, val):
r"""Set an environment variable in the expression's environment.
>>> cmd("bash", "-c", "echo $FOO").env("FOO", "bar").read()
'bar'
"""
return Expression(ENV, self, (name, val))
def env_remove(self, name):
r"""Unset an environment variable in the expression's environment.
>>> os.environ["FOO"] = "bar"
>>> cmd("bash", "-c", "echo $FOO").env_remove("FOO").read()
''
Note that all of Duct's ``env`` functions follow OS rules for
environment variable case sensitivity. That means that
``env_remove("foo")`` will unset ``FOO`` on Windows (where ``foo`` and
``FOO`` are equivalent) but not on Unix (where they are separate
variables). Portable programs should restrict themselves to uppercase
environment variable names for that reason.
"""
return Expression(ENV_REMOVE, self, name)
def full_env(self, env_dict):
r"""Set the entire environment for the expression, from a dictionary of
name-value pairs.
>>> os.environ["FOO"] = "bar"
>>> os.environ["BAZ"] = "bing"
>>> cmd("bash", "-c", "echo $FOO$BAZ").full_env({"FOO": "xyz"}).read()
'xyz'
Note that some environment variables are required for normal program
execution (like SystemRoot on Windows), so copying the parent's
environment is usually preferable to starting with an empty one.
"""
return Expression(FULL_ENV, self, env_dict)
def unchecked(self):
r"""Prevent a non-zero exit status from raising a :class:`StatusError`.
The unchecked exit code will still be there on the :class:`Output`
returned by :func:`run`; its value doesn't change.
>>> cmd("false").run()
Traceback (most recent call last):
...
duct.StatusError: Expression cmd('false') returned non-zero exit status: Output(status=1, stdout=None, stderr=None)
>>> cmd("false").unchecked().run()
Output(status=1, stdout=None, stderr=None)
"Uncheckedness" sticks to an exit code as it propagates up from part of
a pipeline, but it doesn't "infect" other exit codes. So for example,
if only one sub-expression in a pipe is :func:`unchecked`, then errors
returned by the other side will still be checked.
>>> cmd("false").pipe(cmd("true")).unchecked().run()
Output(status=1, stdout=None, stderr=None)
>>> cmd("false").unchecked().pipe(cmd("true")).run()
Output(status=1, stdout=None, stderr=None)
>>> cmd("false").pipe(cmd("true").unchecked()).run()
Traceback (most recent call last):
...
duct.StatusError: Expression cmd('false').pipe(cmd('true').unchecked()) returned non-zero exit status: Output(status=1, stdout=None, stderr=None)
""" # noqa: E501
return Expression(UNCHECKED, self)
def before_spawn(self, callback):
r"""
Add a callback for modifying the arguments to :func:`Popen` right
before it's called. The callback will be passed a command list (the
program followed by its arguments) and a keyword arguments dictionary,
and it may modify either. The callback's return value is ignored.
The callback is called for each command in its sub-expression, and each
time the expression is executed. That call happens after other features
like :func:`stdout` and :func:`env` have been applied, so any changes
made by the callback take priority. More than one callback can be
added, in which case the innermost is executed last. For example, if
one call to :func:`before_spawn` is applied to an entire :func:`pipe`
expression, and another call is applied to just one command within the
        pipeline, the callback for the entire pipeline will be called first
        on the command where both hooks apply.
This is intended for rare and tricky cases, like callers who want to
change the group ID of their child processes, or who want to run code
in :func:`Popen.preexec_fn`. Most callers shouldn't need to use it.
>>> def add_sneaky_arg(command, kwargs):
... command.append("sneaky!")
>>> cmd("echo", "being").before_spawn(add_sneaky_arg).read()
'being sneaky!'
"""
return Expression(BEFORE_SPAWN, self, callback)
def start_expression(expression, context):
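    """Recursively start an expression tree.

    CMD nodes spawn a child process and PIPE nodes start both sides;
    every other node type adjusts the IO context via modify_context()
    and recurses into its inner expression.
    """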
handle_inner = None
handle_payload_cell = [None]
if expression._type == CMD:
prog, args = expression._payload
handle_payload_cell[0] = start_cmd(context, prog, args)
elif expression._type == PIPE:
left_expr, right_expr = expression._payload
handle_payload_cell[0] = start_pipe(context, left_expr, right_expr)
else:
# IO redirect expressions
with modify_context(expression, context,
handle_payload_cell) as modified_context:
handle_inner = start_expression(expression._inner,
modified_context)
return Handle(expression._type, handle_inner, handle_payload_cell[0],
str(expression), context.stdout_capture_context,
context.stderr_capture_context)
def start_cmd(context, prog, args):
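    """Spawn a single child process with the accumulated IO context,
    running any before_spawn hooks on the final command and kwargs."""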
prog_str = stringify_with_dot_if_path(prog)
maybe_absolute_prog = maybe_canonicalize_exe_path(prog_str, context)
args_strs = [stringify_if_path(arg) for arg in args]
command = [maybe_absolute_prog] + args_strs
kwargs = {
"cwd": context.dir,
"env": context.env,
"stdin": context.stdin,
"stdout": context.stdout,
"stderr": context.stderr,
}
# The innermost hooks are pushed last, and we execute them last.
for hook in context.before_spawn_hooks:
hook(command, kwargs)
return safe_popen(command, **kwargs)
def start_pipe(context, left_expr, right_expr):
read_pipe, write_pipe = open_pipe()
with read_pipe:
with write_pipe:
# Start the left side first. If this fails for some reason,
# just let the failure propagate.
left_context = context._replace(stdout=write_pipe)
left_handle = start_expression(left_expr, left_context)
# Now the left side is started. If the right side fails to start,
# we can't let the left side turn into a zombie. We have to await
# it, and that means we have to kill it.
right_context = context._replace(stdin=read_pipe)
try:
right_handle = start_expression(right_expr, right_context)
except Exception:
kill(left_handle)
# This wait helper function doesn't throw on non-zero statuses or
# join capture threads.
wait_on_status(left_handle, True)
raise
return (left_handle, right_handle)
@contextmanager
def modify_context(expression, context, payload_cell):
arg = expression._payload
if expression._type == STDIN_BYTES:
if is_unicode(arg):
buf = encode_with_universal_newlines(arg)
elif is_bytes(arg):
buf = arg
else:
raise TypeError("Not a valid stdin_bytes parameter: " + repr(arg))
input_reader = io.BytesIO(buf)
with start_input_thread(input_reader, payload_cell) as read_pipe:
yield context._replace(stdin=read_pipe)
elif expression._type == STDIN_PATH:
with open_path(arg, "rb") as f:
yield context._replace(stdin=f)
elif expression._type == STDIN_FILE:
yield context._replace(stdin=arg)
elif expression._type == STDIN_NULL:
with open_devnull("rb") as f:
yield context._replace(stdin=f)
elif expression._type == STDOUT_PATH:
with open_path(arg, "wb") as f:
yield context._replace(stdout=f)
elif expression._type == STDOUT_FILE:
yield context._replace(stdout=arg)
elif expression._type == STDOUT_NULL:
with open_devnull("wb") as f:
yield context._replace(stdout=f)
elif expression._type == STDOUT_CAPTURE:
yield context._replace(
stdout=context.stdout_capture_context.get_write_pipe())
elif expression._type == STDOUT_TO_STDERR:
yield context._replace(stdout=context.stderr)
elif expression._type == STDERR_PATH:
with open_path(arg, "wb") as f:
yield context._replace(stderr=f)
elif expression._type == STDERR_FILE:
yield context._replace(stderr=arg)
elif expression._type == STDERR_NULL:
with open_devnull("wb") as f:
yield context._replace(stderr=f)
elif expression._type == STDERR_CAPTURE:
yield context._replace(
stderr=context.stderr_capture_context.get_write_pipe())
elif expression._type == STDERR_TO_STDOUT:
yield context._replace(stderr=context.stdout)
elif expression._type == STDOUT_STDERR_SWAP:
yield context._replace(stdout=context.stderr, stderr=context.stdout)
elif expression._type == DIR:
yield context._replace(dir=stringify_if_path(arg))
elif expression._type == ENV:
# Don't modify the environment dictionary in place. That would affect
# all references to it. Make a copy instead.
name, val = arg
new_env = context.env.copy()
# Windows needs special handling of env var names.
new_env[convert_env_var_name(name)] = stringify_if_path(val)
yield context._replace(env=new_env)
elif expression._type == ENV_REMOVE:
# As above, don't modify the dictionary in place.
new_env = context.env.copy()
# Windows needs special handling of env var names.
new_env.pop(convert_env_var_name(arg), None)
yield context._replace(env=new_env)
elif expression._type == FULL_ENV:
# Windows needs special handling of env var names.
new_env = dict((convert_env_var_name(k), v) for (k, v) in arg.items())
yield context._replace(env=new_env)
elif expression._type == UNCHECKED:
# Unchecked only affects what happens during wait.
yield context
elif expression._type == BEFORE_SPAWN:
# As with env, don't modify the list in place. Make a copy.
before_spawn_hooks = context.before_spawn_hooks + [arg]
yield context._replace(before_spawn_hooks=before_spawn_hooks)
else:
raise NotImplementedError # pragma: no cover
class Output(namedtuple('Output', ['status', 'stdout', 'stderr'])):
r"""The return type of :func:`Expression.run` and :func:`Handle.wait`. It
    carries the public fields ``status``, ``stdout``, and ``stderr``. If
    :func:`Expression.stdout_capture` and :func:`Expression.stderr_capture`
    aren't used, ``stdout`` and ``stderr`` respectively will be ``None``.
>>> cmd("bash", "-c", "echo hi 1>&2").stderr_capture().run()
Output(status=0, stdout=None, stderr=b'hi\n')
"""
__slots__ = ()
class StatusError(subprocess.CalledProcessError):
r"""The exception raised by default when a child exits with a non-zero exit
status. See :func:`Expression.unchecked` for suppressing this. If the
exception is caught, the ``output`` field contains the :class:`Output`.
>>> from duct import StatusError
>>> try:
... cmd("bash", "-c", "echo hi 1>&2 && false").stderr_capture().run()
... except StatusError as e:
... e.output
Output(status=1, stdout=None, stderr=b'hi\n')
"""
def __init__(self, output, expression_str):
self.output = output
self._expression_str = expression_str
def __str__(self):
return 'Expression {0} returned non-zero exit status: {1}'.format(
self._expression_str, self.output)
class Handle:
r"""A handle representing one or more running child processes, returned by
the :func:`Expression.start` method.
Note that leaking a :class:`Handle` without calling :func:`wait` will turn
the children into zombie processes. In a long-running program, that could
    be a serious resource leak.
"""
def __init__(self, _type, inner, payload, expression_str,
stdout_capture_context, stderr_capture_context):
self._type = _type
self._inner = inner
self._payload = payload
self._expression_str = expression_str
self._stdout_capture_context = stdout_capture_context
self._stderr_capture_context = stderr_capture_context
def wait(self):
r"""Wait for the child process(es) to finish and return an
:class:`Output` containing the exit status and any captured output.
This frees the OS resources associated with the child.
>>> handle = cmd("true").start()
>>> handle.wait()
Output(status=0, stdout=None, stderr=None)
"""
status, output = wait_on_status_and_output(self)
if is_checked_error(status):
raise StatusError(output, self._expression_str)
return output
def try_wait(self):
r"""Check whether the child process(es) have finished, and if so return
an :class:`Output` containing the exit status and any captured output.
If the child has exited, this frees the OS resources associated with
it.
>>> handle = cmd("sleep", "1000").unchecked().start()
>>> assert handle.try_wait() is None
>>> handle.kill()
>>> handle.try_wait()
Output(status=-9, stdout=None, stderr=None)
"""
status = wait_on_status(self, False)
if status is None:
return None
else:
return self.wait()
def kill(self):
r"""Send a kill signal to the child process(es). This is equivalent to
:func:`Popen.kill`, which uses ``SIGKILL`` on Unix. After sending the
signal, wait for the child to finish and free the OS resources
associated with it. If the child has already been waited on, this has
no effect.
This function does not return an :class:`Output`, and it does not raise
:class:`StatusError`. However, subsequent calls to :func:`wait` or
:func:`try_wait` are likely to raise :class:`StatusError` if you didn't
use :func:`Expression.unchecked`.
>>> handle = cmd("sleep", "1000").start()
>>> handle.kill()
"""
kill(self)
# Note that this *must not* call wait_on_status_and_output. There might
# be un-signaled grandchild processes holding the output pipe, and we
# can't expect them to exit promptly. We only want to reap our
# immediate zombie children here. See gotchas.md for an extensive
# discussion of why we can't do better.
wait_on_status(self, True)
def pids(self):
r"""Return the PIDs of all the running child processes. The order of
the PIDs in the returned list is the same as the pipeline order, from
left to right.
"""
return pids(self)
# This function handles waiting and collecting output, but does not raise
# status errors for non-zero exit statuses.
def wait_on_status_and_output(handle):
status = wait_on_status(handle, True)
stdout = handle._stdout_capture_context.join_thread_if_needed()
stderr = handle._stderr_capture_context.join_thread_if_needed()
output = Output(status.code, stdout, stderr)
return (status, output)
def wait_on_status(handle, blocking):
if handle._type == CMD:
shared_child = handle._payload
return wait_child(shared_child, blocking)
elif handle._type == PIPE:
left, right = handle._payload
return wait_pipe(left, right, blocking)
status = wait_on_status(handle._inner, blocking)
if blocking:
assert status is not None
if handle._type == STDIN_BYTES:
io_thread = handle._payload
if status is not None:
io_thread.join()
elif handle._type == UNCHECKED:
if status is not None:
status = status._replace(checked=False)
return status
def wait_child(shared_child, blocking):
if blocking:
status = shared_child.wait()
else:
status = shared_child.try_wait()
if not blocking and status is None:
return None
assert status is not None
return ExecStatus(code=status, checked=True)
def wait_pipe(left, right, blocking):
left_status = wait_on_status(left, blocking)
right_status = wait_on_status(right, blocking)
if not blocking and (left_status is None or right_status is None):
return None
assert left_status is not None and right_status is not None
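    # Propagate checked (reportable) errors first, preferring the right
    # side over the left; otherwise fall back to any non-zero unchecked
    # status from the right, and finally to the left status.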
if is_checked_error(right_status):
return right_status
elif is_checked_error(left_status):
return left_status
elif right_status.code != 0:
return right_status
else:
return left_status
def kill(handle):
if handle._type == CMD:
shared_child = handle._payload
shared_child.kill()
elif handle._type == PIPE:
left, right = handle._payload
kill(left)
kill(right)
else:
kill(handle._inner)
def pids(handle):
if handle._type == CMD:
shared_child = handle._payload
return [shared_child.pid()]
elif handle._type == PIPE:
left, right = handle._payload
return pids(left) + pids(right)
else:
return pids(handle._inner)
def repr_expression(expression):
if expression._type == CMD:
prog, args = expression._payload
args_str = repr(prog)
for arg in args:
args_str += ", " + repr(arg)
return "cmd({})".format(args_str)
elif expression._type == PIPE:
left, right = expression._payload
return "{}.pipe({})".format(repr_expression(left),
repr_expression(right))
else:
name = NAMES[expression._type]
inner = repr_expression(expression._inner)
arg = ""
if expression._payload is not None:
if type(expression._payload) is tuple:
arg = ", ".join(repr(x) for x in expression._payload)
else:
arg = repr(expression._payload)
return "{}.{}({})".format(inner, name, arg)
# The IOContext represents the child process environment at any given point in
# the execution of an expression. We read the working directory and the entire
# environment when we create a new execution context. Methods like .env(),
# .dir(), and .pipe() will create new modified contexts and pass those to their
# children. The IOContext does *not* own any of the file descriptors it's
# holding -- it's the caller's responsibility to close those.
IOContext = namedtuple("IOContext", [
"stdin",
"stdout",
"stderr",
"dir",
"env",
"stdout_capture_context",
"stderr_capture_context",
"before_spawn_hooks",
])
@contextmanager
def new_iocontext():
# Hardcode the standard file descriptors. We can't rely on None here,
    # because stdout/stderr swapping needs to work.
context = IOContext(
stdin=0,
stdout=1,
stderr=2,
dir=os.getcwd(),
# Pretend this dictionary is immutable please.
env=os.environ.copy(),
stdout_capture_context=OutputCaptureContext(),
stderr_capture_context=OutputCaptureContext(),
before_spawn_hooks=[],
)
try:
yield context
finally:
context.stdout_capture_context.close_write_pipe_if_needed()
context.stderr_capture_context.close_write_pipe_if_needed()
ExecStatus = namedtuple("ExecStatus", ["code", "checked"])
def is_checked_error(exec_status):
return exec_status.code != 0 and exec_status.checked
@contextmanager
def open_devnull(mode):
# We open devnull ourselves because Python 2 doesn't support DEVNULL.
with open(os.devnull, mode) as f:
yield f
def is_bytes(val):
# Note that bytes is the same as str in Python 2.
return isinstance(val, (bytes, bytearray))
def is_unicode(val):
unicode_type = type(u"")
return isinstance(val, unicode_type)
@contextmanager
def open_path(path_or_string, mode):
with open(stringify_if_path(path_or_string), mode) as f:
yield f
@contextmanager
def start_input_thread(input_reader, writer_thread_cell):
read, write = open_pipe()
def write_thread():
# If the write blocks on a full pipe buffer (default 64 KB on Linux),
# and then the program on the other end quits before reading
# everything, the write will throw. Catch this error.
#
# Note that on macOS, *both* write *and* close can raise a
# BrokenPipeError. So we put the try on the outside.
try:
with write:
shutil.copyfileobj(input_reader, write)
except PIPE_CLOSED_ERROR:
pass
thread = DaemonicThread(write_thread)
writer_thread_cell[0] = thread
thread.start()
with read:
yield read
# The stdout_capture() and stderr_capture() pipes are shared by all
# sub-expressions, but we don't want to open them if nothing is going to be
# captured. Also we don't want to spawn background reader threads when nothing
# is captured, or when the calling thread will be reading. This type handles
# the bookkeeping for all of that.
class OutputCaptureContext:
def __init__(self):
self._read_pipe = None
self._write_pipe = None
self._thread = None
def get_write_pipe(self):
if self._write_pipe is None:
self._read_pipe, self._write_pipe = open_pipe()
return self._write_pipe
def get_read_pipe(self):
assert self._read_pipe is not None
return self._read_pipe
def close_write_pipe_if_needed(self):
if self._write_pipe is not None:
self._write_pipe.close()
def start_thread_if_needed(self):
if self._read_pipe is None:
return
def read_fn():
with self._read_pipe:
return self._read_pipe.read()
self._thread = DaemonicThread(read_fn)
self._thread.start()
def join_thread_if_needed(self):
if self._thread is not None:
return self._thread.join()
else:
return None
def stringify_if_path(x):
if isinstance(x, PurePath):
return str(x)
return x
# Pathlib never renders a leading './' in front of a local path. That's an
# issue because on POSIX subprocess.py (like bash) won't execute scripts in the
# current directory without it. In the same vein, we also don't want
# Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both
# issues, we explicitly join a leading dot to any relative pathlib path.
def stringify_with_dot_if_path(x):
if isinstance(x, PurePath):
# Note that join does nothing if the path is absolute.
return os.path.join('.', str(x))
return x
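# For example, on POSIX stringify_with_dot_if_path(PurePath('foo.sh'))
# returns './foo.sh', while absolute paths and plain strings like 'echo'
# pass through unchanged.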
# A thread that sets the daemon flag to true, so that it doesn't block process
# exit. This also includes several other conveniences:
# - It takes a target function argument in its constructor, so that you don't
# have to subclass it every time you use it.
# - The return value from join() is whatever the target function returned.
# - join() re-raises any exceptions from the target function.
class DaemonicThread(threading.Thread):
def __init__(self, target, args=(), kwargs=None, **thread_kwargs):
threading.Thread.__init__(self, **thread_kwargs)
self.daemon = True
self._target = target
self._args = args
self._kwargs = kwargs or {}
self._return = None
self._exception = None
def run(self):
try:
self._return = self._target(*self._args, **self._kwargs)
except Exception as e:
self._exception = e
def join(self):
threading.Thread.join(self)
if self._exception is not None:
raise self._exception
return self._return
def open_pipe():
read_fd, write_fd = os.pipe()
read_mode, write_mode = ('rb', 'wb')
return os.fdopen(read_fd, read_mode), os.fdopen(write_fd, write_mode)
# There's a tricky interaction between exe paths and `dir`. Exe paths can be
# relative, and so we have to ask: Is an exe path interpreted relative to the
# parent's cwd, or the child's? The answer is that it's platform dependent! >.<
# (Windows uses the parent's cwd, but because of the fork-chdir-exec pattern,
# Unix usually uses the child's.)
#
# We want to use the parent's cwd consistently, because that saves the caller
# from having to worry about whether `dir` will have side effects, and because
# it's easy for the caller to use path.join if they want to. That means that
# when `dir` is in use, we need to detect exe names that are relative paths,
# and absolutify them. We want to do that as little as possible though, both
# because canonicalization can fail, and because we prefer to let the caller
# control the child's argv[0].
#
# We never want to absolutify a name like "emacs", because that's probably a
# program in the PATH rather than a local file. So we look for slashes in the
# name to determine what's a filepath and what isn't. Note that anything given
# as a Path will always have a slash by the time we get here, because
# stringify_with_dot_if_path prepends a ./ to them when they're relative. This
# leaves the case where Windows users might pass a local file like "foo.bat" as
# a string, which we can't distinguish from a global program name. However,
# because Windows already has the preferred "relative to parent's cwd"
# already, this case actually works without our help. (The thing Windows users
# have to watch out for instead is local files shadowing global program names,
# which I don't think we can or should prevent.)
def maybe_canonicalize_exe_path(exe_name, iocontext):
has_sep = (os.path.sep in exe_name
or (os.path.altsep is not None and os.path.altsep in exe_name))
if has_sep and iocontext.dir is not None and not os.path.isabs(exe_name):
return os.path.realpath(exe_name)
else:
return exe_name
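# For example, a relative name containing a separator, like './build.sh',
# is resolved against the parent's cwd, while a bare program name like
# 'emacs' and absolute paths are returned unchanged.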
popen_lock = threading.Lock()
def is_windows():
return os.name == "nt"
# This wrapper works around two major deadlock issues to do with pipes. The
# first is that, before Python 3.2 on POSIX systems, os.pipe() creates
# inheritable file descriptors, which leak to all child processes and prevent
# reads from reaching EOF. The workaround for this is to set close_fds=True on
# POSIX, which was not the default in those versions. See PEP 0446 for many
# details.
#
# TODO: Revisit this workaround when we drop Python 2 support.
#
# The second issue arises on Windows, where we're not allowed to set
# close_fds=True while also setting stdin/stdout/stderr. Descriptors from
# os.pipe() on Windows have never been inheritable, so it would seem that we're
# safe. However, the Windows implementation of subprocess.Popen() creates
# temporary inheritable copies of its descriptors, and these can leak. The
# workaround for this is to protect Popen() with a global lock. See
# https://bugs.python.org/issue25565.
#
# This function also returns a SharedChild object, which wraps
# subprocess.Popen. That type works around another race condition to do with
# signaling children.
def safe_popen(*args, **kwargs):
close_fds = not is_windows()
with popen_lock:
return SharedChild(*args, close_fds=close_fds, **kwargs)
# We could let our pipes do this for us, by opening them in universal newlines
# mode, but it's a bit cleaner to do it ourselves. That saves us from passing
# around the mode all over the place, and from having decoding exceptions
# thrown on reader threads.
def decode_with_universal_newlines(b):
return b.decode('utf8').replace('\r\n', '\n').replace('\r', '\n')
def encode_with_universal_newlines(s):
return s.replace('\n', os.linesep).encode('utf8')
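# For example, decode_with_universal_newlines(b'a\r\nb\rc') == 'a\nb\nc',
# and on Windows (where os.linesep is '\r\n')
# encode_with_universal_newlines('a\nb') == b'a\r\nb'.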
# Environment variables are case-insensitive on Windows. To deal with that,
# Python on Windows converts all the keys in os.environ to uppercase
# internally. That's mostly transparent when we deal with os.environ directly,
# but when we call os.environ.copy(), we get a regular dictionary with all the
# keys uppercased. We need to do a similar conversion, or else additions and
# removals in that copy won't interact properly with the inherited parent
# environment.
def convert_env_var_name(var):
if is_windows():
return var.upper()
return var
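# For example, on Windows convert_env_var_name('Path') returns 'PATH', so
# that an .env('Path', ...) override lines up with the uppercased keys in
# the copied parent environment.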
# The wait() and kill() methods on the standard library Popen class have a race
# condition on Unix. Normally kill() checks to see whether a process has
# already been awaited before sending a signal, so that if the PID has been
# reused by an unrelated process in the meantime it won't accidentally signal
# that unrelated process. However, if kill() and wait() are called from
# different threads, it's possible for wait() to free the PID *after* kill()
# has seen that the child is still running. If the kill() thread pauses at
# exactly that moment, long enough for the OS to reuse the PID, kill() could
# kill the wrong process. This is unlikely under ordinary circumstances, but
# more likely if the system is under heavy load and the PID space is almost
# exhausted.
#
# The workaround for this race condition on Unix is to use:
#
# os.waitid(os.P_PID, child_pid, os.WEXITED | os.WNOWAIT)
#
# That call waits on the child to exit, but *doesn't* free its PID for reuse.
# Then we set an internal flag that's synchronized with kill(), before finally
# calling wait() to reap the child.
#
# Note that Windows doesn't have this problem, because child handles (unlike
# raw PIDs) have to be explicitly closed.
class SharedChild:
def __init__(self, *args, **kwargs):
self._child = subprocess.Popen(*args, **kwargs)
# The child lock is only held for non-blocking calls. Threads making a
# blocking call to os.waitid() release the child lock first. This
# ensures that one thread can call try_wait() while another thread is
# blocked on wait().
self._child_lock = threading.Lock()
self._wait_lock = threading.Lock()
def wait(self):
with self._wait_lock:
# See if another thread already waited. If so, return the status we
# got before. If not, immediately release the child lock, and move
# on to call wait ourselves.
with self._child_lock:
if self._child.returncode is not None:
return self._child.returncode
# No other thread has waited, we're holding the wait lock, and
# we've released the child lock. It's now our job to wait. As
# documented above, if os.waitid is defined, use that function to
# await the child without reaping it. Otherwise we do an ordinary
# Popen.wait and accept the race condition on some platforms.
if HAS_WAITID:
os.waitid(os.P_PID, self._child.pid, os.WEXITED | os.WNOWAIT)
else:
# Python does synchronize this internally, so it won't race
# with other calls to wait() or poll(). Unfortunately it still
# races with kill(), which is what all of this is about.
self._child.wait()
# Finally, while still holding the wait lock, re-acquire the child
# lock to reap the child and write the result. Since we know the
# child has already exited, this won't block. Any other waiting
# threads that were blocked on us will see our result.
with self._child_lock:
# If the child was already reaped above in the !HAS_WAITID
# branch, this second wait will be a no-op with a cached
# returncode.
return self._child.wait()
def try_wait(self):
with self._child_lock:
if self._child.returncode is not None:
return self._child.returncode
# The child hasn't been waited on yet, so we need to do a
# non-blocking check to see if it's still running. The Popen type
# provides the poll() method for this, but that might reap the
# child and free its PID, which would make this a race with
# concurrent callers of the blocking wait() method above, who might
# be about to call os.waitid on that PID. When os.waitid is
# available, use that again here, with the WNOHANG flag. Otherwise
# just use poll() and rely on Python's internal synchronization.
if HAS_WAITID:
poll_result = os.waitid(os.P_PID, self._child.pid,
os.WEXITED | os.WNOWAIT | os.WNOHANG)
else:
poll_result = self._child.poll()
# If either of the poll approaches above returned non-None, do a full
# wait to reap the child, which will not block. Note that we've
# released the child lock here, because wait() will re-acquire it.
if poll_result is not None:
return self.wait()
else:
return None
def kill(self):
with self._child_lock:
if self._child.returncode is None:
# Previously we just used Popen.kill here. However, as of
# Python 3.9, Popen.send_signal (which is called by Popen.kill)
# calls Popen.poll first, as a best-effort check for the same
# PID race that this class is designed around. That means that
# if the child has already exited, Popen.kill will reap it. Now
# that we check Popen.returncode throughout this class (as of
# the same commit that adds this comment), we'll see the
# non-None exit status there as a side effect if reaping has
# happened. That *might* mean we could still call Popen.kill
# here safely. However, there's also the question of how
# Popen.poll's call to os.waitpid would interact with our own
# blocking call to os.waitid from another thread. The worry is
# that the waitpid call might take effect first, causing waitid
# to return a "no child found" error. I can confirm that
# happens on Linux when both calls are blocking. Here though,
# the waitpid call is non-blocking, which *might* mean it can't
# happen first, but that's going to depend on the OS. We could
# assume that it can happen and try to catch the error from
# waitid, but that codepath would be impossible to test. So
# what we actually do here is reimplement the documented
# behavior of Popen.kill: os.kill(pid, SIGKILL) on Unix, and
# Popen.terminate on Windows.
if is_windows():
self._child.terminate()
else:
os.kill(self._child.pid, signal.SIGKILL)
def pid(self):
return self._child.pid
class ReaderHandle(io.IOBase):
r"""A stdout reader that automatically closes its read pipe and awaits
child processes once EOF is reached.
This inherits from :class:`io.IOBase`, and you can call :func:`read` and
related methods like :func:`readlines` on it. When :class:`ReaderHandle` is
used as a context manager with the ``with`` keyword, context exit will
automatically call :func:`close`.
Note that if you don't read to EOF, and you don't call :func:`close` or use
a ``with`` statement, then the child will become a zombie. Using a ``with``
statement is recommended for exception safety.
If one thread is blocked on a call to :func:`read`, then calling
:func:`kill` from another thread is an effective way to unblock the reader.
However, note that killed child processes return a non-zero exit status,
which turns into an exception for the reader by default, unless you use
:func:`Expression.unchecked`.
"""
def __init__(self, handle, read_pipe):
self._handle = handle
self._read_pipe = read_pipe
def read(self, size=-1):
r"""Read bytes from the child's standard output. Because
:class:`ReaderHandle` inherits from :class:`io.IOBase`, related methods
like :func:`readlines` are also available.
>>> reader = cmd("printf", r"a\nb\nc\n").reader()
>>> with reader:
... reader.read(2)
... reader.readlines()
b'a\n'
[b'b\n', b'c\n']
If :func:`read` reaches EOF and awaits the child, and the child exits
with a non-zero status, and :func:`Expression.unchecked` was not used,
:func:`read` will raise a :class:`StatusError`.
>>> with cmd("false").reader() as reader:
... reader.read()
Traceback (most recent call last):
...
duct.StatusError: Expression cmd('false').stdout_capture() returned non-zero exit status: Output(status=1, stdout=None, stderr=None)
""" # noqa: E501
if self._read_pipe is None:
self._handle.wait() # May raise a StatusError.
return b""
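        # Three cases below: size == 0 is a no-op read; a positive size
        # reaches EOF when the pipe returns b''; anything else (None or a
        # negative int) means read-to-end.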
is_zero_size = size == 0
is_positive_size = type(size) is int and size > 0
is_read_to_end = not is_zero_size and not is_positive_size
ret = self._read_pipe.read(size)
if is_read_to_end or (is_positive_size and ret == b""):
self._read_pipe.close()
self._read_pipe = None
self._handle.wait() # May raise a StatusError.
return ret
def close(self):
r"""Close the read pipe and call :func:`kill` on the inner
:class:`Handle`.
:class:`ReaderHandle` is a context manager, and if you use it with the
        ``with`` keyword, context exit will automatically call :func:`close`.
Using a ``with`` statement is recommended, for exception safety.
>>> reader = cmd("echo", "hi").reader()
>>> reader.close()
"""
if self._read_pipe is not None:
self._handle.kill() # Does not raise StatusError.
self._read_pipe.close()
self._read_pipe = None
def try_wait(self):
r"""Check whether the child process(es) have finished, and if so return
an :class:`Output` containing the exit status and any captured output.
This is equivalent to :func:`Handle.try_wait`.
Note that the ``stdout`` field of the returned :class:`Output` will
always be ``None``, because the :class:`ReaderHandle` itself owns the
child's stdout pipe.
>>> input_bytes = bytes([42]) * 1000000
>>> reader = cmd("cat").stdin_bytes(input_bytes).reader()
>>> with reader:
... assert reader.try_wait() is None
... output_bytes = reader.read()
... assert reader.try_wait() is not None
... assert input_bytes == output_bytes
"""
return self._handle.try_wait()
def kill(self):
r"""Call :func:`kill` on the inner :class:`Handle`.
This function does not raise :class:`StatusError`. However, subsequent
calls to :func:`read` are likely to raise :class:`StatusError` if you
didn't use :func:`Expression.unchecked`.
>>> child_code = "import sys, time; print('hi'); sys.stdout.flush(); time.sleep(1000000)"
>>> reader = cmd("python", "-c", child_code).unchecked().reader()
>>> with reader:
... reader.read(3)
... reader.kill()
... reader.read()
b'hi\n'
b''
""" # noqa: E501
self._handle.kill()
def pids(self):
r"""Return the PIDs of all the running child processes. The order of
the PIDs in the returned list is the same as the pipeline order, from
left to right.
"""
return self._handle.pids()
|
oconnor663/duct.py
|
duct.py
|
Python
|
mit
| 54,315
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.notes.models.notes import EventNote
from indico.modules.events.notes.util import build_note_api_data
from indico.modules.events.sessions import Session
from indico.web.http_api import HTTPAPIHook
from indico.web.http_api.responses import HTTPAPIError
@HTTPAPIHook.register
class NoteExportHook(HTTPAPIHook):
TYPES = ('note',)
RE = (r'(?P<event_id>\d+)'
r'((/session/(?P<session_id>\d+)|(/contribution/(?P<contribution_id>\d+)(/(?P<subcontribution_id>\d+))?))?)?')
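    # Matches <event_id>, optionally followed by /session/<session_id> or
    # /contribution/<contribution_id>[/<subcontribution_id>].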
MAX_RECORDS = {}
GUEST_ALLOWED = True
VALID_FORMATS = ('json', 'jsonp', 'xml')
def _getParams(self):
super(NoteExportHook, self)._getParams()
event = self._obj = Event.get(self._pathParams['event_id'], is_deleted=False)
if event is None:
raise HTTPAPIError('No such event', 404)
session_id = self._pathParams.get('session_id')
if session_id:
self._obj = Session.query.with_parent(event).filter_by(id=session_id).first()
if self._obj is None:
raise HTTPAPIError("No such session", 404)
contribution_id = self._pathParams.get('contribution_id')
if contribution_id:
contribution = self._obj = (Contribution.query.with_parent(event)
.filter_by(id=contribution_id, is_deleted=False)
.first())
if contribution is None:
raise HTTPAPIError("No such contribution", 404)
subcontribution_id = self._pathParams.get('subcontribution_id')
if subcontribution_id:
self._obj = SubContribution.query.with_parent(contribution).filter_by(id=subcontribution_id,
is_deleted=False).first()
if self._obj is None:
raise HTTPAPIError("No such subcontribution", 404)
self._note = EventNote.get_for_linked_object(self._obj, preload_event=False)
if self._note is None or self._note.is_deleted:
raise HTTPAPIError("No such note", 404)
def _hasAccess(self, aw):
user = aw.getUser().user if aw.getUser() else None
return self._obj.can_access(user)
def export_note(self, aw):
return build_note_api_data(self._note)
|
belokop/indico_bare
|
indico/modules/events/notes/api.py
|
Python
|
gpl-3.0
| 3,341
|
#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
wuga214/Django-Wuga
|
env/bin/player.py
|
Python
|
apache-2.0
| 2,120
|
"""
Utility classes and tools for handling data contained in bpch files
"""
from dask import delayed
import dask.array as da
import numpy as np
import os
from collections import OrderedDict
from . uff import FortranFile
from . util import cf
from . util.diaginfo import get_diaginfo, get_tracerinfo
#: Default datatype for legacy bpch output
DEFAULT_DTYPE = 'f4'
class BPCHDataBundle(object):
""" A single slice of a single variable inside a bpch file, and all
of its critical accompanying metadata. """
__slots__ = ('_shape', 'dtype', 'endian', 'filename', 'file_position',
'time', 'metadata', '_data', '_mmap', '_dask')
def __init__(self, shape, endian, filename, file_position, time,
metadata, data=None, dtype=None,
use_mmap=False, dask_delayed=False):
self._shape = shape
self.dtype = dtype
self.endian = endian
self.filename = filename
self.file_position = file_position
self.time = time
self.metadata = metadata
if dtype is None:
self.dtype = np.dtype(self.endian + DEFAULT_DTYPE)
else:
self.dtype = dtype
        # Note that data is normally passed in as None, but we keep a hook
        # here so that a payload can be injected at load time, if we want
        # (for instance, to avoid reading/memmapping through a file)
self._data = data
self._mmap = use_mmap
self._dask = dask_delayed
@property
def shape(self):
return self._shape
@property
def ndim(self):
return len(self.shape)
@property
def array(self):
return self.data
@property
def data(self):
if self._data is None:
self._data = self._read()
return self._data
def _read(self):
""" Helper function to load the data referenced by this bundle. """
if self._dask:
d = da.from_delayed(
delayed(read_from_bpch, )(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
),
self.shape, self.dtype
)
else:
d = read_from_bpch(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
)
return d
class BPCHFile(object):
""" A file object for representing BPCH data on disk
Attributes
----------
fp : FortranFile
A pointer to the open unformatted Fortran binary output (the original
bpch file)
var_data, var_attrs : dict
Containers of `BPCHDataBundle`s and dicts, respectively, holding
the accessor functions to the raw bpch data and their associated
metadata
"""
def __init__(self, filename, mode='rb', endian='>',
diaginfo_file='', tracerinfo_file='', eager=False,
use_mmap=False, dask_delayed=False):
""" Load a BPCHFile
Parameters
----------
filename : str
Path to the bpch file on disk
mode : str
Mode string to pass to the file opener; this is currently fixed to
"rb" and all other values will be rejected
endian : str {">", "<", ":"}
Endian-ness of the Fortran output file
{tracerinfo, diaginfo}_file : str
Path to the tracerinfo.dat and diaginfo.dat files containing
metadata pertaining to the output in the bpch file being read.
eager : bool
Flag to immediately read variable data; if "False", then nothing
will be read from the file and you'll need to do so manually
use_mmap : bool
Use memory-mapping to read data from file
dask_delayed : bool
Use dask to create delayed references to the data-reading functions
"""
self.mode = mode
if not mode.startswith('r'):
raise ValueError("Currently only know how to 'r(b)'ead bpch files.")
self.filename = filename
self.fsize = os.path.getsize(self.filename)
self.endian = endian
# Open a pointer to the file
self.fp = FortranFile(self.filename, self.mode, self.endian)
dir_path = os.path.abspath(os.path.dirname(filename))
if not dir_path:
dir_path = os.getcwd()
if not tracerinfo_file:
tracerinfo_file = os.path.join(dir_path, "tracerinfo.dat")
if not os.path.exists(tracerinfo_file):
tracerinfo_file = ''
self.tracerinfo_file = tracerinfo_file
if not diaginfo_file:
diaginfo_file = os.path.join(dir_path, "diaginfo.dat")
if not os.path.exists(diaginfo_file):
diaginfo_file = ''
self.diaginfo_file = diaginfo_file
# Container to record file metadata
self._attributes = OrderedDict()
# Don't necessarily need to save diag/tracer_dict yet
self.diaginfo_df, _ = get_diaginfo(self.diaginfo_file)
self.tracerinfo_df, _ = get_tracerinfo(self.tracerinfo_file)
# Container for bundles contained in the output file.
self.var_data = {}
self.var_attrs = {}
# Critical information for accessing file contents
self._header_pos = None
# Data loading strategy
self.use_mmap = use_mmap
self.dask_delayed = dask_delayed
# Control eager versus deferring reading
self.eager = eager
if (mode.startswith('r') and self.eager):
self._read()
def close(self):
""" Close this bpch file.
"""
if not self.fp.closed:
for v in list(self.var_data):
del self.var_data[v]
self.fp.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _read(self):
""" Parse the entire bpch file on disk and set up easy access to meta-
and data blocks.
"""
self._read_metadata()
self._read_header()
self._read_var_data()
def _read_metadata(self):
""" Read the main metadata packaged within a bpch file, indicating
the output filetype and its title.
"""
filetype = self.fp.readline().strip()
filetitle = self.fp.readline().strip()
# Decode to UTF string, if possible
try:
filetype = str(filetype, 'utf-8')
filetitle = str(filetitle, 'utf-8')
except:
# TODO: Handle this edge-case of converting file metadata more elegantly.
pass
self.__setattr__('filetype', filetype)
self.__setattr__('filetitle', filetitle)
def _read_header(self):
""" Process the header information (data model / grid spec) """
self._header_pos = self.fp.tell()
line = self.fp.readline('20sffii')
modelname, res0, res1, halfpolar, center180 = line
self._attributes.update({
"modelname": str(modelname, 'utf-8').strip(),
"halfpolar": halfpolar,
"center180": center180,
"res": (res0, res1)
})
self.__setattr__('modelname', modelname)
self.__setattr__('res', (res0, res1))
self.__setattr__('halfpolar', halfpolar)
self.__setattr__('center180', center180)
# Re-wind the file
self.fp.seek(self._header_pos)
def _read_var_data(self):
""" Iterate over the block of this bpch file and return handlers
in the form of `BPCHDataBundle`s for access to the data contained
therein.
"""
var_bundles = OrderedDict()
var_attrs = OrderedDict()
n_vars = 0
while self.fp.tell() < self.fsize:
var_attr = OrderedDict()
# read first and second header lines
line = self.fp.readline('20sffii')
modelname, res0, res1, halfpolar, center180 = line
line = self.fp.readline('40si40sdd40s7i')
category_name, number, unit, tau0, tau1, reserved = line[:6]
dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:]
var_attr['number'] = number
# Decode byte-strings to utf-8
category_name = str(category_name, 'utf-8')
var_attr['category'] = category_name.strip()
unit = str(unit, 'utf-8')
# get additional metadata from tracerinfo / diaginfo
try:
cat_df = self.diaginfo_df[
self.diaginfo_df.name == category_name.strip()
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
                # if len(cat_df) > 1:
# raise ValueError(
# "More than one category matching {} found in "
# "diaginfo.dat".format(
# category_name.strip()
# )
# )
# Safe now to select the only row in the DataFrame
cat = cat_df.T.squeeze()
tracer_num = int(cat.offset) + int(number)
diag_df = self.tracerinfo_df[
self.tracerinfo_df.tracer == tracer_num
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
                # if len(diag_df) > 1:
                #     raise ValueError(
                #         "More than one tracer matching {:d} found in "
                #         "tracerinfo.dat".format(tracer_num)
                #     )
                # Safe now to select the only row in the DataFrame
diag = diag_df.T.squeeze()
diag_attr = diag.to_dict()
if not unit.strip(): # unit may be empty in bpch
unit = diag_attr['unit'] # but not in tracerinfo
var_attr.update(diag_attr)
except:
diag = {'name': '', 'scale': 1}
var_attr.update(diag)
var_attr['unit'] = unit
vname = diag['name']
fullname = category_name.strip() + "_" + vname
# parse metadata, get data or set a data proxy
if dim2 == 1:
data_shape = (dim0, dim1) # 2D field
else:
data_shape = (dim0, dim1, dim2)
var_attr['original_shape'] = data_shape
# Add proxy time dimension to shape
data_shape = tuple([1, ] + list(data_shape))
origin = (dim3, dim4, dim5)
var_attr['origin'] = origin
timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1)
pos = self.fp.tell()
# Note that we don't pass a dtype, and assume everything is
# single-fp floats with the correct endian, as hard-coded
var_bundle = BPCHDataBundle(
data_shape, self.endian, self.filename, pos, [timelo, timehi],
metadata=var_attr,
use_mmap=self.use_mmap, dask_delayed=self.dask_delayed
)
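            # Skip past the raw data record without reading it; the bundle
            # above remembers `pos` and loads the data lazily on demand.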
self.fp.skipline()
# Save the data as a "bundle" for concatenating in the final step
if fullname in var_bundles:
var_bundles[fullname].append(var_bundle)
else:
var_bundles[fullname] = [var_bundle, ]
var_attrs[fullname] = var_attr
n_vars += 1
self.var_data = var_bundles
self.var_attrs = var_attrs
def read_from_bpch(filename, file_position, shape, dtype, endian,
use_mmap=False):
""" Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`.
"""
offset = file_position + 4
if use_mmap:
d = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
with FortranFile(filename, 'rb', endian) as ff:
ff.seek(file_position)
d = np.array(ff.readline('*f'))
d = d.reshape(shape, order='F')
# As a sanity check, *be sure* that the resulting data block has the
# correct shape, and fail early if it doesn't.
if (d.shape != shape):
raise IOError("Data chunk read from {} does not have the right shape,"
" (expected {} but got {})"
.format(filename, shape, d.shape))
return d
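# A minimal usage sketch (hypothetical file name, offset, and grid shape,
# assuming big-endian single-precision output):
#
#     chunk = read_from_bpch('ctm.bpch', file_position=136,
#                            shape=(72, 46, 47), dtype=np.dtype('>f4'),
#                            endian='>', use_mmap=True)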
|
darothen/xbpch
|
xbpch/bpch.py
|
Python
|
mit
| 13,438
|
from collections import deque
import urllib
import feedparser
import os.path
from .. import *
class NyaaSource(EpisodeSource):
"""
nyaa.se trusted torrent search
cats = 1_37 - English translated only
filter = 2 - Trusted only
"""
base_name = "nyaa.se: {}"
base_url = "http://www.nyaa.se/?page=rss&cats=1_37&filter=2&{}"
def __init__(self, name, query):
self.name = self.base_name.format(name)
self.query = query
self.url = self.base_url.format(urllib.parse.urlencode({'term': self.query}))
def fetchEpisodes(self):
self.say('Looking for episodes: {}'.format(self.url))
f = feedparser.parse(self.url)
if f.bozo != 0:
self.say("Invalid feed or issue with parsing")
return []
self.say('Found {} episodes'.format(len(f['entries'])))
return flatmap(self.toEpisode, f['entries'])
def toEpisode(self, i):
raise NotImplementedError
class HorribleSubsSource(NyaaSource):
tag = "[HorribleSubs]"
titleMap = str.maketrans('._', ' ')
def __init__(self, shows, path, quality):
self.shows = shows
self.path = path
self.quality = quality
self.query = "{} {}".format(self.tag, self.quality)
super().__init__(
name = self.tag,
query = self.query
)
def shouldFetch(self, showname):
for s in self.shows:
if showname.startswith(s):
return True
return False
def parseTitle(self, title):
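        # After translating '.' and '_' to spaces, titles are expected to
        # look like '<tag> <show words> - <epnum> <quality> <ext>', e.g.
        # (a hypothetical example) '[HorribleSubs] Some Show - 01 720p.mkv'.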
ex = Exception("Unable to parse title: {}".format(title))
parts = deque(title.translate(self.titleMap).split())
if parts.popleft() != self.tag:
raise ex
parts.pop() # Strip file extension
if parts.pop() != self.quality:
raise ex
epnum = parts.pop()
if parts.pop() != '-':
raise ex
return (' '.join(parts), epnum)
def toEpisode(self, i):
try:
showname, epnum = self.parseTitle(i['title'])
if self.shouldFetch(showname):
return [Episode(
source = self.name,
title = "",
showname = showname,
showid = showname,
epid = epnum,
link = i['link'],
root = os.path.join(self.path, showname)
)]
except Exception as e:
self.say(str(e))
return []
|
nepthar/autopirate
|
autopirate/sources/Nyaa.py
|
Python
|
unlicense
| 2,258
|
from django.test import TestCase
from django.db import models
from uppsell.workflow import Workflow, State, BadTransition, \
pre_transition_signal, post_transition_signal, \
pre_transition, post_transition
class ExampleModel(object):
test_state = 'A'
flag = 'RED'
def __init__(self):
self.test_state = "A"
class WorkflowTestCase(TestCase):
_model = None
_manager = None
def setUp(self):
self._model = ExampleModel()
self._manager = Workflow(self._model, "test_state")
ExampleModel.flag = "RED"
def tearDown(self):
self._model, self._manager = None, None
def test_state(self):
s1 = State(self._manager, "A")
s2 = State(self._manager, "B")
s1.add_transition("go", s2)
self.assertTrue(s1.can("go"))
self.assertFalse(s1.can("stop"))
self.assertEqual(s2, s1.next("go"))
def test_manager(self):
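        # Each transition is a (name, from_state, to_state) tuple; the same
        # name may appear more than once, so "TR_1" toggles between A and B.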
transitions = (
("TR_1", "A", "B"),
("TR_1", "B", "A"),
("TR_2", "B", "C"),
("TR_3", "C", "D"),
)
model = ExampleModel()
manager = Workflow(model, "test_state")
manager.set_transitions(transitions)
self.assertRaises(BadTransition, manager.do, "TR_2")
self.assertRaises(BadTransition, manager.do, "TR_3")
self.assertTrue(manager.can("TR_1"))
self.assertFalse(manager.can("TR_2"))
self.assertFalse(manager.can("TR_3"))
manager.do("TR_1")
self.assertEqual("B", getattr(model, "test_state"))
self.assertTrue(manager.can("TR_1"))
self.assertTrue(manager.can("TR_2"))
self.assertFalse(manager.can("TR_3"))
manager.do("TR_1")
self.assertEqual("A", model.test_state)
self.assertTrue(manager.can("TR_1"), "TR_1 is a valid transition from state A")
self.assertFalse(manager.can("TR_2"), "TR_2 is not a valid transition from state A")
self.assertFalse(manager.can("TR_3"), "TR_3 is not a valid transition from state A")
self.assertRaises(BadTransition, manager.do, "TR_3")
def test_manager_pre_transition_signal(self):
transitions = (
("TR_1", "A", "B"),
)
model = ExampleModel()
self.assertEqual('RED', model.flag)
manager = Workflow(model, "test_state")
manager.set_transitions(transitions)
def pre_transition_event(signal, key, transition, sender, model, state):
self.assertEqual("A", state)
self.assertEqual("TR_1", transition)
self.assertEqual(ExampleModel, model.__class__)
ExampleModel.flag = "BLUE"
pre_transition_signal.connect(pre_transition_event)
manager.do("TR_1")
self.assertEqual("B", getattr(model, "test_state"))
self.assertEqual('BLUE', ExampleModel.flag)
pre_transition_signal.disconnect(pre_transition_event)
def test_manager_post_transition_signal(self):
transitions = (("TR_1", "A", "B"),)
model = ExampleModel()
manager = Workflow(model, "test_state")
manager.set_transitions(transitions)
def post_transition_event(signal, *args, **kwargs):
self.assertEqual("B", kwargs["state"])
self.assertEqual("TR_1", kwargs["transition"])
self.assertEqual(ExampleModel, kwargs["model"].__class__)
ExampleModel.flag = "BLUE"
post_transition_signal.connect(post_transition_event)
manager.do("TR_1")
self.assertEqual("B", getattr(model, "test_state"))
self.assertEqual('BLUE', ExampleModel.flag)
def test_pre_transition_decorator(self):
transitions = (("TR_5", "A", "B"),)
self.assertEqual('RED', ExampleModel.flag)
self._manager.set_transitions(transitions)
        # This gets called
@pre_transition("test_state", ExampleModel, "TR_5", "A")
def pre_transition_task(signal, key, transition, sender, model, state):
self.assertEqual("A", state)
self.assertEqual("TR_5", transition)
self.assertEqual(ExampleModel, model.__class__)
ExampleModel.flag = "BLUE"
# This doesn't get called
@pre_transition("test_state", ExampleModel, "WRONG_TRANS", "A")
def pre_transition_task_2(signal, key, transition, sender, model, state):
ExampleModel.flag = "ORANGE"
self._manager.do("TR_5")
self.assertEqual("B", getattr(self._model, "test_state"))
self.assertEqual('BLUE', ExampleModel.flag)
def test_post_transition_decorator(self):
transitions = (("TR_6", "A", "B"),)
self.assertEqual('RED', ExampleModel.flag)
self._manager.set_transitions(transitions)
@post_transition("test_state", ExampleModel, "TR_6")
def post_transition_task(signal, key, transition, sender, model, state):
self.assertEqual("B", state)
self.assertEqual("TR_6", transition)
self.assertEqual(ExampleModel, model.__class__)
ExampleModel.flag = "BLUE"
self._manager.do("TR_6")
self.assertEqual("B", getattr(self._model, "test_state"))
self.assertEqual('BLUE', ExampleModel.flag)
|
upptalk/uppsell
|
uppsell/tests/test_workflow.py
|
Python
|
mit
| 5,415
|
## Copyright 2009 Laurent Bovet <laurent.bovet@windmaster.ch>
## Jordi Puigsegur <jordi.puigsegur@gmail.com>
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import log
import yaml
import inspect
import sys
import os.path
import copy
from Cheetah.Template import Template
wfrog_version = "0.8.2.99-git"
class Configurer(object):
default_filename = None
module_map = None
log_configurer = log.LogConfigurer()
logger = logging.getLogger('config')
def __init__(self, module_map):
self.module_map = module_map
self.extensions = {}
def add_options(self, opt_parser):
opt_parser.add_option("-f", "--config", dest="config",
help="Configuration file (in yaml)", metavar="CONFIG_FILE")
opt_parser.add_option("-s", "--settings", dest="settings",
help="Settings file (in yaml)", metavar="SETTINGS_FILE")
opt_parser.add_option("-H", action="store_true", dest="help_list", help="Gives help on the configuration file and the list of possible config !elements in the yaml config file")
opt_parser.add_option("-E", dest="help_element", metavar="ELEMENT", help="Gives help about a config !element")
opt_parser.add_option("-e", "--extensions", dest="extension_names", metavar="MODULE1,MODULE2,...", help="Comma-separated list of modules containing custom configuration elements")
self.log_configurer.add_options(opt_parser)
def configure(self, options, component, config_file, settings_file=None, embedded=False):
self.config_file = config_file
self.settings_file = settings_file
if options.extension_names:
for ext in options.extension_names.split(","):
self.logger.debug("Loading extension module '"+ext+"'")
self.extensions[ext]=__import__(ext)
if options.help_list:
if component.__doc__ is not None:
print component.__doc__
for (k,v) in self.module_map:
print k
print "-"*len(k) +"\n"
self.print_help(v)
if options.extension_names:
print "Extensions"
print "----------\n"
for ext in self.extensions:
print "[" + ext + "]"
print
self.print_help(self.extensions[ext])
# Adds logger documentation
print self.log_configurer.__doc__
print " Use option -H ELEMENT for help on a particular !element"
sys.exit()
if options.help_element:
element = options.help_element
            if element[0] != '!':
element = '!' + element
desc = {}
for(k,v) in self.module_map:
desc.update(self.get_help_desc(v))
if len(desc) == 0:
for ext in self.extensions:
desc.update(self.get_help_desc(self.extensions[ext]))
if desc.has_key(element):
print
print element + " [" + desc[element][1] +"]"
print " " + desc[element][0]
print
else:
print "Element "+element+" not found or not documented"
sys.exit()
if not embedded and options.config:
self.config_file = options.config
settings_warning=False
if self.settings_file is None:
if options.settings is not None:
self.settings_file = options.settings
else:
settings_warning=True
self.settings_file = os.path.dirname(self.config_file)+'/../../wfcommon/config/default-settings.yaml'
settings = yaml.load( file(self.settings_file, 'r') )
variables = {}
variables['settings']=settings
config = yaml.load( str(Template(file=file(self.config_file, "r"), searchList=[variables])))
if settings is not None:
context = copy.deepcopy(settings)
else:
context = {}
context['_yaml_config_file'] = self.config_file
context['os']=sys.platform
if not embedded:
self.log_configurer.configure(options, config, context)
self.logger.info("Starting wfrog " + wfrog_version)
if settings_warning:
self.logger.warn('User settings are missing. Loading default ones. Run \'wfrog -S\' for user settings setup.')
self.logger.info("Loaded settings file " + os.path.normpath(self.settings_file))
self.logger.debug('Loaded settings %s', repr(settings))
self.logger.debug("Loaded config file " + os.path.normpath(self.config_file))
if config.has_key('init'):
for k,v in config['init'].iteritems():
self.logger.debug("Initializing "+k)
try:
v.init(context=context)
except AttributeError:
pass # In case the element has not init method
return ( config, context )
def print_help(self, module):
desc = self.get_help_desc(module, summary=True)
sorted = desc.keys()
sorted.sort()
for k in sorted:
print k
print " " + desc[k][0]
print
def get_help_desc(self, module, summary=False):
self.logger.debug("Getting info on module '"+module.__name__+"'")
elements = inspect.getmembers(module, lambda l : inspect.isclass(l) and yaml.YAMLObject in inspect.getmro(l))
desc={}
for element in elements:
self.logger.debug("Getting doc of "+element[0])
# Gets the documentation of the first superclass
superclass = inspect.getmro(element[1])[1]
fulldoc=superclass.__doc__
            # Add the doc of the super-super-class if _element_doc is set
if hasattr(inspect.getmro(superclass)[1], "_element_doc") and inspect.getmro(superclass)[1].__doc__ is not None:
fulldoc = fulldoc + inspect.getmro(superclass)[1].__doc__
firstline=fulldoc.split(".")[0]
self.logger.debug(firstline)
module_name = module.__name__.split('.')[-1]
if summary:
desc[element[1].yaml_tag] = [ firstline, module_name ]
else:
desc[element[1].yaml_tag] = [ fulldoc, module_name ]
return desc
|
wfrog/wfrog
|
wfcommon/config.py
|
Python
|
gpl-3.0
| 7,089
|
# Copyright 2016 Cyril Gaudin (Camptocamp)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class TestJournal(TransactionCase):
def test_open_reconciliation_rules(self):
        # Just test that the method returns the expected view
result = self.env['account.journal'].open_reconciliation_rules()
self.assertEqual('account.reconcile.rule', result['res_model'])
self.assertEqual('form', result['view_type'])
|
OCA/bank-statement-reconcile
|
account_reconcile_rule/tests/test_journal.py
|
Python
|
agpl-3.0
| 499
|
#--------------------------------------------------------------------------
# Software: InVesalius - 3D Medical Image Reconstruction Software
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, under version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import multiprocessing
import os
import plistlib
import random
import tempfile
import weakref
import vtk
import wx
from wx.lib.pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.data.imagedata_utils as iu
import invesalius.data.polydata_utils as pu
import invesalius.project as prj
import invesalius.session as ses
import invesalius.data.surface_process as surface_process
import invesalius.utils as utl
import invesalius.data.vtk_utils as vu
from invesalius.data import cy_mesh
# TODO: Check ReleaseDataFlagOn and SetSource
class Surface():
"""
Represent both vtkPolyData and associated properties.
"""
general_index = -1
def __init__(self, index=None, name=""):
Surface.general_index += 1
if index is None:
self.index = Surface.general_index
else:
self.index = index
Surface.general_index -= 1
self.polydata = ''
self.colour = ''
self.transparency = const.SURFACE_TRANSPARENCY
self.volume = 0.0
self.area = 0.0
self.is_shown = 1
if not name:
self.name = const.SURFACE_NAME_PATTERN %(self.index+1)
else:
self.name = name
def SavePlist(self, dir_temp, filelist):
filename = 'surface_%d' % self.index
vtp_filename = filename + '.vtp'
vtp_filepath = os.path.join(dir_temp, vtp_filename)
pu.Export(self.polydata, vtp_filepath, bin=True)
filelist[vtp_filepath] = vtp_filename
surface = {'colour': self.colour,
'index': self.index,
'name': self.name,
'polydata': vtp_filename,
'transparency': self.transparency,
'visible': bool(self.is_shown),
'volume': self.volume,
'area': self.area,
}
plist_filename = filename + '.plist'
#plist_filepath = os.path.join(dir_temp, filename + '.plist')
temp_plist = tempfile.mktemp()
plistlib.writePlist(surface, temp_plist)
filelist[temp_plist] = plist_filename
return plist_filename
def OpenPList(self, filename):
sp = plistlib.readPlist(filename)
dirpath = os.path.abspath(os.path.split(filename)[0])
self.index = sp['index']
self.name = sp['name']
self.colour = sp['colour']
self.transparency = sp['transparency']
self.is_shown = sp['visible']
self.volume = sp['volume']
try:
self.area = sp['area']
except KeyError:
self.area = 0.0
self.polydata = pu.Import(os.path.join(dirpath, sp['polydata']))
Surface.general_index = max(Surface.general_index, self.index)
def _set_class_index(self, index):
Surface.general_index = index
# TODO: will be initialized inside control as it is being done?
class SurfaceManager():
"""
Responsible for:
- creating new surfaces;
- managing surfaces' properties;
- removing existing surfaces.
Send pubsub events to other classes:
- GUI: Update progress status
    - volume_viewer: Sends surface actors as they are created
"""
def __init__(self):
self.actors_dict = {}
self.last_surface_index = 0
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.AddNewActor, 'Create surface')
Publisher.subscribe(self.SetActorTransparency,
'Set surface transparency')
Publisher.subscribe(self.SetActorColour,
'Set surface colour')
Publisher.subscribe(self.OnChangeSurfaceName, 'Change surface name')
Publisher.subscribe(self.OnShowSurface, 'Show surface')
Publisher.subscribe(self.OnExportSurface,'Export surface to file')
Publisher.subscribe(self.OnLoadSurfaceDict, 'Load surface dict')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnSelectSurface, 'Change surface selected')
#----
Publisher.subscribe(self.OnSplitSurface, 'Split surface')
Publisher.subscribe(self.OnLargestSurface,
'Create surface from largest region')
Publisher.subscribe(self.OnSeedSurface, "Create surface from seeds")
Publisher.subscribe(self.OnDuplicate, "Duplicate surfaces")
Publisher.subscribe(self.OnRemove,"Remove surfaces")
Publisher.subscribe(self.UpdateSurfaceInterpolation, 'Update Surface Interpolation')
Publisher.subscribe(self.OnImportSurfaceFile, 'Import surface file')
def OnDuplicate(self, pubsub_evt):
selected_items = pubsub_evt.data
proj = prj.Project()
surface_dict = proj.surface_dict
for index in selected_items:
original_surface = surface_dict[index]
# compute copy name
name = original_surface.name
names_list = [surface_dict[i].name for i in surface_dict.keys()]
new_name = utl.next_copy_name(name, names_list)
# create new mask
self.CreateSurfaceFromPolydata(polydata = original_surface.polydata,
overwrite = False,
name = new_name,
colour = original_surface.colour,
transparency = original_surface.transparency,
volume = original_surface.volume,
area = original_surface.area)
def OnRemove(self, pubsub_evt):
selected_items = pubsub_evt.data
proj = prj.Project()
old_dict = self.actors_dict
new_dict = {}
if selected_items:
for index in selected_items:
proj.RemoveSurface(index)
actor = old_dict[index]
for i in old_dict:
if i < index:
new_dict[i] = old_dict[i]
if i > index:
new_dict[i-1] = old_dict[i]
old_dict = new_dict
Publisher.sendMessage('Remove surface actor from viewer', actor)
self.actors_dict = new_dict
if self.last_surface_index in selected_items:
if self.actors_dict:
self.last_surface_index = 0
else:
self.last_surface_index = None
def OnSeedSurface(self, pubsub_evt):
"""
Create a new surface, based on the last selected surface,
        using the seeds the user added to the reference surface.
"""
points_id_list = pubsub_evt.data
index = self.last_surface_index
proj = prj.Project()
surface = proj.surface_dict[index]
new_polydata = pu.JoinSeedsParts(surface.polydata,
points_id_list)
index = self.CreateSurfaceFromPolydata(new_polydata)
Publisher.sendMessage('Show single surface', (index, True))
#self.ShowActor(index, True)
def OnSplitSurface(self, pubsub_evt):
"""
Create n new surfaces, based on the last selected surface,
according to their connectivity.
"""
index = self.last_surface_index
proj = prj.Project()
surface = proj.surface_dict[index]
index_list = []
new_polydata_list = pu.SplitDisconectedParts(surface.polydata)
for polydata in new_polydata_list:
index = self.CreateSurfaceFromPolydata(polydata)
index_list.append(index)
#self.ShowActor(index, True)
Publisher.sendMessage('Show multiple surfaces', (index_list, True))
def OnLargestSurface(self, pubsub_evt):
"""
Create a new surface, based on largest part of the last
selected surface.
"""
index = self.last_surface_index
proj = prj.Project()
surface = proj.surface_dict[index]
new_polydata = pu.SelectLargestPart(surface.polydata)
new_index = self.CreateSurfaceFromPolydata(new_polydata)
Publisher.sendMessage('Show single surface', (new_index, True))
def OnImportSurfaceFile(self, pubsub_evt):
"""
Creates a new surface from a surface file (STL, PLY, OBJ or VTP)
"""
filename = pubsub_evt.data
self.CreateSurfaceFromFile(filename)
def CreateSurfaceFromFile(self, filename):
if filename.lower().endswith('.stl'):
reader = vtk.vtkSTLReader()
elif filename.lower().endswith('.ply'):
reader = vtk.vtkPLYReader()
elif filename.lower().endswith('.obj'):
reader = vtk.vtkOBJReader()
elif filename.lower().endswith('.vtp'):
reader = vtk.vtkXMLPolyDataReader()
else:
wx.MessageBox(_("File format not reconized by InVesalius"), _("Import surface error"))
return
reader.SetFileName(filename)
reader.Update()
polydata = reader.GetOutput()
if polydata.GetNumberOfPoints() == 0:
wx.MessageBox(_("InVesalius was not able to import this surface"), _("Import surface error"))
else:
name = os.path.splitext(os.path.split(filename)[-1])[0]
self.CreateSurfaceFromPolydata(polydata, name=name)
def CreateSurfaceFromPolydata(self, polydata, overwrite=False,
name=None, colour=None,
transparency=None, volume=None, area=None):
normals = vtk.vtkPolyDataNormals()
normals.SetInputData(polydata)
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
normals.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(normals.GetOutput())
mapper.ScalarVisibilityOff()
mapper.ImmediateModeRenderingOn() # improve performance
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if overwrite:
surface = Surface(index = self.last_surface_index)
else:
surface = Surface()
if not colour:
surface.colour = random.choice(const.SURFACE_COLOUR)
else:
surface.colour = colour
surface.polydata = polydata
if transparency:
surface.transparency = transparency
if name:
surface.name = name
# Append surface into Project.surface_dict
proj = prj.Project()
if overwrite:
proj.ChangeSurface(surface)
else:
index = proj.AddSurface(surface)
surface.index = index
self.last_surface_index = index
# Set actor colour and transparency
actor.GetProperty().SetColor(surface.colour)
actor.GetProperty().SetOpacity(1-surface.transparency)
self.actors_dict[surface.index] = actor
session = ses.Session()
session.ChangeProject()
# The following lines have to be here, otherwise all volumes disappear
if not volume or not area:
triangle_filter = vtk.vtkTriangleFilter()
triangle_filter.SetInputData(polydata)
triangle_filter.Update()
measured_polydata = vtk.vtkMassProperties()
measured_polydata.SetInputConnection(triangle_filter.GetOutputPort())
measured_polydata.Update()
volume = measured_polydata.GetVolume()
area = measured_polydata.GetSurfaceArea()
surface.volume = volume
surface.area = area
print ">>>>", surface.volume
else:
surface.volume = volume
surface.area = area
self.last_surface_index = surface.index
Publisher.sendMessage('Load surface actor into viewer', actor)
Publisher.sendMessage('Update surface info in GUI',
(surface.index, surface.name,
surface.colour, surface.volume,
surface.area, surface.transparency))
return surface.index
def OnCloseProject(self, pubsub_evt):
self.CloseProject()
def CloseProject(self):
for index in self.actors_dict:
Publisher.sendMessage('Remove surface actor from viewer', self.actors_dict[index])
del self.actors_dict
self.actors_dict = {}
# restarting the surface index
Surface.general_index = -1
def OnSelectSurface(self, pubsub_evt):
index = pubsub_evt.data
#self.last_surface_index = index
# self.actors_dict.
proj = prj.Project()
surface = proj.surface_dict[index]
Publisher.sendMessage('Update surface info in GUI',
(index, surface.name,
surface.colour, surface.volume,
surface.area, surface.transparency))
self.last_surface_index = index
# if surface.is_shown:
self.ShowActor(index, True)
def OnLoadSurfaceDict(self, pubsub_evt):
surface_dict = pubsub_evt.data
for key in surface_dict:
surface = surface_dict[key]
# Map polygonal data (vtkPolyData) to graphics primitives.
normals = vtk.vtkPolyDataNormals()
normals.SetInputData(surface.polydata)
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
# normals.GetOutput().ReleaseDataFlagOn()
# Improve performance
stripper = vtk.vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
stripper.PassThroughCellIdsOn()
stripper.PassThroughPointIdsOn()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stripper.GetOutputPort())
mapper.ScalarVisibilityOff()
mapper.ImmediateModeRenderingOn() # improve performance
# Represent an object (geometry & properties) in the rendered scene
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Set actor colour and transparency
actor.GetProperty().SetColor(surface.colour)
actor.GetProperty().SetOpacity(1-surface.transparency)
self.actors_dict[surface.index] = actor
# Send actor by pubsub to viewer's render
Publisher.sendMessage('Load surface actor into viewer', (actor))
Publisher.sendMessage('Update status text in GUI',
_("Ready"))
# The following lines have to be here, otherwise all volumes disappear
Publisher.sendMessage('Update surface info in GUI',
(surface.index, surface.name,
surface.colour, surface.volume,
surface.area, surface.transparency))
if not surface.is_shown:
self.ShowActor(key, False)
####
#(mask_index, surface_name, quality, fill_holes, keep_largest)
def AddNewActor(self, pubsub_evt):
"""
Create surface actor, save into project and send it to viewer.
"""
slice_, mask, surface_parameters = pubsub_evt.data
matrix = slice_.matrix
filename_img = slice_.matrix_filename
spacing = slice_.spacing
algorithm = surface_parameters['method']['algorithm']
options = surface_parameters['method']['options']
surface_name = surface_parameters['options']['name']
quality = surface_parameters['options']['quality']
fill_holes = surface_parameters['options']['fill']
keep_largest = surface_parameters['options']['keep_largest']
mode = 'CONTOUR' # 'GRAYSCALE'
min_value, max_value = mask.threshold_range
colour = mask.colour
try:
overwrite = surface_parameters['options']['overwrite']
except KeyError:
overwrite = False
mask.matrix.flush()
if quality in const.SURFACE_QUALITY.keys():
imagedata_resolution = const.SURFACE_QUALITY[quality][0]
smooth_iterations = const.SURFACE_QUALITY[quality][1]
smooth_relaxation_factor = const.SURFACE_QUALITY[quality][2]
decimate_reduction = const.SURFACE_QUALITY[quality][3]
#if imagedata_resolution:
#imagedata = iu.ResampleImage3D(imagedata, imagedata_resolution)
pipeline_size = 4
if decimate_reduction:
pipeline_size += 1
if (smooth_iterations and smooth_relaxation_factor):
pipeline_size += 1
if fill_holes:
pipeline_size += 1
if keep_largest:
pipeline_size += 1
## Update progress value in GUI
UpdateProgress = vu.ShowProgress(pipeline_size)
UpdateProgress(0, _("Creating 3D surface..."))
language = ses.Session().language
if (prj.Project().original_orientation == const.CORONAL):
flip_image = False
else:
flip_image = True
n_processors = multiprocessing.cpu_count()
pipe_in, pipe_out = multiprocessing.Pipe()
o_piece = 1
piece_size = 2000
n_pieces = int(round(matrix.shape[0] / piece_size + 0.5, 0))
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
p = []
for i in xrange(n_processors):
sp = surface_process.SurfaceProcess(pipe_in, filename_img,
matrix.shape, matrix.dtype,
mask.temp_file,
mask.matrix.shape,
mask.matrix.dtype,
spacing,
mode, min_value, max_value,
decimate_reduction,
smooth_relaxation_factor,
smooth_iterations, language,
flip_image, q_in, q_out,
algorithm != 'Default',
algorithm,
imagedata_resolution)
p.append(sp)
sp.start()
for i in xrange(n_pieces):
init = i * piece_size
end = init + piece_size + o_piece
roi = slice(init, end)
q_in.put(roi)
print "new_piece", roi
for i in p:
q_in.put(None)
none_count = 1
while 1:
msg = pipe_out.recv()
if(msg is None):
none_count += 1
else:
UpdateProgress(msg[0]/(n_pieces * pipeline_size), msg[1])
if none_count > n_pieces:
break
polydata_append = vtk.vtkAppendPolyData()
# polydata_append.ReleaseDataFlagOn()
t = n_pieces
while t:
filename_polydata = q_out.get()
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(filename_polydata)
# reader.ReleaseDataFlagOn()
reader.Update()
# reader.GetOutput().ReleaseDataFlagOn()
polydata = reader.GetOutput()
# polydata.SetSource(None)
polydata_append.AddInputData(polydata)
del reader
del polydata
t -= 1
polydata_append.Update()
# polydata_append.GetOutput().ReleaseDataFlagOn()
polydata = polydata_append.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del polydata_append
if algorithm == 'ca_smoothing':
normals = vtk.vtkPolyDataNormals()
normals_ref = weakref.ref(normals)
normals_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(normals_ref(), _("Creating 3D surface...")))
normals.SetInputData(polydata)
# normals.ReleaseDataFlagOn()
#normals.SetFeatureAngle(80)
#normals.AutoOrientNormalsOn()
normals.ComputeCellNormalsOn()
# normals.GetOutput().ReleaseDataFlagOn()
normals.Update()
del polydata
polydata = normals.GetOutput()
# polydata.SetSource(None)
del normals
clean = vtk.vtkCleanPolyData()
# clean.ReleaseDataFlagOn()
# clean.GetOutput().ReleaseDataFlagOn()
clean_ref = weakref.ref(clean)
clean_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(clean_ref(), _("Creating 3D surface...")))
clean.SetInputData(polydata)
clean.PointMergingOn()
clean.Update()
del polydata
polydata = clean.GetOutput()
# polydata.SetSource(None)
del clean
# try:
# polydata.BuildLinks()
# except TypeError:
# polydata.BuildLinks(0)
# polydata = ca_smoothing.ca_smoothing(polydata, options['angle'],
# options['max distance'],
# options['min weight'],
# options['steps'])
mesh = cy_mesh.Mesh(polydata)
cy_mesh.ca_smoothing(mesh, options['angle'],
options['max distance'],
options['min weight'],
options['steps'])
# polydata = mesh.to_vtk()
# polydata.SetSource(None)
# polydata.DebugOn()
else:
#smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother = vtk.vtkSmoothPolyDataFilter()
smoother_ref = weakref.ref(smoother)
smoother_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(smoother_ref(), _("Creating 3D surface...")))
smoother.SetInputData(polydata)
smoother.SetNumberOfIterations(smooth_iterations)
smoother.SetRelaxationFactor(smooth_relaxation_factor)
smoother.SetFeatureAngle(80)
#smoother.SetEdgeAngle(90.0)
#smoother.SetPassBand(0.1)
smoother.BoundarySmoothingOn()
smoother.FeatureEdgeSmoothingOn()
#smoother.NormalizeCoordinatesOn()
#smoother.NonManifoldSmoothingOn()
# smoother.ReleaseDataFlagOn()
# smoother.GetOutput().ReleaseDataFlagOn()
smoother.Update()
del polydata
polydata = smoother.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del smoother
if decimate_reduction:
print "Decimating", decimate_reduction
decimation = vtk.vtkQuadricDecimation()
# decimation.ReleaseDataFlagOn()
decimation.SetInputData(polydata)
decimation.SetTargetReduction(decimate_reduction)
decimation_ref = weakref.ref(decimation)
decimation_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(decimation_ref(), _("Creating 3D surface...")))
#decimation.PreserveTopologyOn()
#decimation.SplittingOff()
#decimation.BoundaryVertexDeletionOff()
# decimation.GetOutput().ReleaseDataFlagOn()
decimation.Update()
del polydata
polydata = decimation.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del decimation
to_measure = polydata
#to_measure.Register(None)
# to_measure.SetSource(None)
if keep_largest:
conn = vtk.vtkPolyDataConnectivityFilter()
conn.SetInputData(polydata)
conn.SetExtractionModeToLargestRegion()
conn_ref = weakref.ref(conn)
conn_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(conn_ref(), _("Creating 3D surface...")))
conn.Update()
# conn.GetOutput().ReleaseDataFlagOn()
del polydata
polydata = conn.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del conn
        # Filter used to detect and fill holes. Only fills holes on boundary edges.
#TODO: Hey! This piece of code is the same from
#polydata_utils.FillSurfaceHole, we need to review this.
if fill_holes:
filled_polydata = vtk.vtkFillHolesFilter()
# filled_polydata.ReleaseDataFlagOn()
filled_polydata.SetInputData(polydata)
filled_polydata.SetHoleSize(300)
filled_polydata_ref = weakref.ref(filled_polydata)
filled_polydata_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(filled_polydata_ref(), _("Creating 3D surface...")))
filled_polydata.Update()
# filled_polydata.GetOutput().ReleaseDataFlagOn()
del polydata
polydata = filled_polydata.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
# polydata.DebugOn()
del filled_polydata
normals = vtk.vtkPolyDataNormals()
# normals.ReleaseDataFlagOn()
normals_ref = weakref.ref(normals)
normals_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(normals_ref(), _("Creating 3D surface...")))
normals.SetInputData(polydata)
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
# normals.GetOutput().ReleaseDataFlagOn()
normals.Update()
del polydata
polydata = normals.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del normals
# Improve performance
stripper = vtk.vtkStripper()
# stripper.ReleaseDataFlagOn()
stripper_ref = weakref.ref(stripper)
stripper_ref().AddObserver("ProgressEvent", lambda obj,evt:
UpdateProgress(stripper_ref(), _("Creating 3D surface...")))
stripper.SetInputData(polydata)
stripper.PassThroughCellIdsOn()
stripper.PassThroughPointIdsOn()
# stripper.GetOutput().ReleaseDataFlagOn()
stripper.Update()
del polydata
polydata = stripper.GetOutput()
#polydata.Register(None)
# polydata.SetSource(None)
del stripper
# Map polygonal data (vtkPolyData) to graphics primitives.
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
mapper.ScalarVisibilityOff()
# mapper.ReleaseDataFlagOn()
mapper.ImmediateModeRenderingOn() # improve performance
# Represent an object (geometry & properties) in the rendered scene
actor = vtk.vtkActor()
actor.SetMapper(mapper)
del mapper
#Create Surface instance
if overwrite:
surface = Surface(index = self.last_surface_index)
else:
surface = Surface(name=surface_name)
surface.colour = colour
surface.polydata = polydata
del polydata
# Set actor colour and transparency
actor.GetProperty().SetColor(colour)
actor.GetProperty().SetOpacity(1-surface.transparency)
prop = actor.GetProperty()
interpolation = int(ses.Session().surface_interpolation)
prop.SetInterpolation(interpolation)
proj = prj.Project()
if overwrite:
proj.ChangeSurface(surface)
else:
index = proj.AddSurface(surface)
surface.index = index
self.last_surface_index = index
session = ses.Session()
session.ChangeProject()
# The following lines have to be here, otherwise all volumes disappear
measured_polydata = vtk.vtkMassProperties()
# measured_polydata.ReleaseDataFlagOn()
measured_polydata.SetInputData(to_measure)
volume = float(measured_polydata.GetVolume())
area = float(measured_polydata.GetSurfaceArea())
surface.volume = volume
surface.area = area
self.last_surface_index = surface.index
del measured_polydata
del to_measure
Publisher.sendMessage('Load surface actor into viewer', actor)
# Send actor by pubsub to viewer's render
if overwrite and self.actors_dict.keys():
old_actor = self.actors_dict[self.last_surface_index]
Publisher.sendMessage('Remove surface actor from viewer', old_actor)
# Save actor for future management tasks
self.actors_dict[surface.index] = actor
Publisher.sendMessage('Update surface info in GUI',
(surface.index, surface.name,
surface.colour, surface.volume,
surface.area,
surface.transparency))
        # When the progress finishes, the bar is cleared.
UpdateProgress = vu.ShowProgress(1)
UpdateProgress(0, _("Ready"))
Publisher.sendMessage('Update status text in GUI', _("Ready"))
Publisher.sendMessage('End busy cursor')
del actor
def UpdateSurfaceInterpolation(self, pub_evt):
interpolation = int(ses.Session().surface_interpolation)
key_actors = self.actors_dict.keys()
for key in self.actors_dict:
self.actors_dict[key].GetProperty().SetInterpolation(interpolation)
Publisher.sendMessage('Render volume viewer')
def RemoveActor(self, index):
"""
Remove actor, according to given actor index.
"""
Publisher.sendMessage('Remove surface actor from viewer', (index))
self.actors_dict.pop(index)
# Remove surface from project's surface_dict
proj = prj.Project()
proj.surface_dict.pop(index)
def OnChangeSurfaceName(self, pubsub_evt):
index, name = pubsub_evt.data
proj = prj.Project()
proj.surface_dict[index].name = name
def OnShowSurface(self, pubsub_evt):
index, value = pubsub_evt.data
self.ShowActor(index, value)
def ShowActor(self, index, value):
"""
Show or hide actor, according to given actor index and value.
"""
self.actors_dict[index].SetVisibility(value)
# Update value in project's surface_dict
proj = prj.Project()
proj.surface_dict[index].is_shown = value
Publisher.sendMessage('Render volume viewer')
def SetActorTransparency(self, pubsub_evt):
"""
        Set actor transparency (opposite of opacity) according to given actor
index and value.
"""
index, value = pubsub_evt.data
self.actors_dict[index].GetProperty().SetOpacity(1-value)
# Update value in project's surface_dict
proj = prj.Project()
proj.surface_dict[index].transparency = value
Publisher.sendMessage('Render volume viewer')
def SetActorColour(self, pubsub_evt):
"""
"""
index, colour = pubsub_evt.data
self.actors_dict[index].GetProperty().SetColor(colour)
# Update value in project's surface_dict
proj = prj.Project()
proj.surface_dict[index].colour = colour
Publisher.sendMessage('Render volume viewer')
def OnExportSurface(self, pubsub_evt):
filename, filetype = pubsub_evt.data
if (filetype == const.FILETYPE_STL) or\
(filetype == const.FILETYPE_VTP) or\
(filetype == const.FILETYPE_PLY) or\
(filetype == const.FILETYPE_STL_ASCII):
# First we identify all surfaces that are selected
# (if any)
proj = prj.Project()
polydata_list = []
for index in proj.surface_dict:
surface = proj.surface_dict[index]
if surface.is_shown:
polydata_list.append(surface.polydata)
if len(polydata_list) == 0:
utl.debug("oops - no polydata")
return
elif len(polydata_list) == 1:
polydata = polydata_list[0]
else:
polydata = pu.Merge(polydata_list)
# Having a polydata that represents all surfaces
# selected, we write it, according to filetype
if filetype == const.FILETYPE_STL:
writer = vtk.vtkSTLWriter()
writer.SetFileTypeToBinary()
elif filetype == const.FILETYPE_STL_ASCII:
writer = vtk.vtkSTLWriter()
writer.SetFileTypeToASCII()
elif filetype == const.FILETYPE_VTP:
writer = vtk.vtkXMLPolyDataWriter()
#elif filetype == const.FILETYPE_IV:
# writer = vtk.vtkIVWriter()
elif filetype == const.FILETYPE_PLY:
writer = vtk.vtkPLYWriter()
writer.SetFileTypeToASCII()
writer.SetColorModeToOff()
#writer.SetDataByteOrderToLittleEndian()
#writer.SetColorModeToUniformCellColor()
#writer.SetColor(255, 0, 0)
if filetype in (const.FILETYPE_STL, const.FILETYPE_PLY):
# Invert normals
normals = vtk.vtkPolyDataNormals()
normals.SetInputData(polydata)
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
# normals.GetOutput().ReleaseDataFlagOn()
normals.UpdateInformation()
normals.Update()
polydata = normals.GetOutput()
filename = filename.encode(wx.GetDefaultPyEncoding())
writer.SetFileName(filename)
writer.SetInputData(polydata)
writer.Write()
|
fabio-otsuka/invesalius3
|
invesalius/data/surface.py
|
Python
|
gpl-2.0
| 35,564
|
# -----------------------------------------------------------
# demonstrates how to extract the items of a dictionary using items()
# (C) 2015 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# define a dictionary
capital = {
"France": "Paris",
"Switzerland": "Bern",
"Germany": "Berlin"
}
# output dictionary content (key-value-pairs)
print (capital)
# extract the items of the dictionary
entries = capital.items()
# count the items
print("#items:", len(entries))
print(entries)
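# a short usage sketch (added): each item is a (key, value) tuple that can be
# unpacked directly in a loop
for country, city in capital.items():
    print(country, "->", city)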
|
plasmashadow/training-python
|
lists-dictionaries-tuples/list-items.py
|
Python
|
gpl-2.0
| 616
|
import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.contrib.layers.python.layers import regularizers
module_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.batch_generator import datasets
slim = tf.contrib.slim
tf.reset_default_graph()
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
# Contants
image_channels = 3
time_frames_to_consider = 8
time_frames_to_predict = 8
interval=4 # frames to jump !
heigth_train= 64
width_train= 64
custom_test_size=[160,210]
heigth_test, width_test = custom_test_size
#===================================================================
# Generative Model Parameters
#===================================================================
# +1 for input from previous layer !
scale_level_feature_maps= [[16, 32, 64, 3],
[16, 32, 64, 3],
[32, 64, 128, 3],
[32, 64, 128, 128, 3]]
# as size of image increase in scaling ... conv layer increases !
scale_level_kernel_size = [
[3, 3, 3, 3],
[5, 3, 3, 3],
[5, 5, 5, 5],
[7, 5, 5, 5, 5]
]
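# (Added note) create_graph() below rescales by 1.0 / 2**(len_scale - 1 - i),
# i.e. factors [1/8, 1/4, 1/2, 1] for these four scales, so a 64x64 training
# frame is processed at 8x8, 16x16, 32x32 and 64x64.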
#===================================================================
# Descriminative Model Parameters
#===================================================================
disc_scale_level_feature_maps = [[16],
[16, 32, 32],
[32, 64, 64],
[32, 64, 128, 128]]
# kernel sizes for each convolution of each scale network in the discriminator model
disc_scale_level_kernel_size = [[3],
[3, 3, 3],
[5, 5, 5],
[7, 7, 5, 5]]
# layer sizes for each fully-connected layer of each scale network in the discriminator model
# layer connecting conv to fully-connected is dynamically generated when creating the model
disc_fc_layer_units = [[128, 64, 1],
[256, 128, 1],
[256, 128, 1],
[256, 128, 1]]
#===================================================================
# regularizer !
l2_val = 0.00005
# Adam optimizer !
adam_learning_rate = 0.0004
# Tensorboard images to show
batch_size = 8
number_of_images_to_show = 4
assert number_of_images_to_show <= batch_size, "images to show should be less !"
timesteps=24
file_path = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(file_path, "../../data/")
log_dir_file_path = os.path.join(file_path, "../../logs/")
model_save_file_path = os.path.join(file_path, "../../checkpoint/")
output_video_save_file_path = os.path.join(file_path, "../../output/")
iterations = "iterations/"
best = "best/"
checkpoint_iterations = 100
best_model_iterations = 100
test_model_iterations = 5
best_loss = float("inf")
heigth, width = heigth_train, width_train
channels = 3
assert timesteps>=time_frames_to_consider and timesteps>=time_frames_to_predict, "time steps must be greater !"
#==================== COPIED CODE ===============================================
#
# TENSORBOARD VISUALIZATION FOR SHARPNESS AND (Peak Signal to Noise Ratio){PSNR}
#=================================================================================
def log10(t):
"""
Calculates the base-10 log of each element in t.
@param t: The tensor from which to calculate the base-10 log.
@return: A tensor with the base-10 log of each element in t.
"""
numerator = tf.log(t)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def psnr_error(gen_frames, gt_frames):
"""
Computes the Peak Signal to Noise Ratio error between the generated images and the ground
truth images.
@param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
generator model.
@param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
each frame in gen_frames.
@return: A scalar tensor. The mean Peak Signal to Noise Ratio error over each frame in the
batch.
"""
shape = tf.shape(gen_frames)
num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
square_diff = tf.square(gt_frames - gen_frames)
batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))
return tf.reduce_mean(batch_errors)
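# (Added note) the expression above equals PSNR = 10 * log10(MAX^2 / MSE) with
# the peak signal value MAX taken as 1, i.e. it assumes frames scaled to the
# unit range.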
def sharp_diff_error(gen_frames, gt_frames):
"""
Computes the Sharpness Difference error between the generated images and the ground truth
images.
@param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
generator model.
@param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
each frame in gen_frames.
@return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
"""
shape = tf.shape(gen_frames)
num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
# gradient difference
# create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
# TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = -1 * pos
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]
strides = [1, 1, 1, 1] # stride of (1, 1)
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
gen_grad_sum = gen_dx + gen_dy
gt_grad_sum = gt_dx + gt_dy
grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)
batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
return tf.reduce_mean(batch_errors)
## =================== COPIED CODE ENDS ======================
def l2_loss(generated_frames, expected_frames):
losses = []
for each_scale_gen_frames, each_scale_exp_frames in zip(generated_frames, expected_frames):
losses.append(tf.nn.l2_loss(tf.subtract(each_scale_gen_frames, each_scale_exp_frames)))
loss = tf.reduce_mean(tf.stack(losses))
return loss
def gdl_loss(generated_frames, expected_frames, alpha=2):
"""
    Gradient Difference Loss: compares horizontal (side pixel) and vertical
    (below pixel) image gradients of the generated and expected frames
"""
scale_losses = []
for i in xrange(len(generated_frames)):
# create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = -1 * pos
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]
strides = [1, 1, 1, 1] # stride of (1, 1)
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(generated_frames[i], filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(generated_frames[i], filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(expected_frames[i], filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(expected_frames[i], filter_y, strides, padding=padding))
grad_diff_x = tf.abs(gt_dx - gen_dx)
grad_diff_y = tf.abs(gt_dy - gen_dy)
scale_losses.append(tf.reduce_sum((grad_diff_x ** alpha + grad_diff_y ** alpha)))
# condense into one tensor and avg
return tf.reduce_mean(tf.stack(scale_losses))
def total_loss(generated_frames, expected_frames, loss_from_disc, lambda_gdl=1.0, lambda_l2=1.0, lambda_disc=1.0):
total_loss_cal = (lambda_gdl * gdl_loss(generated_frames, expected_frames) +
lambda_l2 * l2_loss(generated_frames, expected_frames)+
lambda_disc * loss_from_disc)
return total_loss_cal
#===================================================================
# Discriminator Model
#===================================================================
class ScaleBasedDiscriminator:
def __init__(self, heigth, width, kernel_size, feature_maps, fc_layer_units, scale_number):
assert len(feature_maps)==len(kernel_size), "Length should be equal !"
self.heigth = heigth
self.width = width
self.kernel_size = kernel_size
self.feature_maps = feature_maps
self.fc_layer_units = fc_layer_units
self.scale_number = scale_number
self.input = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth, self.width, image_channels])
self.create_graph()
def create_graph(self):
predication = self.input
with tf.variable_scope('discriminator_scale_'+str(self.scale_number)):
conv_counter = 0
for index, (each_filter, each_kernel) in enumerate(zip(self.feature_maps, self.kernel_size)):
with tf.variable_scope('conv_'+str(conv_counter)):
conv_counter += 1
stride = 1
                    # last conv layer uses stride 2, shrinking the flattened fc input
if index == (len(self.feature_maps)-1):
stride = 2
predication = slim.conv2d(predication, each_filter, [each_kernel, each_kernel],
padding = 'VALID',
stride = stride,
weights_initializer=trunc_normal(0.01),
weights_regularizer=regularizers.l2_regularizer(l2_val))
# print predication
predication = slim.flatten(predication)
# print predication
fully_connected_counter = 0
for index, each_layer_units in enumerate(self.fc_layer_units):
with tf.variable_scope('fully_connected'+str(fully_connected_counter)):
fully_connected_counter += 1
activation = tf.nn.relu
# last layer sigmoid !
if index == (len(self.fc_layer_units)-1):
activation = tf.nn.sigmoid
predication = slim.fully_connected(predication, each_layer_units, activation_fn=activation)
# print predication
# clip value between 0.1 and 0.9
self.predication = tf.clip_by_value(predication, 0.1, 0.9)
class Discriminator:
def __init__(self, heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units):
assert len(disc_scale_level_feature_maps)==len(disc_scale_level_kernel_size), "Length should be equal !"
assert len(disc_scale_level_feature_maps)==len(disc_fc_layer_units), "Length should be equal !"
self.heigth = heigth
self.width = width
self.disc_scale_level_feature_maps = disc_scale_level_feature_maps
self.disc_scale_level_kernel_size = disc_scale_level_kernel_size
self.disc_fc_layer_units = disc_fc_layer_units
# ground truth image
self.ground_truth_images = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth, self.width, image_channels])
# real or fake
self.ground_truth_labels = tf.placeholder(dtype=tf.float32, shape=[None,1])
self.len_scale = len(self.disc_scale_level_kernel_size)
self.create_graph()
self.loss()
self.scale_images_ground_truth_for_inputs()
self.tf_summary()
def create_graph(self,):
self.scale_based_discriminators = []
for each_scale, (each_feature_map, each_kernel_size, each_fc_layer) in enumerate(zip(self.disc_scale_level_feature_maps, self.disc_scale_level_kernel_size, self.disc_fc_layer_units)):
            # scaling factors: [1/8, 1/4, 1/2, 1]
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
rescaled_heigth = int(scaling_factor * self.heigth)
rescaled_width = int(scaling_factor * self.width)
disc_at_scale = ScaleBasedDiscriminator(heigth=rescaled_heigth,
width=rescaled_width, kernel_size=each_kernel_size,
feature_maps=each_feature_map,
fc_layer_units=each_fc_layer, scale_number=each_scale)
self.scale_based_discriminators.append(disc_at_scale)
self.scaled_disc_predication = []
for each_scaled_pred in self.scale_based_discriminators:
self.scaled_disc_predication.append(each_scaled_pred.predication)
# print self.scaled_disc_predication
def loss(self):
total_loss = []
for each_scaled_op in self.scaled_disc_predication:
# print each_scaled_op, self.ground_truth_labels
curr_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.ground_truth_labels, logits=each_scaled_op)
total_loss.append(curr_loss)
self.dis_loss = tf.reduce_mean(tf.stack(total_loss))
self.optimizer = tf.train.AdamOptimizer(adam_learning_rate)
global_step = tf.Variable(0,name="dis_global_step_var",trainable=False)
self.step = self.optimizer.minimize(self.dis_loss, global_step=global_step)
def rescale_image(self, scaling_factor, heigth, width, ground_truths):
"""
scaling_factor, heigth, width = values
input_data, ground_truths = Tensors
"""
rescaled_heigth = int(scaling_factor * heigth)
rescaled_width = int(scaling_factor * width)
assert rescaled_heigth != 0 and rescaled_width != 0, "scaling factor should not be zero !"
ground_truths_reshaped = tf.image.resize_images(ground_truths, [rescaled_heigth, rescaled_width])
return ground_truths_reshaped
def scale_images_ground_truth_for_inputs(self,):
inputs = []
for each_scale in range(self.len_scale):
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
inputs.append(self.rescale_image(scaling_factor, self.heigth, self.width, self.ground_truth_images))
self.rescaled_ground_truth_images = inputs
# print inputs
def tf_summary(self):
train_loss = tf.summary.scalar("dis_train_loss", self.dis_loss)
self.train_summary_merged = tf.summary.merge([train_loss])
#===================================================================
# Generative Model
#===================================================================
class GenerativeNetwork:
def __init__(self, heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size):
self.heigth_train = heigth_train
self.width_train = width_train
self.heigth_test = heigth_test
self.width_test = width_test
self.scale_level_feature_maps = scale_level_feature_maps
self.scale_level_kernel_size = scale_level_kernel_size
self.len_scale = len(self.scale_level_kernel_size)
assert len(self.scale_level_feature_maps) == len(self.scale_level_kernel_size), "Length should be equal !"
# Placeholders for inputs and outputs ... !
self.input_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, time_frames_to_consider * image_channels])
self.output_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, image_channels])
self.input_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, time_frames_to_consider * image_channels])
self.output_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, image_channels])
self.loss_from_disc = tf.placeholder(dtype=tf.float32, shape=[])
self.each_scale_predication_train = []
self.each_scale_ground_truth_train = []
self.each_scale_predication_test = []
self.each_scale_ground_truth_test = []
self.create_graph(self.input_train, self.output_train, heigth_train, width_train,
self.each_scale_predication_train,
self.each_scale_ground_truth_train,
reuse=None)
# reuse graph at time of test !
self.create_graph(self.input_test, self.output_test, heigth_test, width_test,
self.each_scale_predication_test,
self.each_scale_ground_truth_test,
reuse=True)
self.loss()
self.tf_summary()
# print self.each_scale_predication_train
# print self.each_scale_ground_truth_train
# print self.each_scale_predication_test
# print self.each_scale_ground_truth_test
def rescale_image(self, scaling_factor, heigth, width, input_data, ground_truths, last_generated_frame):
"""
scaling_factor, heigth, width = values
input_data, ground_truths = Tensors
"""
rescaled_heigth = int(scaling_factor * heigth)
rescaled_width = int(scaling_factor * width)
assert rescaled_heigth != 0 and rescaled_width != 0, "scaling factor should not be zero !"
input_reshaped = tf.image.resize_images(input_data, [rescaled_heigth, rescaled_width])
ground_truths_reshaped = tf.image.resize_images(ground_truths, [rescaled_heigth, rescaled_width])
last_generated_frame_reshaped = None
        if last_generated_frame is not None:
last_generated_frame_reshaped = tf.image.resize_images(last_generated_frame, [rescaled_heigth, rescaled_width])
return (input_reshaped, ground_truths_reshaped, last_generated_frame_reshaped)
def create_graph(self, input_data, ground_truths, heigth, width,
predicated_at_each_scale_tensor, ground_truth_at_each_scale_tensor, reuse):
# for each scale ...
for each_scale in range(self.len_scale):
conv_counter = 0
with tf.variable_scope('scale_'+str(each_scale),reuse=reuse):
                # scaling factors: [1/8, 1/4, 1/2, 1]
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
last_generated_frame = None
if each_scale > 0:
last_generated_frame = predicated_at_each_scale_tensor[each_scale-1]
input_reshaped, ground_truths_reshaped, last_generated_frame_reshaped = self.rescale_image(scaling_factor, heigth, width, input_data, ground_truths, last_generated_frame)
# append last scale output
if each_scale > 0:
input_reshaped = tf.concat([input_reshaped, last_generated_frame_reshaped],axis=3)
# print (input_reshaped, ground_truths_reshaped)
predication = input_reshaped
# for each conv layers in that scale ...
feature_maps = scale_level_feature_maps[each_scale]
kernel_size = scale_level_kernel_size[each_scale]
assert len(feature_maps)==len(kernel_size), "Length should be equal !"
for index, (each_filter, each_kernel) in enumerate(zip(feature_maps, kernel_size)):
with tf.variable_scope('conv_'+str(conv_counter),reuse=reuse):
conv_counter += 1
activiation = tf.nn.relu
# last layer tanh !
if index==(len(kernel_size)-1):
activiation = tf.nn.tanh
predication = slim.conv2d(predication, each_filter, [each_kernel, each_kernel],
weights_initializer=trunc_normal(0.01),
weights_regularizer=regularizers.l2_regularizer(l2_val),
activation_fn=activiation)
# APPEND LAST GENERATED FRAME
predicated_at_each_scale_tensor.append(predication)
ground_truth_at_each_scale_tensor.append(ground_truths_reshaped)
def loss(self):
# discriminator, gdl and l2 loss !
self.combined_loss = total_loss(self.each_scale_predication_train, self.each_scale_ground_truth_train, self.loss_from_disc)
self.optimizer = tf.train.AdamOptimizer(adam_learning_rate)
global_step = tf.Variable(0,name="global_step_var",trainable=False)
self.step = self.optimizer.minimize(self.combined_loss, global_step=global_step)
def tf_summary(self):
train_loss = tf.summary.scalar("gen_train_loss", self.combined_loss)
val_loss = tf.summary.scalar("gen_val_loss", self.combined_loss)
with tf.variable_scope('image_measures'):
psnr_error_train = psnr_error(self.each_scale_predication_train[-1], self.output_train)
psnr_error_train_s = tf.summary.scalar("train_psnr",psnr_error_train)
psnr_error_val_s = tf.summary.scalar("val_psnr",psnr_error_train)
sharpdiff_error_train = sharp_diff_error(self.each_scale_predication_train[-1],self.output_train)
sharpdiff_error_train_s = tf.summary.scalar("train_shardiff",sharpdiff_error_train)
sharpdiff_error_val_s = tf.summary.scalar("val_shardiff",sharpdiff_error_train)
images_to_show_train = []
images_to_show_val = []
len_pred = len(self.each_scale_predication_train)
for index_scale in range(len_pred-2,len_pred):
images_to_show_train.append(tf.summary.image('train_output_scale_' + str(index_scale), self.each_scale_predication_train[index_scale],
number_of_images_to_show))
images_to_show_train.append(tf.summary.image('train_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_train[index_scale],
number_of_images_to_show))
images_to_show_val.append(tf.summary.image('val_output_scale_' + str(index_scale), self.each_scale_predication_train[index_scale],
number_of_images_to_show))
images_to_show_val.append(tf.summary.image('val_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_train[index_scale],
number_of_images_to_show))
psnr_error_test = psnr_error(self.each_scale_predication_test[-1], self.output_test)
psnr_error_test_s = tf.summary.scalar("test_psnr",psnr_error_test)
sharpdiff_error_test = sharp_diff_error(self.each_scale_predication_test[-1],self.output_test)
sharpdiff_error_test_s = tf.summary.scalar("test_shardiff",sharpdiff_error_test)
images_to_show_test = []
len_pred = len(self.each_scale_predication_test)
for index_scale in range(len_pred-2,len_pred):
images_to_show_test.append(tf.summary.image('test_output_scale_' + str(index_scale), self.each_scale_predication_test[index_scale],
number_of_images_to_show))
images_to_show_test.append(tf.summary.image('test_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_test[index_scale],
number_of_images_to_show))
self.train_summary_merged = tf.summary.merge([train_loss, psnr_error_train_s, sharpdiff_error_train_s]+images_to_show_train)
self.test_summary_merged = tf.summary.merge([psnr_error_test_s, sharpdiff_error_test_s]+images_to_show_test)
self.val_summary_merged = tf.summary.merge([val_loss, psnr_error_val_s, sharpdiff_error_val_s]+images_to_show_val)
# ======================== MODEL ENDS ========================
def log_directory_creation(sess):
if tf.gfile.Exists(log_dir_file_path):
tf.gfile.DeleteRecursively(log_dir_file_path)
tf.gfile.MakeDirs(log_dir_file_path)
# model save directory
if os.path.exists(model_save_file_path):
x_folder = iterations
print ("loading model from ",x_folder)
restore_model_session(sess, x_folder + "gan8_model")
else:
os.makedirs(model_save_file_path + iterations)
os.makedirs(model_save_file_path + best)
# output dir creation
if not os.path.exists(output_video_save_file_path):
os.makedirs(output_video_save_file_path)
def save_model_session(sess, file_name):
saver = tf.train.Saver()
save_path = saver.save(sess, model_save_file_path + file_name)
def restore_model_session(sess, file_name):
saver = tf.train.Saver() # tf.train.import_meta_graph(model_save_file_path + file_name + ".meta")
saver.restore(sess, model_save_file_path + file_name)
print ("graph loaded!")
def is_correct_batch_shape(X_batch, y_batch, info="train",heigth=heigth, width=width):
# info can be {"train", "val"}
if (X_batch is None or y_batch is None or
X_batch.shape[1:] != (timesteps, heigth, width, channels) or
y_batch.shape[1:] != (timesteps, heigth, width, channels)):
print ("Warning: skipping this " + info + " batch because of shape")
return False
return True
def images_to_channels(X_batch):
"""
    This utility converts (Batch Size, TimeSteps, H, W, C) => (Batch Size, H, W, C, TimeSteps) => (Batch Size, H, W, C * TimeSteps)
    Refer to the input of the Multi-Scale Architecture !
"""
input_data = X_batch.transpose(0,2,3,4,1)
input_data = input_data.reshape(list(input_data.shape[:-2])+[-1])
return input_data
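def _images_to_channels_shape_demo():
    """Illustrative sketch (added; not called by the training flow): the
    transpose/reshape above folds the time axis into the channel axis."""
    toy = np.zeros((2, 8, 64, 64, 3))  # (batch, time, H, W, C)
    folded = images_to_channels(toy)
    assert folded.shape == (2, 64, 64, 8 * 3)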
def remove_oldest_image_add_new_image(X_batch,y_batch):
"""
    During frame prediction, at each time step drop the oldest image and append the newest one
"""
removed_older_image = X_batch[:,:,:,channels:]
new_batch = np.append(removed_older_image, y_batch, axis=3)
return new_batch
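def _sliding_window_demo():
    """Illustrative sketch (added; not called by the training flow): each
    predicted frame advances the window by dropping the oldest `channels`
    planes and appending the new frame."""
    window = np.zeros((2, 64, 64, 8 * 3))   # 8 stacked RGB frames
    new_frame = np.ones((2, 64, 64, 3))     # one newly generated frame
    assert remove_oldest_image_add_new_image(window, new_frame).shape == (2, 64, 64, 8 * 3)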
def alternate_disc_gen_training(sess, disc_model, gen_model, input_train, output_train):
# get scaled input on ground truth image !
rescaled_ground_truth_images = sess.run(disc_model.rescaled_ground_truth_images, feed_dict={disc_model.ground_truth_images: output_train})
new_feed_dict = {}
for i in range(len(rescaled_ground_truth_images)):
new_feed_dict [ disc_model.scale_based_discriminators[i].input ] = rescaled_ground_truth_images[i]
# real images !
new_feed_dict[disc_model.ground_truth_labels] = np.ones([len(input_train),1])
# disc train on real data
_, disc_summary_real = sess.run([disc_model.step, disc_model.train_summary_merged] ,feed_dict=new_feed_dict)
# gen predict on real data => predicated
each_scale_predication_train = sess.run(gen_model.each_scale_predication_train, feed_dict={gen_model.input_train : input_train, gen_model.output_train : output_train})
new_feed_dict = {}
for i in range(len(each_scale_predication_train)):
new_feed_dict [ disc_model.scale_based_discriminators[i].input ] = each_scale_predication_train[i]
# fake images !
new_feed_dict[disc_model.ground_truth_labels] = np.zeros([len(input_train),1])
# disc train on predicated by gen
_, disc_summary_fake, dis_loss = sess.run([disc_model.step, disc_model.train_summary_merged, disc_model.dis_loss] ,feed_dict=new_feed_dict)
# gen take loss from disc and train
_, gen_summary = sess.run([gen_model.step, gen_model.train_summary_merged], feed_dict={gen_model.loss_from_disc : dis_loss,
gen_model.input_train : input_train,
gen_model.output_train : output_train
})
return (disc_summary_real, disc_summary_fake, gen_summary)
def validation(sess, gen_model, data, val_writer, val_step):
loss = []
for X_batch, y_batch, _ in data.val_next_batch():
if not is_correct_batch_shape(X_batch, y_batch, "val"):
print ("validation batch is skipping ... ")
continue
X_input = X_batch[:,:time_frames_to_consider]
X_input = images_to_channels(X_input)
# ground truth ... for loss calculation ... !
output_train = X_batch[:,time_frames_to_consider,:,:,:]
Y_output = np.zeros((len(X_input),time_frames_to_predict,heigth,width,channels))
for each_time_step in range(time_frames_to_predict):
# gen predict on real data => predicated
y_current_step, combined_loss, train_summary_merged = sess.run([gen_model.each_scale_predication_train[-1], gen_model.combined_loss,gen_model.val_summary_merged], feed_dict={gen_model.loss_from_disc : 0.0,
gen_model.input_train : X_input,
gen_model.output_train : output_train})
loss.append(combined_loss)
val_writer.add_summary(train_summary_merged, val_step)
val_step += 1
Y_output[:,each_time_step,:,:,:] = y_current_step
X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
if len(loss)==0:
return (val_step, float("inf"))
return (val_step, sum(loss)/float(len(loss)))
def test(sess, gen_model, data, test_writer, test_step, is_store_output=False):
for X_batch, y_batch, file_names in data.get_custom_test_data():
if not is_correct_batch_shape(X_batch, y_batch, "test",heigth=custom_test_size[0], width=custom_test_size[1]):
print ("test batch is skipping ... ")
continue
X_input = X_batch[:,:time_frames_to_consider]
X_input = images_to_channels(X_input)
# ground truth ... for loss calculation ... !
output_train = X_batch[:,time_frames_to_consider,:,:,:]
# store output ...
Y_output = np.zeros((len(X_batch),time_frames_to_predict,custom_test_size[0],custom_test_size[1],channels))
for each_time_step in range(time_frames_to_predict):
# gen predict on real data => predicated
y_current_step, test_summary_merged = sess.run([gen_model.each_scale_predication_test[-1], gen_model.test_summary_merged], feed_dict={gen_model.loss_from_disc : 0.0,
gen_model.input_test : X_input,
gen_model.output_test : output_train})
test_writer.add_summary(test_summary_merged, test_step)
test_step += 1
Y_output[:,each_time_step,:,:,:] = y_current_step
X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
if is_store_output:
            # save with filenames
expected_frames = X_batch[:,time_frames_to_consider:time_frames_to_consider+time_frames_to_predict,:,:,:]
# image post processing is happening inside of store ...
# store
store_file_names_gen = data.frame_ext.generate_output_video(Y_output, file_names, ext_add_to_file_name="_generated_large")
store_file_names_exp = data.frame_ext.generate_output_video(expected_frames, file_names, ext_add_to_file_name="_expected_large")
speed = 1
data.frame_ext.generate_gif_videos(store_file_names_gen,speed=speed)
data.frame_ext.generate_gif_videos(store_file_names_exp,speed=speed)
return test_step
def test_wrapper():
with tf.Session() as sess:
disc_model = Discriminator(heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units)
gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
sess.run(init)
# clear logs !
log_directory_creation(sess)
# summary !
gen_train_writer = tf.summary.FileWriter(log_dir_file_path + "gen_train", sess.graph)
des_train_writer = tf.summary.FileWriter(log_dir_file_path + "des_train", sess.graph)
test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
val_writer = tf.summary.FileWriter(log_dir_file_path + "val", sess.graph)
global_step = 0
gen_count_iter = 0
des_count_iter = 0
val_count_iter = 0
test_count_iter = 0
val_loss_seen = float("inf")
# data read iterator
data = datasets(batch_size=batch_size, height=heigth, width=width,
custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
test_count_iter = test(sess, gen_model, data, test_writer, test_count_iter, is_store_output=True)
def train():
global best_loss
with tf.Session() as sess:
disc_model = Discriminator(heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units)
gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
sess.run(init)
# clear logs !
log_directory_creation(sess)
# summary !
gen_train_writer = tf.summary.FileWriter(log_dir_file_path + "gen_train", sess.graph)
des_train_writer = tf.summary.FileWriter(log_dir_file_path + "des_train", sess.graph)
test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
val_writer = tf.summary.FileWriter(log_dir_file_path + "val", sess.graph)
global_step = 0
gen_count_iter = 0
des_count_iter = 0
val_count_iter = 0
test_count_iter = 0
val_loss_seen = float("inf")
while True:
try:
# data read iterator
data = datasets(batch_size=batch_size, height=heigth, width=width,
custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
for X_batch, y_batch, _ in data.train_next_batch():
# print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
if not is_correct_batch_shape(X_batch, y_batch, "train"):
# global step not increased !
continue
for each_timesteps in range(time_frames_to_consider, timesteps-time_frames_to_consider):
input_train = X_batch[:, each_timesteps-time_frames_to_consider:each_timesteps, :,:,:]
input_train = images_to_channels(input_train)
output_train = X_batch[:,each_timesteps,:,:,:]
disc_summary_real, disc_summary_fake, gen_summary = alternate_disc_gen_training(sess, disc_model, gen_model, input_train, output_train)
gen_train_writer.add_summary(gen_summary, gen_count_iter)
gen_count_iter += 1
des_train_writer.add_summary(disc_summary_real, des_count_iter)
des_count_iter += 1
des_train_writer.add_summary(disc_summary_fake, des_count_iter)
des_count_iter += 1
if global_step % checkpoint_iterations == 0:
save_model_session(sess, iterations + "gan8_model")
if global_step % best_model_iterations == 0:
val_count_iter, curr_loss = validation(sess, gen_model, data, val_writer, val_count_iter)
if curr_loss < val_loss_seen:
val_loss_seen = curr_loss
save_model_session(sess, best + "gan8_model")
if global_step % test_model_iterations == 0:
test_count_iter = test(sess, gen_model, data, test_writer, test_count_iter)
print ("Iteration ", global_step, " best_loss ", val_loss_seen)
global_step += 1
except Exception:
print ("error occurred ... skipping!")
gen_train_writer.close()
des_train_writer.close()
test_writer.close()
val_writer.close()
def main():
train()
if __name__ == '__main__':
main()
|
kabrapratik28/DeepVideos
|
model/model_GAN_8.py
|
Python
|
apache-2.0
| 38,127
|
class AliasOptions:
def __init__(self, ow_coeff=False, ow_recon=False,
ow_pos=False, ow_intpos=False, ow_hist=False,
ow_dist=False):
self.ow_coeff = ow_coeff
self.ow_recon = ow_recon
self.ow_pos = ow_pos
self.ow_intpos = ow_intpos
self.ow_hist = ow_hist
if ow_hist:
ow_dist = True
self.ow_dist = ow_dist
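# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the one behavioural wrinkle above: requesting ow_hist
# forces ow_dist on, while ow_dist alone leaves ow_hist untouched.
if __name__ == '__main__':
    opts = AliasOptions(ow_hist=True)
    assert opts.ow_hist and opts.ow_dist
    assert AliasOptions(ow_dist=True).ow_dist
    assert not AliasOptions(ow_dist=True).ow_hist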
|
franklongford/alias
|
alias/src/alias_options.py
|
Python
|
bsd-2-clause
| 414
|
#!/usr/bin/python
import os
import re
from setuptools import find_packages, setup
from anchore_engine import version
package_name = "anchore_engine"
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
name="anchore_engine",
author="Anchore Inc.",
author_email="dev@anchore.com",
license="Apache License 2.0",
description="Anchore Engine",
long_description=open("README.md").read(),
url="http://www.anchore.com",
python_requires="==3.8.*",
packages=find_packages(exclude=["test", "test.*"]) + ["twisted.plugins"],
version=version.version,
include_package_data=True,
install_requires=requirements,
scripts=[],
entry_points="""
[console_scripts]
anchore-manager=anchore_manager.cli:main_entry
""",
)
|
anchore/anchore-engine
|
setup.py
|
Python
|
apache-2.0
| 802
|
# -*- coding: utf-8 -*-
import scrapy
class DodfSpider(scrapy.Spider):
name = "dodf"
allowed_domains = ["dodf.df.gov.br"]
start_urls = (
'http://www.dodf.df.gov.br/',
)
def parse(self, response):
pass
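# Usage note (standard Scrapy workflow, not part of the original file):
# run `scrapy crawl dodf` from the project root. parse() above is still a
# stub, so the spider currently discards every response it fetches.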
|
chrmorais/scrapygdf
|
scrapygdf/spiders/dodf.py
|
Python
|
gpl-3.0
| 240
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Api serving config collection service implementation.
Contains the implementation for BackendService as defined in api_backend.py.
"""
try:
import json
except ImportError:
import simplejson as json
import logging
from endpoints import api_backend
from endpoints import api_config
from endpoints import api_exceptions
from protorpc import message_types
__all__ = [
'ApiConfigRegistry',
'BackendServiceImpl',
]
class ApiConfigRegistry(object):
"""Registry of active APIs to be registered with Google API Server."""
def __init__(self):
self.__registered_classes = set()
self.__api_configs = set()
self.__api_methods = {}
def register_spi(self, config_contents):
"""Register a single SPI and its config contents.
Args:
config_contents: String containing API configuration.
"""
if config_contents is None:
return
parsed_config = json.loads(config_contents)
self.__register_class(parsed_config)
self.__api_configs.add(config_contents)
self.__register_methods(parsed_config)
def __register_class(self, parsed_config):
"""Register the class implementing this config, so we only add it once.
Args:
parsed_config: The JSON object with the API configuration being added.
Raises:
ApiConfigurationError: If the class has already been registered.
"""
methods = parsed_config.get('methods')
if not methods:
return
service_classes = set()
for method in methods.itervalues():
rosy_method = method.get('rosyMethod')
if rosy_method and '.' in rosy_method:
method_class = rosy_method.split('.', 1)[0]
service_classes.add(method_class)
for service_class in service_classes:
if service_class in self.__registered_classes:
raise api_config.ApiConfigurationError(
'SPI class %s has already been registered.' % service_class)
self.__registered_classes.add(service_class)
def __register_methods(self, parsed_config):
"""Register all methods from the given api config file.
Methods are stored in a map from method_name to rosyMethod,
the name of the ProtoRPC method to be called on the backend.
If no rosyMethod was specified the value will be None.
Args:
parsed_config: The JSON object with the API configuration being added.
"""
methods = parsed_config.get('methods')
if not methods:
return
for method_name, method in methods.iteritems():
self.__api_methods[method_name] = method.get('rosyMethod')
def lookup_api_method(self, api_method_name):
"""Looks an API method up by name to find the backend method to call.
Args:
api_method_name: Name of the method in the API that was called.
Returns:
Name of the ProtoRPC method called on the backend, or None if not found.
"""
return self.__api_methods.get(api_method_name)
def all_api_configs(self):
"""Return a list of all API configration specs as registered above."""
return list(self.__api_configs)
class BackendServiceImpl(api_backend.BackendService):
"""Implementation of BackendService."""
def __init__(self, api_config_registry, app_revision):
"""Create a new BackendService implementation.
Args:
api_config_registry: ApiConfigRegistry to register and look up configs.
app_revision: string containing the current app revision.
"""
self.__api_config_registry = api_config_registry
self.__app_revision = app_revision
@staticmethod
def definition_name():
"""Override definition_name so that it is not BackendServiceImpl."""
return api_backend.BackendService.definition_name()
def getApiConfigs(self, request):
"""Return a list of active APIs and their configuration files.
Args:
request: A request which may contain an app revision
Returns:
ApiConfigList: A list of API config strings
"""
if request.appRevision and request.appRevision != self.__app_revision:
raise api_exceptions.BadRequestException(
message='API backend app revision %s not the same as expected %s' % (
self.__app_revision, request.appRevision))
configs = self.__api_config_registry.all_api_configs()
return api_backend.ApiConfigList(items=configs)
def logMessages(self, request):
"""Write a log message from the Swarm FE to the log.
Args:
request: A log message request.
Returns:
Void message.
"""
Level = api_backend.LogMessagesRequest.LogMessage.Level
log = logging.getLogger(__name__)
for message in request.messages:
level = message.level if message.level is not None else Level.info
record = logging.LogRecord(name=__name__, level=level.number, pathname='',
lineno='', msg=message.message, args=None,
exc_info=None)
log.handle(record)
return message_types.VoidMessage()
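# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round trip through ApiConfigRegistry, assuming the endpoints
# package imported above is available (Python 2, per the itervalues/
# iteritems calls in this module). The config shape follows the parsing
# done in __register_class/__register_methods.
if __name__ == '__main__':
    registry = ApiConfigRegistry()
    registry.register_spi(json.dumps({
        'methods': {
            'guestbook.greetings.list': {'rosyMethod': 'GreetingsApi.list'},
        }
    }))
    assert registry.lookup_api_method('guestbook.greetings.list') == 'GreetingsApi.list'
    assert len(registry.all_api_configs()) == 1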
|
taimur97/Feeder
|
server/appengine/endpoints/api_backend_service.py
|
Python
|
gpl-2.0
| 5,564
|
import json
def default_template(title='', js=None, props={}):
from controllers import base
scripts_string = ''
if js is not None:
scripts_string += "<script src='/js/vendor/requirejs/require.js'></script>"
scripts_string += "<script src='/js/require_config.js'></script>"
scripts_string += ("""
<script>
require(['react'], function(React){{
window.React = React;
}});
require(['react', '/js/{0}.js'], function(React, page){{
React.renderComponent(page({1}), document.getElementById('main-page-container'));
}});
</script>
""").format(js, json.dumps(props, cls=base.MyEncoder))
return default_template_string.format(title, scripts_string)
default_template_string = """
<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>{0}</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700,300italic,400italic,700italic' rel='stylesheet' type='text/css'>
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet">
<link href='//cdnjs.cloudflare.com/ajax/libs/toastr.js/latest/css/toastr.css' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="/css/main.css">
<script src="/js/vendor/modernizr-2.6.2-respond-1.1.0.min.js"></script>
</head>
<body>
<!--[if lt IE 8]>
<p class="browsehappy">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<div id="main-page-container" class="container">
</div>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="/js/vendor/jquery/dist/jquery.min.js"><\/script>')</script>
{1}
</body>
</html>
"""
|
zhanrnl/ag
|
webapp/templates.py
|
Python
|
mit
| 2,362
|
""""
Library for the Maxim MAX30100 pulse oximetry system on Raspberry Pi
Based on original C library for Arduino by Connor Huffine/Kontakt
https://github.com/kontakt/MAX30100
September 2017
"""
import smbus
INT_STATUS = 0x00 # Which interrupts are tripped
INT_ENABLE = 0x01 # Which interrupts are active
FIFO_WR_PTR = 0x02 # Where data is being written
OVRFLOW_CTR = 0x03 # Number of lost samples
FIFO_RD_PTR = 0x04 # Where to read from
FIFO_DATA = 0x05 # Output data buffer
MODE_CONFIG = 0x06 # Control register
SPO2_CONFIG = 0x07 # Oximetry settings
LED_CONFIG = 0x09 # Pulse width and power of LEDs
TEMP_INTG = 0x16 # Temperature value, whole number
TEMP_FRAC = 0x17 # Temperature value, fraction
REV_ID = 0xFE # Part revision
PART_ID = 0xFF # Part ID, normally 0x11
I2C_ADDRESS = 0x57 # I2C address of the MAX30100 device
PULSE_WIDTH = {
200: 0,
400: 1,
800: 2,
1600: 3,
}
SAMPLE_RATE = {
50: 0,
100: 1,
167: 2,
200: 3,
400: 4,
600: 5,
800: 6,
1000: 7,
}
LED_CURRENT = {
0: 0,
4.4: 1,
7.6: 2,
11.0: 3,
14.2: 4,
17.4: 5,
20.8: 6,
24.0: 7,
27.1: 8,
30.6: 9,
33.8: 10,
37.0: 11,
40.2: 12,
43.6: 13,
46.8: 14,
50.0: 15
}
def _get_valid(d, value):
try:
return d[value]
except KeyError:
raise KeyError("Value %s not valid, use one of: %s" % (value, ', '.join([str(s) for s in d.keys()])))
def _twos_complement(val, bits):
"""compute the 2's complement of int value val"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits)
return val
INTERRUPT_SPO2 = 0
INTERRUPT_HR = 1
INTERRUPT_TEMP = 2
INTERRUPT_FIFO = 3
MODE_HR = 0x02
MODE_SPO2 = 0x03
class MAX30100(object):
def __init__(self,
i2c=None,
mode=MODE_HR,
sample_rate=100,
led_current_red=11.0,
led_current_ir=11.0,
pulse_width=1600,
max_buffer_len=10000
):
# Default to the standard I2C bus on Pi.
self.i2c = i2c if i2c else smbus.SMBus(1)
self.set_mode(MODE_HR) # Trigger an initial temperature read.
self.set_led_current(led_current_red, led_current_ir)
self.set_spo_config(sample_rate, pulse_width)
# Reflectance data (latest update)
self.buffer_red = []
self.buffer_ir = []
self.max_buffer_len = max_buffer_len
self._interrupt = None
@property
def red(self):
return self.buffer_red[-1] if self.buffer_red else None
@property
def ir(self):
return self.buffer_ir[-1] if self.buffer_ir else None
def set_led_current(self, led_current_red=11.0, led_current_ir=11.0):
# Validate the settings, convert to bit values.
led_current_red = _get_valid(LED_CURRENT, led_current_red)
led_current_ir = _get_valid(LED_CURRENT, led_current_ir)
self.i2c.write_byte_data(I2C_ADDRESS, LED_CONFIG, (led_current_red << 4) | led_current_ir)
def set_mode(self, mode):
reg = self.i2c.read_byte_data(I2C_ADDRESS, MODE_CONFIG)
self.i2c.write_byte_data(I2C_ADDRESS, MODE_CONFIG, reg & 0x74) # mask the SHDN bit
self.i2c.write_byte_data(I2C_ADDRESS, MODE_CONFIG, reg | mode)
def set_spo_config(self, sample_rate=100, pulse_width=1600):
reg = self.i2c.read_byte_data(I2C_ADDRESS, SPO2_CONFIG)
reg = reg & 0xFC # Clear the two LED pulse-width bits (sample_rate bits are left untouched, as before)
self.i2c.write_byte_data(I2C_ADDRESS, SPO2_CONFIG, reg | _get_valid(PULSE_WIDTH, pulse_width)) # map e.g. 1600 to its 2-bit register value; writing the raw width would overflow the byte
def enable_spo2(self):
self.set_mode(MODE_SPO2)
def disable_spo2(self):
self.set_mode(MODE_HR)
def enable_interrupt(self, interrupt_type):
self.i2c.write_byte_data(I2C_ADDRESS, INT_ENABLE, (interrupt_type + 1)<<4)
self.i2c.read_byte_data(I2C_ADDRESS, INT_STATUS)
def get_number_of_samples(self):
write_ptr = self.i2c.read_byte_data(I2C_ADDRESS, FIFO_WR_PTR)
read_ptr = self.i2c.read_byte_data(I2C_ADDRESS, FIFO_RD_PTR)
return abs(16+write_ptr - read_ptr) % 16
def read_sensor(self):
bytes = self.i2c.read_i2c_block_data(I2C_ADDRESS, FIFO_DATA, 4)
# Add latest values.
self.buffer_ir.append(bytes[0]<<8 | bytes[1])
self.buffer_red.append(bytes[2]<<8 | bytes[3])
# Crop our local FIFO buffer to length.
self.buffer_red = self.buffer_red[-self.max_buffer_len:]
self.buffer_ir = self.buffer_ir[-self.max_buffer_len:]
def shutdown(self):
reg = self.i2c.read_byte_data(I2C_ADDRESS, MODE_CONFIG)
self.i2c.write_byte_data(I2C_ADDRESS, MODE_CONFIG, reg | 0x80)
def reset(self):
reg = self.i2c.read_byte_data(I2C_ADDRESS, MODE_CONFIG)
self.i2c.write_byte_data(I2C_ADDRESS, MODE_CONFIG, reg | 0x40)
def refresh_temperature(self):
reg = self.i2c.read_byte_data(I2C_ADDRESS, MODE_CONFIG)
self.i2c.write_byte_data(I2C_ADDRESS, MODE_CONFIG, reg | (1 << 3))
def get_temperature(self):
intg = _twos_complement(self.i2c.read_byte_data(I2C_ADDRESS, TEMP_INTG), 8)
frac = self.i2c.read_byte_data(I2C_ADDRESS, TEMP_FRAC)
return intg + (frac * 0.0625)
def get_rev_id(self):
return self.i2c.read_byte_data(I2C_ADDRESS, REV_ID)
def get_part_id(self):
return self.i2c.read_byte_data(I2C_ADDRESS, PART_ID)
def get_registers(self):
return {
"INT_STATUS": self.i2c.read_byte_data(I2C_ADDRESS, INT_STATUS),
"INT_ENABLE": self.i2c.read_byte_data(I2C_ADDRESS, INT_ENABLE),
"FIFO_WR_PTR": self.i2c.read_byte_data(I2C_ADDRESS, FIFO_WR_PTR),
"OVRFLOW_CTR": self.i2c.read_byte_data(I2C_ADDRESS, OVRFLOW_CTR),
"FIFO_RD_PTR": self.i2c.read_byte_data(I2C_ADDRESS, FIFO_RD_PTR),
"FIFO_DATA": self.i2c.read_byte_data(I2C_ADDRESS, FIFO_DATA),
"MODE_CONFIG": self.i2c.read_byte_data(I2C_ADDRESS, MODE_CONFIG),
"SPO2_CONFIG": self.i2c.read_byte_data(I2C_ADDRESS, SPO2_CONFIG),
"LED_CONFIG": self.i2c.read_byte_data(I2C_ADDRESS, LED_CONFIG),
"TEMP_INTG": self.i2c.read_byte_data(I2C_ADDRESS, TEMP_INTG),
"TEMP_FRAC": self.i2c.read_byte_data(I2C_ADDRESS, TEMP_FRAC),
"REV_ID": self.i2c.read_byte_data(I2C_ADDRESS, REV_ID),
"PART_ID": self.i2c.read_byte_data(I2C_ADDRESS, PART_ID),
}
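# --- Illustrative usage sketch (not part of the original library) ---
# Assumes a Raspberry Pi with a MAX30100 wired to I2C bus 1; the LED
# currents and polling interval below are illustrative, not calibrated.
if __name__ == '__main__':
    import time
    sensor = MAX30100(led_current_red=27.1, led_current_ir=27.1)
    sensor.enable_spo2()
    try:
        for _ in range(100):
            sensor.read_sensor()
            print("IR: %s  RED: %s" % (sensor.ir, sensor.red))
            time.sleep(0.1)
    finally:
        sensor.shutdown()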
|
feroda/play-nao
|
LifeIsNAO-bot/hrmon/max30100.py
|
Python
|
agpl-3.0
| 6,530
|
from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'Fango.accounts'
|
Niemzok/fango
|
Fango/accounts/apps.py
|
Python
|
mit
| 138
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update chassis command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from chassistest import VerifyChassisMixin
class TestUpdateChassis(TestBrokerCommand, VerifyChassisMixin):
def test_100_update_ut3c5(self):
ip = self.net.unknown[0].usable[6]
self.dsdb_expect_add("ut3c5.aqd-unittest.ms.com", ip, "oa",
comments="Some new chassis comments")
command = ["update", "chassis", "--chassis", "ut3c5.aqd-unittest.ms.com",
"--rack", "ut3", "--serial", "ABC5678",
"--model", "c-class", "--ip", ip,
"--comments", "Some new chassis comments"]
self.noouttest(command)
def test_110_verify_ut3c5(self):
self.verifychassis("ut3c5.aqd-unittest.ms.com", "hp", "c-class",
"ut3", "a", "3", "ABC5678",
comments="Some new chassis comments",
ip=self.net.unknown[0].usable[6])
def test_200_update_bad_ip(self):
ip = self.net.unknown[0].usable[6]
command = ["update", "chassis", "--ip", ip,
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"IP address %s is already in use by on-board admin "
"interface oa of chassis "
"ut3c5.aqd-unittest.ms.com." % ip,
command)
def test_200_update_bad_model(self):
command = ["update", "chassis", "--model", "uttorswitch",
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Model uttorswitch, machine_type chassis not found.",
command)
def test_200_not_chassis(self):
command = ["update", "chassis", "--chassis",
"ut3gd1r01.aqd-unittest.ms.com",
"--comments", "Not a chassis"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Switch ut3gd1r01.aqd-unittest.ms.com exists, but "
"is not a chassis.",
command)
def test_200_no_model(self):
command = ["update", "chassis", "--vendor", "generic",
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Model utchassis, vendor generic, "
"machine_type chassis not found.",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateChassis)
unittest.TextTestRunner(verbosity=2).run(suite)
|
stdweird/aquilon
|
tests/broker/test_update_chassis.py
|
Python
|
apache-2.0
| 3,613
|
from __future__ import unicode_literals
from prompt_toolkit.utils import take_using_weights
import unittest
import itertools
class SplitLinesTest(unittest.TestCase):
def test_using_weights(self):
def take(generator, count):
return list(itertools.islice(generator, 0, count))
# Check distribution.
data = take(take_using_weights(['A', 'B', 'C'], [5, 10, 20]), 35)
self.assertEqual(data.count('A'), 5)
self.assertEqual(data.count('B'), 10)
self.assertEqual(data.count('C'), 20)
self.assertEqual(data,
['A', 'B', 'C', 'C', 'B', 'C', 'C', 'A', 'B', 'C', 'C', 'B', 'C',
'C', 'A', 'B', 'C', 'C', 'B', 'C', 'C', 'A', 'B', 'C', 'C',
'B', 'C', 'C', 'A', 'B', 'C', 'C', 'B', 'C', 'C'])
# Another order.
data = take(take_using_weights(['A', 'B', 'C'], [20, 10, 5]), 35)
self.assertEqual(data.count('A'), 20)
self.assertEqual(data.count('B'), 10)
self.assertEqual(data.count('C'), 5)
# Bigger numbers.
data = take(take_using_weights(['A', 'B', 'C'], [20, 10, 5]), 70)
self.assertEqual(data.count('A'), 40)
self.assertEqual(data.count('B'), 20)
self.assertEqual(data.count('C'), 10)
# Negative numbers.
data = take(take_using_weights(['A', 'B', 'C'], [-20, 10, 0]), 70)
self.assertEqual(data.count('A'), 0)
self.assertEqual(data.count('B'), 70)
self.assertEqual(data.count('C'), 0)
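# Convenience guard (not in the original, which relies on an external
# test runner) so the module can also be executed directly:
if __name__ == '__main__':
    unittest.main()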
|
niklasf/python-prompt-toolkit
|
tests/utils_tests/__init__.py
|
Python
|
bsd-3-clause
| 1,508
|
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
import time
import datetime
import numbers
import bisect
import numpy
from mantid.api import * # PythonAlgorithm, AlgorithmFactory, WorkspaceProperty
from mantid.kernel import * # StringArrayProperty
from mantid.simpleapi import * # needed for Load
class LoadLogPropertyTable(PythonAlgorithm):
def summary(self):
""" Return summary
"""
return "Creates a table of Run number against the log values for that run for a range of files.\
It can use a single log value or a list of log values."
# same concept as built in "CreateLogPropertyTable" but loads its own workspaces and needn't hold all in memory at once
# select log values to put in table (list)
# special cases for:
# beamlog_(counts, frames, etc): last few points end up in next run's log. Find Maximum.
# comment (separate function)
# time series, take average for t>0 (if available)
def PyInit(self):
self.declareProperty(FileProperty(name="FirstFile",defaultValue="",action=FileAction.Load,extensions = ["nxs","raw"]),
"The first file to load from")
self.declareProperty(FileProperty(name="LastFile",defaultValue="",action=FileAction.Load,extensions = ["nxs","raw"]),
"The Last file to load from, must be in the same directory, all files in between will also be used")
self.declareProperty(StringArrayProperty("LogNames",direction=Direction.Input),
"The comma seperated list of properties to include. \n"+
"The full list will be printed if an invalid value is used.")
self.declareProperty(WorkspaceProperty("OutputWorkspace","",Direction.Output),"Table of results")
def category(self):
return "DataHandling\\Logs;Muon\\DataHandling"
def getGeneralLogValue(self,ws,name,begin):
# get log value
# average time series over run
# for beamlog, etc return flag=true and value to push into previous run
if name=="comment":
return (ws.getComment(),False,0)
try:
v=ws.getRun().getProperty(name)
except:
possibleLogs = list(ws.getRun().keys())
possibleLogs.insert(0,'comment')
message = "The log name '" + name + "' was not found, possible choices are: " + str(possibleLogs)
raise ValueError(message)
try:
times2=[]
if hasattr(v,"unfiltered"):
v=v.unfiltered()
for tt in v.times:
times2.append((datetime.datetime(*(time.strptime(str(tt),"%Y-%m-%dT%H:%M:%S")[0:6]))-begin).total_seconds())
except: #pylint: disable=bare-except
# print "probably not a time series"
pass
if name[0:8]=="Beamlog_" and (name.find("Counts")>0 or name.find("Frames")>0):
i=bisect.bisect_right(times2,2) # allowance for "slow" clearing of DAE
#print "returning max beam log, list cut 0:",i,":",len(times2)
return (numpy.amax(v.value[i:]),True,numpy.amax(v.value[:i]))
if v.__class__.__name__ =="TimeSeriesProperty_dbl" or v.__class__.__name__ =="FloatTimeSeriesProperty":
i=bisect.bisect_left(times2,0)
return (numpy.average(v.value[i:]),False,0)
return (v.value,False,0)
#pylint: disable=too-many-branches
def PyExec(self):
firstFileName=self.getProperty("FirstFile").value
lastFileName=self.getProperty("LastFile").value
firstRunNum, firstFileFirstDigit,firstFileLastDigit = self.getRunNumber(firstFileName)
lastRunNum, lastFileFirstDigit,LastFileLastDigit = self.getRunNumber(lastFileName)
if firstFileName[:lastFileFirstDigit] != lastFileName[:lastFileFirstDigit]:
raise Exception("Files from different directories or instruments")
if firstFileName[firstFileName.rindex('.'):] != lastFileName[lastFileName.rindex('.'):]:
raise Exception("Files of different types")
if firstFileLastDigit-firstFileFirstDigit != LastFileLastDigit-lastFileFirstDigit:
raise Exception("File numbering error")
if lastRunNum < firstRunNum:
raise Exception("Run numbers must increase")
# table. Rows=runs, columns=logs (col 0 = run number)
collist=self.getProperty("LogNames").value
wsOutput=WorkspaceFactory.createTable()
wsOutput.addColumn("int","RunNumber")
# loop and load files. Absolute numbers for now.
for loopRunNum in range(firstRunNum,lastRunNum+1):
# create a file path for intervening files, based from the 1st filename
thispath=firstFileName[:firstFileFirstDigit] + \
str(loopRunNum).zfill(firstFileLastDigit-firstFileFirstDigit) + \
firstFileName[firstFileLastDigit:]
loadedWs = self.loadMetaData(thispath)
if loadedWs is None:
continue
#check if the ws is a group
ws = loadedWs
if ws.id() == 'WorkspaceGroup':
ws=ws[0]
begin=datetime.datetime(*(time.strptime(ws.getRun().getProperty("run_start").value,"%Y-%m-%dT%H:%M:%S")[0:6])) # start of day
vallist=[loopRunNum]
for col in collist:
try:
(colValue, leftover, lval)=self.getGeneralLogValue(ws, col, begin)
except ValueError:
# this is a failure to find the named log
raise
vallist.append(colValue)
if loopRunNum==firstRunNum:
if isinstance(colValue, numbers.Number):
wsOutput.addColumn("double",col)
else:
wsOutput.addColumn("str",col)
if leftover and loopRunNum>firstRunNum:
if lval>wsOutput.cell(col,loopRunNum-firstRunNum-1):
wsOutput.setCell(col,loopRunNum-firstRunNum-1, lval)
wsOutput.addRow(vallist)
self.setProperty("OutputWorkspace",wsOutput)
def loadMetaData(self, thispath):
loadedWs = None
try:
loadAlg = self.createChildAlgorithm('Load')
#set Filename first
loadAlg.setProperty('Filename', thispath)
loadAlg.setProperty('OutputWorkspace', '__CopyLogsTmp')
try:
#try to set MetaDataOnly
loadAlg.setProperty('MetaDataOnly', True)
except (ValueError,RuntimeError):
#If that fails set SpectrumMin and SpectrumMax
loadAlg.setProperty('SpectrumMin', 1)
loadAlg.setProperty('SpectrumMax', 1)
loadAlg.execute()
outWSPropName = 'OutputWorkspace'
try:
loadedWs = loadAlg.getProperty(outWSPropName).value
except RuntimeError:
raise RuntimeError("No output workspace for " + thispath)
except (ValueError,RuntimeError):
return None
return loadedWs
def getRunNumber(self, fileName):
# Find last . and step back until you find a digit
lastDigitIndex = fileName.rindex('.')
while not fileName[lastDigitIndex - 1].isdigit():
lastDigitIndex -= 1
# Keep going back until you find the start of the number sequence
firstDigitIndex = lastDigitIndex - 1
while fileName[firstDigitIndex - 1].isdigit():
firstDigitIndex -= 1
runNumber = int(fileName[firstDigitIndex:lastDigitIndex])
return runNumber, firstDigitIndex, lastDigitIndex
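# Worked example (illustrative): for fileName "MUSR00015189.nxs" the scan
# above finds firstDigitIndex=4 and lastDigitIndex=12, so this returns
# (15189, 4, 12); PyExec then rebuilds intervening paths by zero-filling
# each run number into that digit range.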
AlgorithmFactory.subscribe(LoadLogPropertyTable())
|
wdzhou/mantid
|
Framework/PythonInterface/plugins/algorithms/LoadLogPropertyTable.py
|
Python
|
gpl-3.0
| 7,841
|
##############################################################################
#
# Copyright (c) 2008-2012 SIA "Micronaet s.r.l." (http://www.micronaet.it)
# All Rights Reserved.
# General contacts <riolini@micronaet.it>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
def get_array_list(self, item_id, side, is_order):
''' Calculate array for dental position: u(p) or d(own)
at least 1 line is returned (for graphical representation)
item_id: id of partner (is_order=False) or id of order (is_order=True)
side: U or D, standing for Up or Down
is_order: True or False (item_id refers to a sale.order when True, a res.partner when False)
return: array list of represented operations
'''
# Function for this procedure:
def get_id(tooth):
''' return position of tooth in line ex.:
11 - 18 (from 1 to 8) also for 51 - 55
21 - 28 (from 9 to 16) also for 61 - 65
T (is 0)
41 - 48 (from 1 to 8) also for 81 - 85
31 - 38 (from 9 to 16) also for 71 - 75
'''
if tooth in ("*", "up", "down"): # All teeth (upper or lower side)
return 0
elif tooth[:1] in ("1", "4", "5", "8"):
return int(tooth[1:2])
elif tooth[:1] in ("2", "3", "6", "7"):
return 8 + int(tooth[1:2])
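# Examples of the mapping above (derived from the branches, illustrative):
# get_id("18") -> 8, get_id("28") -> 16, get_id("75") -> 13, get_id("up") -> 0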
# Start procedure:
total_list=[] # list that is represented on dental schema report
# T 8 7 6 5 4 3 2 1 | 1 2 3 4 5 6 7 8
level=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,] # max level for tooth
max_level=0 # level number max (for indexing lines)
side_description={'up': " (sup.)", 'down': " (inf.)"}
if is_order:
# TODO
tooth_ids=self.pool.get('sale.order.line').search(self.cr, self.uid, [('order_id', '=', item_id),('discarded','=',False)])
tooth_list_ids = self.pool.get('sale.order.line').browse(self.cr, self.uid, tooth_ids)
else: # is partner
tooth_ids = self.pool.get('dentist.operation').search(self.cr, self.uid, [('partner_id', '=', item_id),])
tooth_list_ids = self.pool.get('dentist.operation').browse(self.cr, self.uid, tooth_ids)
for tooth in tooth_list_ids:
if tooth.tooth: # jump if no teeth!
if (side.upper()=="U" and (tooth.tooth[:1] in ("u", "1", "2", "5", "6", "*",))) or (side.upper()=="D" and (tooth.tooth[:1] in ("d", "3", "4", "7", "8",))):
pos = get_id(tooth.tooth)
level[pos] += 1
current_level = level[pos]
if level[pos] > max_level:
empty=['', '','','','','','','','', '','','','','','','','',] # empty line for append
total_list.append(empty)
max_level=current_level
if is_order:
tooth_date = ""
else:
tooth_date = tooth.date and "%s-%s"%(tooth.date[5:7],tooth.date[2:4]) # tooth.date[8:10],
one_side = side_description[tooth.tooth] if tooth.tooth in ("up", "down") else ""
if tooth.tooth[:1] in ("5", "6", "7", "8"): # milk teeth
code_tooth = "%s%s*"%(tooth.product_id.code.lower(), one_side) if is_order else "%s*%s\n%s"%(tooth.product_id.code.lower(), one_side, tooth_date)
else: # normal teeth
code_tooth = "%s%s"%(tooth.product_id.code.upper(), one_side) if is_order else "%s%s\n%s"%(tooth.product_id.code.upper(), one_side, tooth_date)
total_list[current_level-1][pos]=code_tooth
else: # TODO raise some error?
pass
if max_level==0:
total_list.append(['', '','','','','','','','', '','','','','','','','',]) # return a blank line
total_list.reverse()
return total_list
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cherrygirl/micronaet7
|
dentist/report/dental_schema.py
|
Python
|
agpl-3.0
| 5,074
|
import sys
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
hold1 = hold2 = -sys.maxint -1
release1 = release2 = 0
for p in prices:
release2 = max(release2,hold2+p)
hold2 = max(hold2, release1-p)
release1 = max(release1, hold1+p)
hold1 = max(hold1,-p)
return release2
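# --- Illustrative usage sketch (not part of the original solution) ---
# "Best Time to Buy and Sell Stock III": maximum profit with at most two
# transactions; the expected answers below are the standard ones.
if __name__ == '__main__':
    s = Solution()
    assert s.maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6  # buy@0 sell@3, buy@1 sell@4
    assert s.maxProfit([1, 2, 3, 4, 5]) == 4           # one transaction suffices
    assert s.maxProfit([]) == 0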
|
saai/LeetcodePythonSolutions
|
array/maxProfit3.py
|
Python
|
mit
| 429
|
from abc import abstractmethod
from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.contrib.admin.helpers import InlineAdminFormSet
from django.contrib.contenttypes.admin import GenericInlineModelAdmin
from django.core.exceptions import ImproperlyConfigured
from django.db.models import signals
from django.dispatch import receiver
from django.template import RequestContext
from django.template.loader import render_to_string
from django.urls import path
from django.utils import translation
from fluent_utils.ajax import JsonResponse
from fluent_contents import extensions
from fluent_contents.admin.contentitems import BaseContentItemFormSet, get_content_item_inlines
from fluent_contents.admin.genericextensions import BaseInitialGenericInlineFormSet
from fluent_contents.models import Placeholder
from fluent_contents.models.managers import get_parent_active_language_choices
try:
from functools import partialmethod # Python 3
except ImportError:
from django.utils.functional import curry as partialmethod
class PlaceholderInlineFormSet(BaseInitialGenericInlineFormSet):
# Most logic happens in the generic base class
def __init__(self, *args, **kwargs):
self._instance_languages = None
# kwargs['prefix'] = 'placeholder_fs'
super().__init__(*args, **kwargs)
@classmethod
def get_default_prefix(cls):
# Make output less verbose, easier to read, and less kB to transmit.
return "placeholder-fs"
@property
def other_instance_languages(self):
return get_parent_active_language_choices(self.instance, exclude_current=True)
class PlaceholderEditorInline(GenericInlineModelAdmin):
"""
The placeholder editor, implemented as an admin inline.
It displays tabs for each inline placeholder, and displays :class:`~fluent_contents.models.ContentItem` plugins in the tabs.
It should be inserted in the ``ModelAdmin.inlines`` before the inlines that
the :func:`~fluent_contents.admin.get_content_item_inlines` function generates.
The ContentItem inlines look for the ``Placeholder`` object that was created just before their invocation.
To fetch the initial data, the inline will attempt to find the parent model,
and call :func:`~PlaceholderEditorBaseMixin.get_placeholder_data`.
When the admin models inherit from :class:`~fluent_contents.admin.PlaceholderEditorAdmin`
or :class:`~fluent_contents.admin.PlaceholderFieldAdmin` this will be setup already.
"""
model = Placeholder
formset = PlaceholderInlineFormSet # Important part of the class!
ct_field = "parent_type"
ct_fk_field = "parent_id"
template = "admin/fluent_contents/placeholder/inline_tabs.html"
extra = 0
is_fluent_editor_inline = True # Allow admin templates to filter the inlines
class Media:
# cp_tabs.js is included here, as it's a presentation choice
# to display the placeholder panes in a tabbar format.
# The remaining scripts should just operate the same without it.
js = (
"admin/js/vendor/jquery/jquery{}.js".format("" if settings.DEBUG else ".min"),
"admin/js/jquery.init.js",
"fluent_contents/admin/vendor/Sortable.js",
"fluent_contents/admin/jquery.cookie.js",
"fluent_contents/admin/cp_admin.js",
"fluent_contents/admin/cp_data.js",
"fluent_contents/admin/cp_tabs.js",
"fluent_contents/admin/cp_plugins.js",
"fluent_contents/admin/cp_widgets.js",
"fluent_contents/admin/fluent_contents.js",
)
# if 'grapelli' in settings.INSTALLED_APPS:
# ...
if "classic_theme" in settings.INSTALLED_APPS:
css = {
"screen": (
"fluent_contents/admin/cp_admin.css",
"fluent_contents/admin/cp_admin_classic.css",
)
}
else:
css = {
"screen": (
"fluent_contents/admin/cp_admin.css",
"fluent_contents/admin/cp_admin_flat.css",
)
}
extend = False # No need for the standard 'admin/js/inlines.min.js' here.
def get_all_allowed_plugins(self):
"""
Return *all* plugin categories which can be used by placeholder content.
This is the sum of all allowed plugins by the various slots on the page.
It accesses the parent :class:`PlaceholderEditorBaseMixin` by default to request the information.
This field is used in the template.
"""
return self._get_parent_modeladmin().get_all_allowed_plugins()
def get_formset(self, request, obj=None, **kwargs):
"""
Pre-populate formset with the initial placeholders to display.
"""
def _placeholder_initial(p):
# p.as_dict() returns allowed_plugins too for the client-side API.
return {"slot": p.slot, "title": p.title, "role": p.role}
# Note this method is called twice, the second time in get_fieldsets() as `get_formset(request).form`
initial = []
if request.method == "GET":
placeholder_admin = self._get_parent_modeladmin()
# Grab the initial data from the parent PlaceholderEditorBaseMixin
data = placeholder_admin.get_placeholder_data(request, obj)
initial = [_placeholder_initial(d) for d in data]
# Order initial properly,
# Inject as default parameter to the constructor
# This is the BaseExtendedGenericInlineFormSet constructor
FormSetClass = super().get_formset(request, obj, **kwargs)
FormSetClass.__init__ = partialmethod(FormSetClass.__init__, initial=initial)
return FormSetClass
def _get_parent_modeladmin(self):
# HACK: accessing private field.
try:
parentadmin = self.admin_site._registry[self.parent_model]
except KeyError:
raise ImproperlyConfigured(
"Model admin for '{}' not found in admin_site!".format(self.parent_model.__name__)
)
# Do some "type" checking to developers are aided in inheriting their parent ModelAdmin screens with the proper classes.
assert isinstance(
parentadmin, PlaceholderEditorBaseMixin
), "The '{}' class can only be used in admin screens which implement a PlaceholderEditor mixin class.".format(
self.__class__.__name__
)
return parentadmin
class PlaceholderEditorBaseMixin:
"""
Base interface/mixin for a :class:`~django.contrib.admin.ModelAdmin` to provide the :class:`PlaceholderEditorInline` with initial data.
This class is implemented by the :class:`PlaceholderEditorAdmin` and :class:`~fluent_contents.admin.PlaceholderFieldAdmin` classes.
"""
@abstractmethod
def get_placeholder_data(self, request, obj=None):
"""
Return the placeholders that the editor should initially display.
The function should return a list of :class:`~fluent_contents.models.PlaceholderData` classes.
These classes can either be instantiated manually, or read from a template
using the :ref:`fluent_contents.analyzer` module for example.
"""
# This information will be read by the PlaceholderEditorInline,
# but it could also be reused by other derived classes off course.
raise NotImplementedError(
"The '{}' subclass should implement get_placeholder_data().".format(
self.__class__.__name__
)
)
def get_all_allowed_plugins(self):
"""
Return all plugin categories which can be used by placeholder content.
By default, all plugins are allowed. Individual slot names may further limit the plugin set.
:rtype: list of :class:`~fluent_contents.extensions.ContentPlugin`
"""
return extensions.plugin_pool.get_plugins()
class PlaceholderEditorAdmin(PlaceholderEditorBaseMixin, ModelAdmin):
"""
The base functionality for :class:`~django.contrib.admin.ModelAdmin` dialogs to display a placeholder editor with plugins.
It loads the inlines using :func:`get_extra_inlines`.
It loads the :class:`PlaceholderEditorInline`, which displays each placeholder in separate tabs:
.. image:: /images/admin/placeholdereditoradmin2.png
:width: 755px
:height: 418px
:alt: django-fluent-contents placeholder editor preview
"""
placeholder_inline = PlaceholderEditorInline
def get_inline_instances(self, request, *args, **kwargs):
"""
Create the inlines for the admin, including the placeholder and contentitem inlines.
"""
inlines = super().get_inline_instances(request, *args, **kwargs)
extra_inline_instances = []
inlinetypes = self.get_extra_inlines()
for InlineType in inlinetypes:
inline_instance = InlineType(self.model, self.admin_site)
extra_inline_instances.append(inline_instance)
return extra_inline_instances + inlines
def get_extra_inlines(self):
"""
Return the extra inlines for the placeholder editor.
It loads the :attr:`placeholder_inline` first, followed by the inlines for the :class:`~fluent_contents.models.ContentItem` classes.
"""
return [self.placeholder_inline] + get_content_item_inlines(
plugins=self.get_all_allowed_plugins()
)
def get_urls(self):
urls = super().get_urls()
opts = self.model._meta
info = opts.app_label, opts.model_name
return [
path(
"<int:object_id>/api/get_placeholder_data/",
self.admin_site.admin_view(self.get_placeholder_data_view),
name="{}_{}_get_placeholder_data".format(*info),
)
] + urls
def get_placeholder_data_view(self, request, object_id):
"""
Return the placeholder data as dictionary.
This is used in the client for the "copy" functionality.
"""
language = "en" # request.POST['language']
with translation.override(
language
): # Use generic solution here, don't assume django-parler is used now.
obj = self.get_object(request, object_id)
if obj is None:
json = {"success": False, "error": "Page not found"}
status = 404
elif not self.has_change_permission(request, obj):
json = {"success": False, "error": "No access to page"}
status = 403
else:
# Fetch the forms that would be displayed,
# return the data as serialized form data.
status = 200
json = {
"success": True,
"object_id": object_id,
"language_code": language,
"formset_forms": self._get_object_formset_data(request, obj),
}
return JsonResponse(json, status=status)
def _get_object_formset_data(self, request, obj):
inline_instances = self.get_inline_instances(request, obj)
placeholder_slots = dict(Placeholder.objects.parent(obj).values_list("id", "slot"))
all_forms = []
formsets_with_inlines = self.get_formsets_with_inlines(request, obj=obj)
for FormSet, inline in formsets_with_inlines:
# Only ContentItem inlines
if isinstance(inline, PlaceholderEditorInline) or not getattr(
inline, "is_fluent_editor_inline", False
):
continue
formset_forms = self._get_contentitem_formset_html(
request, obj, FormSet, inline, placeholder_slots
)
if formset_forms:
all_forms.extend(formset_forms)
# Sort the flat list by placeholder slot and insertion order for the client.
all_forms.sort(key=lambda x: (x["placeholder_slot"], x["sort_order"]))
return all_forms
def _get_contentitem_formset_html(self, request, obj, FormSet, inline, placeholder_slots):
# Passing serialized object fields to the client doesn't work,
# as some form fields (e.g. picture field or MultiValueField) have a different representation.
# The only way to pass a form copy to the client is by actually rendering it.
# Hence, emulating change_view code here:
queryset = inline.get_queryset(request)
formset = FormSet(instance=obj, prefix="", queryset=queryset)
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline.extra = 0
inline_admin_formset = InlineAdminFormSet(
inline, formset, fieldsets, prepopulated, readonly, model_admin=self
)
form_data = []
for i, inline_admin_form in enumerate(inline_admin_formset):
if inline_admin_form.original is None: # The extra forms
continue
# exactly what admin/fluent_contents/contentitem/inline_container.html does:
template_name = inline_admin_formset.opts.cp_admin_form_template
context = {
"inline_admin_form": inline_admin_form,
"inline_admin_formset": inline_admin_formset,
"original": obj,
"object_id": obj.pk,
"add": False,
"change": True,
"has_change_permission": True,
}
context = RequestContext(request, context)
form_html = render_to_string(template_name, context.flatten())
# Append to list with metadata included
contentitem = inline_admin_form.original
form_data.append(
{
"contentitem_id": contentitem.pk,
"sort_order": contentitem.sort_order,
"placeholder_id": contentitem.placeholder_id,
"placeholder_slot": placeholder_slots[contentitem.placeholder_id],
"html": form_html,
"plugin": inline.plugin.__class__.__name__,
"model": inline.model.__name__,
"prefix": formset.add_prefix(i),
}
)
return form_data
def save_formset(self, request, form, formset, change):
# Track deletion of Placeholders across the formsets.
# When a Placeholder is deleted, the ContentItem can't be saved anymore with the old placeholder_id
# That ID did exist at the beginning of the transaction, but won't be when all forms are saved.
# Pass the knowledge of deleted placeholders to the ContentItem formset, so it can deal with it.
if isinstance(formset, BaseContentItemFormSet):
formset._deleted_placeholders = getattr(request, "_deleted_placeholders", ())
saved_instances = super().save_formset(request, form, formset, change)
if isinstance(formset, PlaceholderInlineFormSet):
request._deleted_placeholders = [obj._old_pk for obj in formset.deleted_objects]
return saved_instances
@receiver(signals.post_delete, sender=Placeholder)
def _get_pk_on_placeholder_delete(instance, **kwargs):
# Make sure the old PK can still be tracked
instance._old_pk = instance.pk
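# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical admin registration; `Page` and its template path are
# assumptions. get_placeholder_data() would typically derive its
# PlaceholderData list from the page template via fluent_contents.analyzer:
#
# from django.contrib import admin
# from django.template.loader import get_template
# from fluent_contents.analyzer import get_template_placeholder_data
#
# class PageAdmin(PlaceholderEditorAdmin):
#     def get_placeholder_data(self, request, obj=None):
#         return get_template_placeholder_data(get_template("pages/default.html"))
#
# admin.site.register(Page, PageAdmin)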
|
django-fluent/django-fluent-contents
|
fluent_contents/admin/placeholdereditor.py
|
Python
|
apache-2.0
| 15,521
|
import sys
import numpy as np
from PyQt4 import QtCore, QtGui
from pyhrf.ndarray import xndarray
from cuboid_browser_ui import Ui_xndarrayBrowser
from axis_slicer import AxisSlicer
from ui_base import DomainValue
#pyuic4 cuboid_browser.ui -o cuboid_browser_ui.py
class LimitedFifo(list):
""" Implement a First-In First-Out container with a limited capacity.
If an item is added beyond max capacity, then the first added item is
removed
"""
def __init__(self, max_size, on_pop=None):
"""
Create a LimitedFifo instance with max_size as the limited capacity.
A callback can be called when an item is removed (see arg on_pop)
Args:
- max_size (int): the maximum capacity of the FIFO
- on_pop (function): the function to apply on a removed item.
"""
self.max_size = max_size
self.on_pop = on_pop
def append(self, o):
list.append(self,o)
if self.__len__() > self.max_size:
self.pop(0)
def pop(self, i):
e = self.__getitem__(i)
list.pop(self, i)
if self.on_pop is not None:
self.on_pop(e)
def swap(self, i, j):
"""
Swap the two elements defined by the given indexes, if they exist.
Produce True if swapping occurred, else False
Args:
- i (int>=0): the index of the first item to swap
- j (int>=0): the index of the second item to swap
Return:
bool -> True if swapping occurred, else False
"""
if i < len(self) and j < len(self):
self[i], self[j] = self[j], self[i]
return True
return False
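# --- Illustrative usage (not part of the original module) ---
# A capacity-2 FIFO: the third append evicts the oldest item via on_pop.
# >>> evicted = []
# >>> fifo = LimitedFifo(2, on_pop=evicted.append)
# >>> for x in 'abc': fifo.append(x)
# >>> evicted, list(fifo)
# (['a'], ['b', 'c'])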
class xndarrayBrowser(QtGui.QWidget):
"""
Signals:
- slice_changed(dict, tuple): emitted when the current slice changed. Args are:
- (dict of <axis name (str)>:<DomainValue>): slice value for
each axis
- (tuple of <axis name (str)>): list of projected axes
"""
slice_changed = QtCore.pyqtSignal(dict, tuple, name='SliceChanged')
# projected_axes_changed = QtCore.pyqtSignal(name='ProjectedAxesChanged')
# projected_axes_swapped = QtCore.pyqtSignal(name='ProjectedAxesSwapped')
closing = QtCore.pyqtSignal(name='Closing')
def __init__(self, cuboid, name, parent=None):
"""
Args:
"""
QtGui.QWidget.__init__(self, parent)
self.cuboid_name = name
self.ui = Ui_xndarrayBrowser()
self.ui.setupUi(self)
domains = cuboid.get_axes_domains()
anames = cuboid.axes_names
self.slicers = dict((an,AxisSlicer(an, domains[an])) \
for an in anames)
self.slice_def = dict((an, domains[an][0]) for an in anames)
for an in anames:
slicer = self.slicers[an]
self.ui.verticalLayout.addWidget(slicer)
slicer._slice_value_changed.connect(self.handle_slice_value_changed)
slicer.axis_state_changed.connect(self.set_axis_state)
self.selection_fifo_max_size = 2
self.selection_fifo = LimitedFifo(self.selection_fifo_max_size,
on_pop=self.deselect_axis)
checked = QtCore.Qt.Checked
self.slicers[anames[0]].set_axis_selection_checkbox_state(checked)
if len(anames) > 1:
self.slicers[anames[1]].set_axis_selection_checkbox_state(checked)
self.ui.swap_button.clicked.connect(self.swap_current_axes)
def set_new_cuboid(self, c):
"""
Set the current cuboid to c, while trying to maintain the current slice
definition
"""
print 'todo: set_new_cuboid:'
print c.descrip()
print 'current cuboid is:'
print c.descrip()
def get_slice_def(self):
return self.slice_def
def set_slice_value(self, axis, value):
self.slicers[axis].set_slice_value(axis, value)
def get_current_axes(self):
return tuple(str(a) for a in self.selection_fifo)
@QtCore.pyqtSlot(str, DomainValue)
def handle_slice_value_changed(self, aname, dval):
self.slice_def[str(aname)] = dval
self.slice_changed.emit(self.slice_def, self.get_current_axes())
@QtCore.pyqtSlot(str, bool)
def set_axis_state(self, axis_name, state):
print 'set_axis_state:', axis_name, '->', state
print 'current selection_fifo:', self.selection_fifo
state_changed = False
if not state:
if axis_name in self.selection_fifo:
self.selection_fifo.remove(axis_name)
state_changed = True
else:
if axis_name not in self.selection_fifo:
self.selection_fifo.append(axis_name)
state_changed = True
if state_changed:
self.update_current_axis_labels()
print 'emit slice_changed:', self.slice_def, '|| ca:', self.get_current_axes()
self.slice_changed.emit(self.slice_def, self.get_current_axes())
def update_current_axis_labels(self):
if len(self.selection_fifo) == 0:
self.ui.label_axis_1.setText('')
self.ui.label_axis_2.setText('')
elif len(self.selection_fifo) == 1:
self.ui.label_axis_1.setText(self.selection_fifo[0])
self.ui.label_axis_2.setText('')
else:
self.ui.label_axis_1.setText(self.selection_fifo[0])
self.ui.label_axis_2.setText(self.selection_fifo[1])
def deselect_axis(self, axis_name):
unchecked = QtCore.Qt.Unchecked
self.slicers[str(axis_name)].set_axis_selection_checkbox_state(unchecked)
def swap_current_axes(self):
if self.selection_fifo.swap(0,1):
self.slice_changed.emit(self.slice_def, self.get_current_axes())
self.update_current_axis_labels()
def closeEvent(self, event):
"""
Override closeEvent to emit a signal just before the widget is actually
closed.
"""
self.closing.emit()
QtGui.QWidget.closeEvent(self, event)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
sh = (10,10,5,3)
c1 = xndarray(np.arange(np.prod(sh)).reshape(sh),
axes_names=['sagittal','coronal','axial','condition'],
axes_domains={'condition':['audio1','audio2', 'video']})
sh = (10,10,5,4)
c2 = xndarray(np.arange(np.prod(sh)).reshape(sh),
axes_names=['sagittal','coronal','axial','condition'],
axes_domains={'condition':['video','sentence',
'audio2','audio1']})
cb1 = xndarrayBrowser(c1, 'c1')
cb2 = xndarrayBrowser(c2, 'c2')
cb1.show()
cb2.show()
from ui_base import SignalPrinter
cb1.slice_changed.connect(SignalPrinter('cb1 slice changed ->'))
cb2.slice_changed.connect(SignalPrinter('cb2 slice changed ->'))
cb1.closing.connect(SignalPrinter('cb1 closing'))
cb2.closing.connect(SignalPrinter('cb2 closing'))
sys.exit(app.exec_())
|
philouc/pyhrf
|
python/pyhrf/viewer/cuboid_browser.py
|
Python
|
gpl-3.0
| 7,136
|
# Copyright (C) 2011-2012 Alexander Shorin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Customized by Gabor Pap
# 2014
import logging
import sys
from math import isinf, isnan
from decimal import Decimal
from struct import pack, unpack
version = '.'.join(map(str, sys.version_info[:2]))
if version >= '3.0':
from io import BytesIO
basestring = (str, bytes)
unicode = str
bytes = bytes
long = int
xrange = range
d = {}
dict_keysiterator = type(d.keys())
dict_valuesiterator = type(d.values())
dict_itemsiterator = type(d.items())
else:
from cStringIO import StringIO as BytesIO
basestring = basestring
unicode = unicode
b = bytes = str
long = long
xrange = xrange
d = {}
dict_keysiterator = type(d.iterkeys())
dict_valuesiterator = type(d.itervalues())
dict_itemsiterator = type(d.iteritems())
b = lambda s: isinstance(s, unicode) and s.encode('latin1') or s
u = lambda s: isinstance(s, bytes) and s.decode('utf-8') or s
XRangeType = type(xrange(0))
LOG = logging.getLogger(__name__)
NOOP_SENTINEL = type('NoOp', (object,), {'__slots__': ()})()
MIXED = b('M')
NOOP = b('N')
EOS = b('E')
NULL = b('Z')
FALSE = b('F')
TRUE = b('T')
INT8 = b('B')
INT16 = b('i')
INT32 = b('I')
INT64 = b('L')
FLOAT = b('d')
DOUBLE = b('D')
STRING_S = b('s')
STRING_L = b('S')
HIDEF_S = b('h')
HIDEF_L = b('H')
ARRAY_S = b('a')
OBJECT_S = b('o')
ARRAY_L = b('A')
OBJECT_L = b('O')
FF = b(chr(255))
BOS_A = object()
BOS_O = object()
CONSTANTS = set([NOOP, EOS, NULL, FALSE, TRUE])
NUMBERS = set([INT8, INT16, INT32, INT64, FLOAT, DOUBLE])
STRINGS = set([STRING_S, STRING_L, HIDEF_S, HIDEF_L])
SHORT_OBJ = set([STRING_S, HIDEF_S, ARRAY_S, OBJECT_S])
LARGE_OBJ = set([STRING_L, HIDEF_L, ARRAY_L, OBJECT_L])
STREAMS = set([ARRAY_S, OBJECT_S])
OBJECT_KEYS = set([STRING_S, STRING_L])
FORBIDDEN = set([NOOP, EOS])
CHARS = dict((i, b(chr(i))) for i in range(256))
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
class TysonDecoder(object):
# TODO: adjust the decoder to understand the tyson format, now it does
# DRAFT 8 ubjson decoding
"""Decoder of UBJSON data to Python object following Draft 8 specification
and using the following data mapping:
+--------+----------------------------+----------------------------+-------+
| Marker | UBJSON type | Python type | Notes |
+========+============================+============================+=======+
| ``N`` | noop | :const:`~simpleubjson.NOOP`| \(1) |
+--------+----------------------------+----------------------------+-------+
| ``Z`` | null | None | |
+--------+----------------------------+----------------------------+-------+
| ``F`` | false | bool | |
+--------+----------------------------+----------------------------+-------+
| ``T`` | true | bool | |
+--------+----------------------------+----------------------------+-------+
| ``B`` | byte | int | |
+--------+----------------------------+----------------------------+-------+
| ``i`` | int16 | int | |
+--------+----------------------------+----------------------------+-------+
| ``I`` | int32 | int | |
+--------+----------------------------+----------------------------+-------+
| ``L`` | int64 | long | |
+--------+----------------------------+----------------------------+-------+
| ``d`` | float | float | |
+--------+----------------------------+----------------------------+-------+
| ``D`` | double | float | |
+--------+----------------------------+----------------------------+-------+
| ``h`` | hugeint - 2 bytes | decimal.Decimal | |
+--------+----------------------------+----------------------------+-------+
| ``H`` | hugeint - 5 bytes | decimal.Decimal | |
+--------+----------------------------+----------------------------+-------+
| ``s`` | string - 2 bytes | unicode | |
+--------+----------------------------+----------------------------+-------+
| ``S`` | string - 5 bytes | unicode | |
+--------+----------------------------+----------------------------+-------+
| ``a`` | array - 2 bytes | list | |
+--------+----------------------------+----------------------------+-------+
| ``a`` | array - unsized | generator | \(2) |
+--------+----------------------------+----------------------------+-------+
| ``A`` | array - 5 bytes | list | |
+--------+----------------------------+----------------------------+-------+
| ``o`` | object - 2 bytes | dict | |
+--------+----------------------------+----------------------------+-------+
| ``o`` | object - unsized | generator | \(3) |
+--------+----------------------------+----------------------------+-------+
| ``O`` | object - 5 bytes | dict | |
+--------+----------------------------+----------------------------+-------+
Notes:
(1)
`NoOp` values are ignored by default if only `allow_noop` argument
wasn't passed as ``True``.
(2)
Nested generators are automatically converted to lists.
(3)
Unsized objects are represented as list of 2-element tuple with object
key and value.
"""
dispatch = {}
def __init__(self, source, allow_noop=False):
if isinstance(source, unicode):
source = source.encode('utf-8')
if isinstance(source, bytes):
source = BytesIO(source)
self.read = source.read
self.allow_noop = allow_noop
self.dispatch = self.dispatch.copy()
def __iter__(self):
return self
def next_tlv(self):
tag = self.read(1)
while tag == NOOP and not self.allow_noop:
tag = self.read(1)
if tag in NUMBERS:
if tag == INT8:
# Trivial operations for trivial cases saves a lot of time
                value = ord(self.read(1))
                # int8 is signed: raw bytes 128..255 wrap to -128..-1
                if value >= 128:
                    value -= 256
#value, = unpack('>b', self.read(1))
elif tag == INT16:
value, = unpack('>h', self.read(2))
elif tag == INT32:
value, = unpack('>i', self.read(4))
elif tag == INT64:
value, = unpack('>q', self.read(8))
elif tag == FLOAT:
value, = unpack('>f', self.read(4))
elif tag == DOUBLE:
value, = unpack('>d', self.read(8))
else:
raise MarkerError('tag %r not in NUMBERS %r' % (tag, NUMBERS))
return tag, None, value
elif tag in SHORT_OBJ:
length = ord(self.read(1))
if tag in STRINGS:
if length == 255:
raise MarkerError(
'Short string objects (%r) should not have length 255'
% tag)
return tag, length, self.read(length)
return tag, length, None
elif tag in LARGE_OBJ:
length, = unpack('>I', self.read(4))
if tag in STRINGS:
return tag, length, self.read(length)
return tag, length, None
elif tag in CONSTANTS:
return tag, None, None
elif not tag:
raise EarlyEndOfStreamError('nothing to decode')
else:
raise MarkerError('invalid marker 0x%02x (%r)' % (ord(tag), tag))
def decode_next(self):
tag, length, value = self.next_tlv()
return self.dispatch[tag](self, tag, length, value)
__next__ = next = decode_next
def decode_noop(self, tag, length, value):
return NOOP_SENTINEL
dispatch[NOOP] = decode_noop
def decode_none(self, tag, length, value):
return None
dispatch[NULL] = decode_none
def decode_false(self, tag, length, value):
return False
dispatch[FALSE] = decode_false
def decode_true(self, tag, length, value):
return True
dispatch[TRUE] = decode_true
def decode_int(self, tag, length, value):
return value
dispatch[INT8] = decode_int
dispatch[INT16] = decode_int
dispatch[INT32] = decode_int
dispatch[INT64] = decode_int
def decode_float(self, tag, length, value):
return value
dispatch[FLOAT] = decode_float
dispatch[DOUBLE] = decode_float
def decode_string(self, tag, length, value):
return value.decode('utf-8')
dispatch[STRING_S] = decode_string
dispatch[STRING_L] = decode_string
def decode_hidef(self, tag, length, value):
return Decimal(value.decode('utf-8'))
dispatch[HIDEF_S] = decode_hidef
dispatch[HIDEF_L] = decode_hidef
def decode_array(self, tag, length, value):
if tag == ARRAY_S and length == 255:
return self.decode_array_stream(tag, length, value)
res = [None] * length
next_tlv = self.next_tlv
dispatch = self.dispatch
forbidden = FORBIDDEN
streams = STREAMS
for _ in range(length):
tag, length, value = next_tlv()
if tag in forbidden:
raise MarkerError('invalid marker occurs: %02X' % ord(tag))
item = dispatch[tag](self, tag, length, value)
if tag in streams and length == 255:
item = list(item)
res[_] = item
return res
dispatch[ARRAY_S] = decode_array
dispatch[ARRAY_L] = decode_array
def decode_object(self, tag, length, value):
if tag == OBJECT_S and length == 255:
return self.decode_object_stream(tag, length, value)
res = {}
key = None
next_tlv = self.next_tlv
dispatch = self.dispatch
forbidden = FORBIDDEN
object_keys = OBJECT_KEYS
streams = STREAMS
for _ in range(length * 2):
tag, length, value = next_tlv()
if tag in forbidden:
raise MarkerError('invalid marker found: %02X' % ord(tag))
if key is None and tag not in object_keys:
raise MarkerError('key should be string, got %r' % (tag))
value = dispatch[tag](self, tag, length, value)
if key is None:
key = value
else:
if tag in streams and length == 255:
value = list(value)
res[key] = value
key = None
return res
dispatch[OBJECT_S] = decode_object
dispatch[OBJECT_L] = decode_object
def decode_array_stream(self, tag, length, value):
dispatch = self.dispatch
next_tlv = self.next_tlv
eos = EOS
streams = STREAMS
def array_stream():
while 1:
tag, length, value = next_tlv()
if tag == eos:
break
item = dispatch[tag](self, tag, length, value)
if tag in streams and length == 255:
yield list(item)
else:
yield item
return array_stream()
def decode_object_stream(self, tag, length, value):
dispatch = self.dispatch
next_tlv = self.next_tlv
eos = EOS
object_keys = OBJECT_KEYS
noop = NOOP
noop_sentinel = NOOP_SENTINEL
streams = STREAMS
def object_stream():
key = None
while 1:
tag, length, value = next_tlv()
if tag == noop and key is None:
yield noop_sentinel, noop_sentinel
elif tag == NOOP and key:
continue
elif tag == eos:
if key:
raise EarlyEndOfStreamError('value missed for key %r'
% key)
break
elif key is None and tag not in object_keys:
raise MarkerError('key should be string, got %r' % (tag))
else:
value = dispatch[tag](self, tag, length, value)
if key is None:
key = value
elif tag in streams:
yield key, list(value)
key = None
else:
yield key, value
key = None
return object_stream()
class TysonEncoder(object):
"""Encoder of Python objects into UBJSON data following Draft 8
specification rules with next data mapping:
+-----------------------------+------------------------------------+-------+
| Python type | UBJSON type | Notes |
+=============================+====================================+=======+
| :const:`~simpleubjson.NOOP` | NoOp | |
+-----------------------------+------------------------------------+-------+
| :const:`None` | null | |
+-----------------------------+------------------------------------+-------+
| :class:`bool` | :const:`False` => false | |
| | :const:`True` => true | |
+-----------------------------+------------------------------------+-------+
| :class:`int`, | `integer` or `huge` | \(1) |
| :class:`long` | | |
+-----------------------------+------------------------------------+-------+
| :class:`float` | `float`, `null` or `huge` | \(2) |
+-----------------------------+------------------------------------+-------+
| :class:`str`, | string | \(3) |
| :class:`unicode` | | \(4) |
+-----------------------------+------------------------------------+-------+
| :class:`tuple`, | sized array | \(3) |
| :class:`list`, | | |
| :class:`set`, | | |
| :class:`frozenset`, | | |
+-----------------------------+------------------------------------+-------+
| :class:`generator`, | unsized array | |
| :class:`XRange` | | |
+-----------------------------+------------------------------------+-------+
| :class:`dict` | object | \(3) |
| | | \(5) |
+-----------------------------+------------------------------------+-------+
| :class:`dict_itemsiterator` | unsized object | \(5) |
+-----------------------------+------------------------------------+-------+
| :class:`decimal.Decimal` | hidef | |
+-----------------------------+------------------------------------+-------+
Notes:
(1)
Depending on value it may be encoded into various UBJSON types:
* [-2^7, 2^7): ``int8``
* [-2^15, 2^15): ``int16``
* [-2^31, 2^31): ``int32``
* [-2^63, 2^63): ``int64``
* everything bigger/smaller: ``huge``
(2)
Depending on value it may be encoded into various UBJSON types:
* 1.18e-38 <= abs(value) <= 3.4e38: ``float``
* 2.23e-308 <= abs(value) < 1.8e308: ``double``
* :const:`inf`, :const:`-inf`: ``null``
* everything bigger/smaller: ``huge``
(3)
Depending on object length short or long version of UBJSON type may be
produced.
(4)
        Unicode strings are encoded with the utf-8 charset. Byte strings are
        required to be valid `utf-8` or :exc:`simpleubjson.EncodeError`
        will be raised.
(5)
Dict keys should have string type or :exc:`simpleubjson.EncodeError`
will be raised.
    Customization: when an integer array is encountered, the encoder derives
    the array type from the element with the largest magnitude; if that fails
    for any reason, it falls back to encoding the whole sequence as MIXED.
    This works well when the numbers in the array are mostly in the same
    range.
"""
dispatch = {}
def __init__(self, default=None):
self._default = default or self.default
def default(self, obj):
raise EncodeError('unable to encode %r' % obj)
@classmethod
def encode(cls, data, output=None):
"""Encodes Python object to Universal Binary JSON data.
:param data: Python object.
        :param output: any object with a ``.write(data)`` method. If omitted,
                       the result is returned instead of being written out.
        :return: Encoded Python object as bytes; see the mapping table above.
                 If `output` is specified, the data is written into it in
                 chunks and None is returned.
"""
        res = cls(None).encode_next(data)
if output:
output.write(res)
else:
return res
def encode_next(self, obj):
tobj = type(obj)
if tobj in self.dispatch:
res = self.dispatch[tobj](self, obj)
else:
return self.encode_next(self._default(obj))
if isinstance(res, bytes):
return res
return bytes().join(res)
def encode_noop(self, obj):
return NOOP
dispatch[type(NOOP_SENTINEL)] = encode_noop
def encode_none(self, obj):
return NULL
dispatch[type(None)] = encode_none
def encode_bool(self, obj):
return TRUE if obj else FALSE
dispatch[bool] = encode_bool
def encode_int(self, obj, int_type=None):
if int_type:
if type(obj) not in [int, long]:
raise EncodeError('Not an integer: %r' % obj)
if int_type == INT8:
return CHARS[obj % 256]
elif int_type == INT16:
return pack('>h', obj)
elif int_type == INT32:
return pack('>i', obj)
elif int_type == INT64:
return pack('>q', obj)
if (-2 ** 7) <= obj <= (2 ** 7 - 1):
return INT8 + CHARS[obj % 256]
elif (-2 ** 15) <= obj <= (2 ** 15 - 1):
return INT16 + pack('>h', obj)
elif (-2 ** 31) <= obj <= (2 ** 31 - 1):
return INT32 + pack('>i', obj)
elif (-2 ** 63) <= obj <= (2 ** 63 - 1):
return INT64 + pack('>q', obj)
else:
return self.encode_decimal(Decimal(obj))
dispatch[int] = encode_int
dispatch[long] = encode_int
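    # Boundary examples (illustrative): 127 still fits int8, 128 is promoted
    # to int16, 2**31 needs int64, and anything outside the int64 range falls
    # through to the hugeint/Decimal path above.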
def encode_float(self, obj):
if 1.18e-38 <= abs(obj) <= 3.4e38:
return FLOAT + pack('>f', obj)
elif 2.23e-308 <= abs(obj) < 1.8e308:
return DOUBLE + pack('>d', obj)
elif isinf(obj) or isnan(obj):
return NULL
else:
return self.encode_decimal(Decimal(obj))
dispatch[float] = encode_float
def _encode_str(self, obj):
length = len(obj)
if length < 255:
return STRING_S + CHARS[length] + obj
else:
return STRING_L + INT32 + pack('>i', length) + obj
def encode_bytes(self, obj):
try:
obj.decode('utf-8')
except UnicodeDecodeError:
raise EncodeError('Invalid UTF-8 byte string: %r' % obj)
else:
return self._encode_str(obj)
dispatch[bytes] = encode_bytes
def encode_str(self, obj):
return self._encode_str(obj.encode('utf-8'))
dispatch[unicode] = encode_str
def encode_decimal(self, obj):
obj = unicode(obj).encode('utf-8')
length = len(obj)
if length < 255:
return HIDEF_S + CHARS[length] + obj
else:
return HIDEF_L + pack('>i', length) + obj
dispatch[Decimal] = encode_decimal
def encode_sequence(self, obj):
length = len(obj)
array_type = MIXED
try:
encoded_item = self.encode_next(max([abs(item) for item in obj]))
if encoded_item.startswith(INT8):
array_type = INT8
elif encoded_item.startswith(INT16):
array_type = INT16
elif encoded_item.startswith(INT32):
array_type = INT32
elif encoded_item.startswith(INT64):
array_type = INT64
        except Exception:
            # non-numeric elements (or an empty sequence)
            array_type = MIXED
if array_type != MIXED:
try:
if length < 255:
return_value = ARRAY_S + array_type + CHARS[length]
else:
return_value = ARRAY_L + array_type + pack('>I', length)
for item in obj:
return_value += self.encode_int(item, array_type)
yield return_value
            except Exception:
                # the maximum was an integer, but the array is not homogeneous
                array_type = MIXED
if array_type == MIXED:
if length < 255:
yield ARRAY_S + array_type + CHARS[length]
else:
yield ARRAY_L + array_type + pack('>I', length)
for item in obj:
yield self.encode_next(item)
dispatch[tuple] = encode_sequence
dispatch[list] = encode_sequence
dispatch[set] = encode_sequence
dispatch[frozenset] = encode_sequence
def encode_dict(self, obj):
length = len(obj)
if length < 255:
yield OBJECT_S + CHARS[length]
else:
yield OBJECT_L + pack('>I', length)
for key, value in obj.items():
if isinstance(key, unicode):
yield self.encode_str(key)
elif isinstance(key, bytes):
yield self.encode_bytes(key)
else:
raise EncodeError('invalid object key %r' % key)
yield self.encode_next(value)
dispatch[dict] = encode_dict
def encode_generator(self, obj):
yield ARRAY_S + FF
for item in obj:
yield self.encode_next(item)
yield EOS
dispatch[xrange] = encode_generator
dispatch[type((i for i in ()))] = encode_generator
dispatch[dict_keysiterator] = encode_generator
dispatch[dict_valuesiterator] = encode_generator
def encode_dictitems(self, obj):
yield OBJECT_S + FF
for key, value in obj:
if isinstance(key, unicode):
yield self.encode_str(key)
elif isinstance(key, bytes):
yield self.encode_bytes(key)
else:
raise EncodeError('invalid object key %r' % key)
yield self.encode_next(value)
yield EOS
dispatch[dict_itemsiterator] = encode_dictitems
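# --- Illustrative usage sketch (not part of the original module) ---
# Per the TODO above, the decoder speaks plain Draft 8 while the encoder
# emits typed (tyson-style) arrays, so this hypothetical round trip sticks
# to scalar values, which both sides treat identically.
if __name__ == '__main__':
    payload = {'answer': 42, 'name': 'tyson'}
    blob = TysonEncoder.encode(payload)
    # key order may vary: {u'answer': 42, u'name': u'tyson'}
    print(TysonDecoder(blob).decode_next())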
|
steptools/StepNCViewer
|
scripts/tyson.py
|
Python
|
apache-2.0
| 25,604
|
data_file = "./csv/data.csv"
|
rwindegger/nuclai15
|
dota2/config.py
|
Python
|
gpl-3.0
| 28
|
from setuptools import setup, find_packages
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name = 'github-pr-form',
version = '0.1.1',
author = 'Aaron N Browne',
author_email = 'aaron0browne@gmail.com',
url = 'https://github.com/aaron0browne/github-pr-form',
description = 'A small python utility for generating forms from the command line and adding them to GitHub pull requests or issues, while tracking them in a secondary github repo.',
long_description = long_description,
    download_url = 'https://github.com/aaron0browne/github-pr-form/tarball/v0.1.1',
license = 'MIT License',
keywords = ['github', 'rfc', 'change control', 'form', 'pull request'],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Software Development :: Version Control'
],
packages = find_packages(),
install_requires = [
'github3.py==0.9.3',
'sh==1.09',
'click==3.3',
'markdown2==2.3.0',
'selenium==2.44.0'
],
entry_points = {
'console_scripts': [
'ghform = ghform.cli:cli'
]
}
)
|
aaron0browne/github-pr-form
|
setup.py
|
Python
|
mit
| 1,355
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from wagtail.tests.utils import WagtailTestUtils
class TestStyleGuide(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def test_styleguide(self):
response = self.client.get(reverse('wagtailstyleguide'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailstyleguide/base.html')
|
tangentlabs/wagtail
|
wagtail/contrib/wagtailstyleguide/tests.py
|
Python
|
bsd-3-clause
| 445
|
"""Running world simulations of Super Mario Bros
"""
__author__ = "Liyan Chen"
__copyright__ = "Copyright (c) 2017 Malmactor"
__license__ = "MIT"
import copy
import itertools as it
from momentum_handler import *
from simulatables import *
def layout_tobb(layout, config=None):
maxx, maxy = layout.shape
id2block = {0: 'air', 1: 'brick_block', 2: 'lava',
3: 'red_mushroom'} if config is None or "id2block" not in config else config["id2block"]
block2id = dict(map(lambda item: (item[1], item[0]), id2block.items()))
block_radius = (0.5, 0.5) if config is None or "block_radius" not in config else config["block_radius"]
pos2bb = {}
for x in range(maxx):
for y in range(maxy):
if layout[x, y] in (block2id['brick_block'], block2id['lava']):
pos2bb[(x, y)] = CollidableAABB((x, y), block_radius, config)
return pos2bb
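# Example (illustrative): with the default id2block mapping, a layout of
#   np.array([[0, 0], [1, 0]])
# yields a single bounding box keyed by the brick's grid position, (1, 0).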
def collision_proposal(mario, pos2bb, config=None, xdist=2):
"""
Propose potential collision box around mario
:param mario: CollidableRigid instance
:param pos2bb: Position-to-boundingbox mapping
:param config: Global configuration
:return: List of potential collision boxes
"""
minx, miny, maxx, maxy = -xdist, -1, xdist, 1
center = mario.get_center()
return list(map(lambda pos: pos2bb[pos],
filter(lambda pos: pos in pos2bb,
map(lambda d: tuple((center + d).astype("int")),
it.product(xrange(minx, maxx), xrange(miny, maxy))))))
def hit_edge_reaction(collision):
"""
Give corresponding handler to each edge hit.
Normal line indexed directions: 0: right, 1: left, 2: up, 3: down
:param collision: Collision dict
:return: Hit handler
"""
directions = np.array([[1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0]])
edge2action = {0: hit_sides, 1: hit_sides, 2: hit_ground, 3: hit_ceiling}
return edge2action[np.argmax(np.dot(collision['hit']['normal'], directions))]
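# Example (illustrative): a hit normal of (0, 1) has its largest dot product
# with direction index 2 ("up"), so hit_ground is returned; a normal of
# (-1, 0) picks index 1 ("left") and returns hit_sides.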
def compensate_gravity(mario, surrounding_bb, config=None):
# If mario has gravity, it doesn't need compensation
if abs(mario.state[1, 2]) < config["greater_eps"]:
# Create a hypothesis of ground hitting test
hypothetical_mario = copy.deepcopy(mario)
hypothetical_mario.state[1, 2] = -0.05
hypothetical_mario.state[1, 1] = -0.05
hypothetical_mario.update()
return any(map(lambda bb: bb.collide(hypothetical_mario)['hit'] is not None, surrounding_bb))
class MarioSimulation:
def __init__(self, layout, config=None):
"""
Instantiate physics engine objects
:param layout: Two-dimensional numpy array layout
:param config: Global configuration
"""
self.layout = layout
self.config = config
init_pos = np.array([2, 3, 0]) if config is None or "init_pos" not in config else config["init_pos"]
mario_bb = np.array([0.5, 1]) if config is None or "mario_bb" not in config else config["mario_bb"]
self.mario = CollidableRigid(init_pos, mario_bb, config)
self.brick_bb = layout_tobb(layout, config)
# Start with gravity
self.mario.reaction(give_gravity)
def advance_frame(self, action):
"""
Update physics engine object states for next frame
:param action: agent action for current frame
:return: None
"""
if(self.mario.state[0][0] <= 0.5):
return
# Advance a time step
self.mario.update()
# Locate blocks for collision detections
bb_to_check = collision_proposal(self.mario, self.brick_bb, self.config)
gravity = compensate_gravity(self.mario, bb_to_check, self.config)
if not bb_to_check or gravity:
self.mario.reaction(give_gravity)
# Resolve collisions
collisions = list(filter(lambda pair: pair[1]['hit'] is not None,
map(lambda bb: (bb.get_center(), bb.collide(self.mario)), bb_to_check)))
if collisions:
closest_collision = min(collisions, key=lambda pair: pair[1]['hit']['time'])
self.mario.reaction(collision_resolved, closest_collision[1]["hit"]["delta"])
# self.mario.reaction(collision_resolved, closest_collision[1]["position"])
# Process momentum change
self.mario.reaction(hit_edge_reaction(closest_collision[1]))
# Grab an action from input and simulate the force
self.mario.reaction(action_mapping[action])
if collisions:
return closest_collision
def get_renderable(self):
return self.mario
def next_none_time(self, ori):
if(self.mario.state[0][0] <= 0.5):
return ori
ll = []
for bb in self.brick_bb:
if bb[0] == int(self.mario.state[0][0]):
if bb[1] < int(self.mario.state[1][0]):
ll.append(bb)
ll = sorted(ll,key=lambda x:x[1],reverse=True)
block_below = ll[0]
print self.mario.state, block_below
if self.mario.state[1][0] - block_below[1] > 1.05:
print "self", self.mario.state, block_below
print "END"
return 0
return ori
|
Malmactor/malrio
|
src/SuperMarioBros/simulation.py
|
Python
|
mit
| 5,352
|
VERSION = (0, 9, 2)
__author__ = 'Rogério Sampaio de Almeida'
__email__ = 'rsalmei@gmail.com'
__version__ = '.'.join(map(str, VERSION))
__all__ = ('__author__', '__version__', '__email__')
|
rsalmei/clearly
|
clearly/__init__.py
|
Python
|
mit
| 192
|
# -*- coding: utf-8 -*-
import re
import subprocess
def picamera_status():
"""Returns support and detection status for the Raspberry Pi Camera.
:return: (supported, detected)
:rtype: (bool, bool)
"""
supported, detected = False, False
vcgencmd_regex = r'supported=(0|1) detected=(0|1)'
try:
vcgencmd = subprocess.check_output(['vcgencmd', 'get_camera'])
result = re.match(vcgencmd_regex, vcgencmd).groups()
supported = bool(int(result[0]))
detected = bool(int(result[1]))
    except (subprocess.CalledProcessError, OSError, AttributeError):
        # AttributeError covers re.match() returning None on unexpected output
return False, False
return supported, detected
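# Example (illustrative): on a Raspberry Pi with a camera module attached,
# `vcgencmd get_camera` prints "supported=1 detected=1" and this function
# returns (True, True); on hosts without vcgencmd the subprocess call raises
# OSError and (False, False) is returned.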
|
swoopi/swoopi
|
swoopi/utils.py
|
Python
|
apache-2.0
| 651
|
"""
developeraccount
FILE: __init__.py
Created: 7/5/15 12:42 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from accounts.views.other import *
from accounts.views.user import *
from accounts.views.sms import *
|
ekivemark/devaccount3
|
accounts/views/__init__.py
|
Python
|
gpl-2.0
| 214
|
import csv
import datetime
from csvkit.cleanup import RowChecker
from dateutil.parser import parse
import sqlalchemy as sa
from collections import OrderedDict
TRUE_VALUES = ('yes', 'y', 'true', 't')
FALSE_VALUES = ('no', 'n', 'false', 'f')
DEFAULT_DATETIME = datetime.datetime(9999, 12, 31, 0, 0, 0)
NULL_DATE = datetime.date(9999, 12, 31)
NULL_TIME = datetime.time(0, 0, 0)
class TypeInferer(object):
def __init__(self,
fpath,
encoding='utf-8',
delimiter=',',
quoting=csv.QUOTE_MINIMAL):
self.fpath = fpath
self.encoding = encoding
self.delimiter = delimiter
self.quoting = quoting
with open(self.fpath, 'r', encoding=self.encoding) as f:
reader = csv.reader(f, delimiter=self.delimiter, quoting=self.quoting)
self.header = next(reader)
self.types = OrderedDict()
def iterColumn(self, col_idx):
with open(self.fpath, 'r', encoding=self.encoding) as f:
reader = csv.reader(f, delimiter=self.delimiter, quoting=self.quoting)
header = next(reader)
checker = RowChecker(reader)
for row in checker.checked_rows():
try:
yield row[col_idx]
except IndexError:
continue
def infer(self):
for idx, col in enumerate(self.header):
self.tryAll(col, idx)
def tryAll(self, col, idx):
try:
self.types[col] = self.tryBoolean(idx)
return
except ValueError:
pass
try:
self.types[col] = self.tryInteger(idx)
return
except ValueError as e:
pass
try:
self.types[col] = self.tryFloat(idx)
return
except ValueError as e:
pass
try:
self.types[col] = self.tryDateTime(idx)
return
except (TypeError, ValueError) as e:
pass
try:
self.types[col] = self.tryDate(idx)
return
except (TypeError, ValueError) as e:
pass
self.types[col] = sa.String
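    # Inference order example (illustrative): a column of ['1', '2', '']
    # passes tryInteger and maps to sa.Integer, while ['1', '2.5'] fails it
    # and falls through to sa.Float; anything unparseable ends up sa.String.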
def tryBoolean(self, col_idx):
for x in self.iterColumn(col_idx):
if x.lower() in TRUE_VALUES:
continue
elif x.lower() in FALSE_VALUES:
continue
else:
raise ValueError('Not boolean')
return sa.Boolean
def tryInteger(self, col_idx):
for x in self.iterColumn(col_idx):
if x == '':
continue
if isinstance(x, int):
continue
try:
int_x = int(x.replace(',', ''))
except ValueError as e:
raise e
try:
if x[0] == '0' and int(x) != 0:
raise ValueError('Not integer')
except ValueError as e:
raise e
return sa.Integer
def tryFloat(self, col_idx):
for x in self.iterColumn(col_idx):
if x == '':
continue
try:
float_x = float(x.replace(',', ''))
except ValueError as e:
raise e
return sa.Float
def tryDate(self, col_idx):
for x in self.iterColumn(col_idx):
if x == '' or x is None:
continue
try:
d = parse(x, default=DEFAULT_DATETIME)
except TypeError as e:
raise e
# Is it only a time?
if d.date() == NULL_DATE:
raise ValueError('Not a Date')
# Is it only a date?
elif d.time() == NULL_TIME:
continue
return sa.Date
def tryDateTime(self, col_idx):
for x in self.iterColumn(col_idx):
if x == '' or x is None:
continue
try:
d = parse(x, default=DEFAULT_DATETIME)
except TypeError as e:
raise e
# Is it only a time?
if d.date() == NULL_DATE:
raise ValueError('Not a DateTime')
# Is it only a date?
elif d.time() == NULL_TIME:
raise ValueError('Not a DateTime')
# It must be a date and time
else:
continue
return sa.DateTime
if __name__ == "__main__":
fpath = 'downloads/FiledDocs.txt'
inferer = TypeInferer(fpath)
inferer.infer()
import json
    print(json.dumps(inferer.types, indent=4, default=str))  # sa type classes aren't JSON-serializable
|
datamade/dedupe-geocoder
|
typeinferer.py
|
Python
|
mit
| 4,788
|
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import datetime
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
############### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
# social sharing settings
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
#Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
#theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
if 'url_root' in DJFS:
DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# Note that this is the Studio key for Segment. There is a separate key for the LMS.
CMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DATABASES = AUTH_TOKENS['DATABASES']
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
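# Example (illustrative): transport "amqp", user "celery", password "secret",
# host "rabbit.internal" and vhost "cms" yield the broker URL
# "amqp://celery:secret@rabbit.internal/cms".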
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### IONISx auth configuration
IONISX_AUTH = AUTH_TOKENS.get('IONISX_AUTH')
##### Third-party auth options ################################################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH'):
AUTHENTICATION_BACKENDS = (
ENV_TOKENS.get('THIRD_PARTY_AUTH_BACKENDS', [
'social.backends.google.GoogleOAuth2',
'social.backends.linkedin.LinkedinOAuth2',
'social.backends.facebook.FacebookOAuth2',
'third_party_auth.saml.SAMLAuthBackend',
'third_party_auth.lti.LTIAuthBackend',
]) + list(AUTHENTICATION_BACKENDS)
)
if IONISX_AUTH:
MIDDLEWARE_CLASSES += ('third_party_auth.middleware.PortalSynchronizerMiddleware',)
# The reduced session expiry time during the third party login pipeline. (Value in seconds)
SOCIAL_AUTH_PIPELINE_TIMEOUT = ENV_TOKENS.get('SOCIAL_AUTH_PIPELINE_TIMEOUT', 600)
# Most provider configuration is done via ConfigurationModels but for a few sensitive values
# we allow configuration via AUTH_TOKENS instead (optionally).
# The SAML private/public key values do not need the delimiter lines (such as
# "-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----" etc.) but they may be included
# if you want (though it's easier to format the key values as JSON without the delimiters).
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
SOCIAL_AUTH_OAUTH_SECRETS = AUTH_TOKENS.get('SOCIAL_AUTH_OAUTH_SECRETS', {})
SOCIAL_AUTH_LTI_CONSUMER_SECRETS = AUTH_TOKENS.get('SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {})
# third_party_auth config moved to ConfigurationModels. This is for data migration only:
THIRD_PARTY_AUTH_OLD_CONFIG = AUTH_TOKENS.get('THIRD_PARTY_AUTH', None)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
DEPRECATED_ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get(
'DEPRECATED_ADVANCED_COMPONENT_TYPES', DEPRECATED_ADVANCED_COMPONENT_TYPES
)
################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)
################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
# Use ElasticSearch for the search engine
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
|
IONISx/edx-platform
|
cms/envs/aws.py
|
Python
|
agpl-3.0
| 16,544
|
'''
.. Created on Mar 20, 2014
.. codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
_compiler_options=None
def ccompiler_options():
    '''Return (compiler_args, compiler_libraries, compiler_defs) for the C
    compiler, enabling OpenMP when support is detected.
    '''
#from numpy.distutils.ccompiler import new_compiler
#from numpy.distutils.fcompiler.pg import PGroupFCompiler
#from numpy.distutils.fcompiler.gnu import GnuFCompiler
#ccompiler = new_compiler()
# Todo test for PGI compiler
#openmp_enabled, needs_gomp = detect_openmp()
openmp_enabled = detect_openmp()[0]
#'-march=k8', '-mfpmath=sse', '-m64', '-ffast-math', '-pipe'
#'-O3', '-march=athlon-xp', '-mfpmath=sse', '-msse', '-funroll-loops', '-pipe'
compiler_args = ['-O2', '-funroll-loops', '-msse2', '-mfpmath=sse']#, '-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION'] #, '-mssse3' #, '-fast', '-Minfo=all', '-Mscalarsse', '-Mvect=sse']#, '-tp=nehalem-64']
if openmp_enabled:
compiler_args.append('-fopenmp')
compiler_libraries = [] #['gomp'] if needs_gomp else []
compiler_defs = [('USE_OPENMP', None)] if openmp_enabled else []
return compiler_args, compiler_libraries, compiler_defs
def fcompiler_options():
    '''Return (compiler_args, compiler_libraries, compiler_defs) for the
    Fortran compiler (PGI or GNU), enabling OpenMP when support is detected.
    '''
from numpy.distutils.fcompiler import new_fcompiler
from numpy.distutils.fcompiler.pg import PGroupFCompiler
from numpy.distutils.fcompiler.gnu import GnuFCompiler
import sys
fcompiler = new_fcompiler()
if issubclass(fcompiler.__class__, PGroupFCompiler):
openmp_enabled, needs_gomp = detect_openmp()
compiler_args = ['-fastsse', '-fast', '-Minfo=all', '-Mscalarsse', '-Mvect=sse', '-Wtabs']#, '-tp=nehalem-64']
if openmp_enabled:
compiler_args.append('-mp=nonuma')
compiler_libraries = [] if needs_gomp else []
compiler_defs = [('USE_OPENMP', None)] if openmp_enabled else []
elif issubclass(fcompiler.__class__, GnuFCompiler):
openmp_enabled, needs_gomp = detect_openmp()
compiler_args = ['-O3', '-funroll-loops'] #, '--std=gnu99'
if openmp_enabled:
compiler_args.append('-fopenmp')
compiler_libraries = [] if needs_gomp else []
compiler_defs = [('USE_OPENMP', None)] if openmp_enabled else []
else:
raise ValueError, "Fortran compiler not supported: %s"%fcompiler.__class__.__name__
if sys.platform=='darwin':
compiler_args.extend(['-undefined dynamic_lookup', '-bundle'])
return compiler_args, compiler_libraries, compiler_defs
def compiler_options():
    '''Return the cached Fortran and C compiler options, concatenated as
    (f_args, f_libs, f_defs, c_args, c_libs, c_defs).
    '''
import numpy
from distutils.version import LooseVersion
global _compiler_options
if _compiler_options is None:
foptions = fcompiler_options()
coptions = ccompiler_options()
_compiler_options = foptions + coptions
if LooseVersion(numpy.__version__) < LooseVersion('1.6.2'):
import sys
sys.argv.extend(['config_fc', '--f77flags="%s"'%" ".join(foptions[0]), '--f90flags="%s"'%" ".join(foptions[0])])
return _compiler_options
def hasfunction(cc, funcname, add_opts=False, includes=[]):
'''
.. note::
Adopted from https://github.com/SimTk/IRMSD/blob/master/python/setup.py
'''
import tempfile, os, shutil, sys
tmpdir = tempfile.mkdtemp(prefix='arachnid-install-')
devnull = oldstderr = None
try:
try:
fname = os.path.join(tmpdir, 'funcname.c')
f = open(fname, 'w')
for inc in includes:
f.write('#include %s\n'%inc)
f.write('int main(void) {\n')
f.write(' %s();\n' % funcname)
f.write('}\n')
f.close()
# Redirect stderr to /dev/null to hide any error messages
# from the compiler.
# This will have to be changed if we ever have to check
# for a function on Windows.
devnull = open('/dev/null', 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(devnull.fileno(), sys.stderr.fileno())
opts = ['-fopenmp'] if add_opts else []
objects = cc.compile([fname], output_dir=tmpdir, extra_postargs=opts)
cc.link_executable(objects, os.path.join(tmpdir, "a.out"), extra_postargs=opts)
except:
return False
return True
finally:
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if devnull is not None:
devnull.close()
shutil.rmtree(tmpdir)
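# Example (illustrative): hasfunction(cc, 'printf', includes=['<stdio.h>'])
# should compile, link and return True on most toolchains, while a
# nonexistent symbol fails at link time and returns False.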
def detect_openmp():
'''
.. note::
Adopted from https://github.com/SimTk/IRMSD/blob/master/python/setup.py
'''
from distutils.ccompiler import new_compiler
compiler = new_compiler()
print "Attempting to autodetect OpenMP support...",
hasopenmp = hasfunction(compiler, 'omp_get_num_threads', True, includes=['<omp.h>'])
needs_gomp = hasopenmp
if not hasopenmp:
compiler.add_library('gomp')
hasopenmp = hasfunction(compiler, 'omp_get_num_threads', includes=['<omp.h>'])
needs_gomp = hasopenmp
print
if hasopenmp:
print "Compiler supports OpenMP"
else:
print "Did not detect OpenMP support; parallel code disabled"
return hasopenmp, needs_gomp
|
ezralanglois/arachnid
|
arachnid/distutils/compiler.py
|
Python
|
gpl-2.0
| 5,254
|
import time
import RPi.GPIO as GPIO
import pickle
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import sys
#loads the UI
qtCreatorFile = "../UI/AutomatedBreweryUI/FlowCalibrationDialog.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class flowCalibration(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self,flowPins = [8,7,12,16,20,21],flowNames = ["HLT In","HLT Out","MLT In","MLT Out","BLK In","BLK Out"]):
super(flowCalibration,self).__init__()
self.setupUi(self)
self.show()
#Sets headers to auto-adjust
self.Calibration_Points.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
#Initially clears the tables
self.clearPulses()
        for i in range(0, self.Calibration_Points.rowCount()): self.Calibration_Points.removeRow(0)
#Sets up the pins
#Note: this code should match the FlowSensor.py code, but pulls raw pulse counts
#values instead of calibrated flows
#stores the parameters
self.flowPins = flowPins
self.flowNames = flowNames
#Sets the pins and related events
GPIO.setmode(GPIO.BCM)
for i in range(0,6):
GPIO.setup(flowPins[i],GPIO.IN,pull_up_down = GPIO.PUD_UP)
GPIO.add_event_detect(flowPins[i], GPIO.FALLING, callback=self.countPulse)
self.Add_Point.clicked.connect(self.addPoint)
self.Remove_Point.clicked.connect(self.removePoint)
self.Complete_Calibration.clicked.connect(self.completeCalibration)
self.Sensor_To_Calibrate.currentIndexChanged.connect(self.clearPulses)
self.Reset_Pulse_Count.clicked.connect(self.clearPulses)
#Imports the old calibration
with open('../calibrations/FlowCalibration.pk1','rb') as input:
self.oldCalibration = pickle.load(input)
print("Old calibration:")
print(self.oldCalibration)
print("")
def countPulse(self,pin):
if pin == self.flowPins[self.flowNames.index(self.Sensor_To_Calibrate.currentText())]:
numPulses = int(self.Num_Pulses.text())
numPulses += 1
self.Num_Pulses.setText(str(numPulses))
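    # Each falling edge on the currently selected sensor's GPIO pin bumps the
    # on-screen pulse count; addPoint() later divides the measured volume by
    # this count to produce one volume-per-pulse calibration point.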
def clearPulses(self):
self.Num_Pulses.setText("0")
def addPoint(self):
rowCount = self.Calibration_Points.rowCount()
self.Calibration_Points.insertRow(rowCount)
self.Calibration_Points.setItem(rowCount,0,QtWidgets.QTableWidgetItem(self.Sensor_To_Calibrate.currentText()))
self.Calibration_Points.setItem(rowCount,1,QtWidgets.QTableWidgetItem(self.Num_Pulses.text()))
self.Calibration_Points.setItem(rowCount,2,QtWidgets.QTableWidgetItem(self.Flow_Volume.text()))
numPulses = float(self.Num_Pulses.text())
volume = float(self.Flow_Volume.text())
volumePerPulse = volume/numPulses
self.Calibration_Points.setItem(rowCount,3,QtWidgets.QTableWidgetItem("{:.16f}".format(volumePerPulse)))
self.clearPulses()
def removePoint(self):
rowToRemove = int(self.Pt_To_Remove.text())-1
self.Calibration_Points.removeRow(rowToRemove)
def completeCalibration(self):
#Initializes the calibration results as empty
self.allCalibrationResults = [[],[],[],[],[],[]]
self.calibrationResults=[]
#Loops through the table to create the calibration results
for i in range(0,self.Calibration_Points.rowCount()):
sensor=self.flowNames.index(self.Calibration_Points.item(i,0).text())
volumePerPulse = float(self.Calibration_Points.item(i,3).text())
self.allCalibrationResults[sensor].append(volumePerPulse)
print("All calibration points:")
print(self.allCalibrationResults)
print("")
#Averages the calibrations for each of the sensors to produce one value
#If there are no values, then uses the old calibration value
for i in range(0,6):
allSensorResults = self.allCalibrationResults[i]
if len(allSensorResults) == 0: self.calibrationResults.append(self.oldCalibration[i])
else:
sensorResult = float(sum(allSensorResults))/float(len(allSensorResults))
self.calibrationResults.append(sensorResult)
print("New calibration:")
print(self.calibrationResults)
#Creates a pickle with the calibration results
with open('FlowCalibration.pk1','wb') as output:
pickle.dump(self.calibrationResults,output,protocol = pickle.HIGHEST_PROTOCOL)
#closes window and stops sensor thread
#self.stop = True
self.close()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = flowCalibration()
sys.exit(app.exec_())
|
Bobstin/AutomatedBrewery
|
calibrations/FlowCalibration.py
|
Python
|
mit
| 4,847
|
import numpy as np
from numpy import sin, cos
from params import *
def get_c1(q, q_d, psi=0):
x,y,a,b,g = q
x_d,y_d,a_d,b_d,_ = q_d
return np.array([
m1 + m2,
0,
- L1*m1*sin(g + a + b),
- L1*m1*sin(g + a + b) \
- L2*m1*sin(g + b) \
- L2*eps*m2*sin(g + b) \
- L3*m1*sin(b) - L3*m2*sin(b)
])
def get_d1(q, q_d, psi=0):
x,y,a,b,g = q
x_d,y_d,a_d,b_d,_ = q_d
return - L1*m1*cos(g + a + b)*a_d**2 \
- 2*L1*m1*cos(g + a + b)*a_d*b_d \
- L1*m1*cos(g + a + b)*b_d**2 \
- L2*eps*m2*cos(g + b)*b_d**2 \
- L2*m1*cos(g + b)*b_d**2 \
- L3*m1*cos(b)*b_d**2 \
- L3*m2*cos(b)*b_d**2
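# Note (assumption, not stated in the source): get_c1 appears to assemble one
# row of a mass/inertia matrix and get_d1 the matching velocity-dependent
# terms, as in a manipulator-style model M(q)*q_dd + d(q, q_d) = f.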
|
tsybulkin/jumper
|
x_f.py
|
Python
|
bsd-2-clause
| 611
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 - 2017 Björn Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of python-quilt for details.
import os.path
import stat
from quilt.backup import Backup
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import QuiltError, NoAppliedPatch
from quilt.patch import Patch  # missing import used by add_file(); assumed location
from quilt.signals import Signal
from quilt.utils import Directory, File
class Add(Command):
"""Command class to add files to the current patch
"""
file_added = Signal()
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Add, self).__init__(cwd)
self.quilt_pc = Directory(quilt_pc)
self.quilt_patches = Directory(quilt_patches)
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _file_in_patch(self, filename, patch, ignore):
""" Checks if a backup file of the filename in the current patch
exists """
file = self.quilt_pc + File(os.path.join(patch.get_name(), filename))
if file.exists():
if ignore:
return True
else:
raise QuiltError("File %s is already in patch %s" % (filename,
patch.get_name()))
return False
def _file_in_next_patches(self, filename, patch):
""" Checks if a backup file of the filename in the applied patches after
patch exists """
if not self.db.is_patch(patch):
# no patches applied
return
patches = self.db.patches_after(patch)
for patch in patches:
file = self.quilt_pc + File(os.path.join(patch.get_name(),
filename))
if file.exists():
raise QuiltError("File %s is already modified by patch %s" %
(filename, patch.get_name()))
def _backup_file(self, file, patch):
""" Creates a backup of file """
dest_dir = self.quilt_pc + patch.get_name()
file_dir = file.get_directory()
if file_dir:
#TODO get relative path
dest_dir = dest_dir + file_dir
backup = Backup()
backup.backup_file(file, dest_dir, copy_empty=True)
def add_file(self, filename, patch_name=None, ignore=False):
""" Add file to the patch with patch_name.
If patch_name is None or empty the topmost patch will be used.
Adding an already added patch will raise an QuiltError if ignore is
False.
"""
file = File(filename)
if patch_name:
patch = Patch(patch_name)
else:
patch = self.db.top_patch()
if not patch:
raise NoAppliedPatch(self.db)
exists = self._file_in_patch(filename, patch, ignore)
if exists:
return
self._file_in_next_patches(filename, patch)
if file.is_link():
raise QuiltError("Cannot add symbolic link %s" % filename)
self._backup_file(file, patch)
if file.exists():
# be sure user can write original file
os.chmod(filename, file.get_mode() | stat.S_IWUSR | stat.S_IRUSR)
self.file_added(file, patch)
def add_files(self, filenames, patch_name=None, ignore=False):
for filename in filenames:
self.add_file(filename, patch_name, ignore)
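# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical paths; assumes a quilt-managed tree with the usual ".pc" and
# "patches" directories and at least one applied patch.
#
#   add = Add('/src/project', '/src/project/.pc', '/src/project/patches')
#   add.add_file('hello.c')          # backs hello.c up into the top patch
#   add.add_files(['a.c', 'b.c'])    # same, for several files at once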
|
bjoernricks/python-quilt
|
quilt/add.py
|
Python
|
mit
| 3,508
|
# MCM: Matrix Chain Multiplication problem
INF = 0x3f3f3f3f
def mcm(arr):
n = len(arr)
    dp = [[0] * n for _ in range(n)]  # [[0]*n]*n would alias every row
for i in range(1, n): # cost is zero when multiplying one matrix.
dp[i][i] = 0
for l in range(2, n):
for i in range(1, n-l+1):
j = i+l-1
dp[i][j] = INF
for k in range(i, j):
dp[i][j] = min(dp[i][j], dp[i][k]+dp[k+1][j] + arr[i-1] * arr[k] * arr[j])
return dp[1][n-1]
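# Worked example (illustrative): arr = [10, 20, 30] describes A1 (10x20)
# times A2 (20x30); the only order costs 10*20*30 = 6000, and mcm(arr)
# returns 6000.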
def read():
return [int(i) for i in input().split()]
if __name__ == '__main__':
print(mcm(read()))
|
hnu2013wwj/XNCodes
|
DynamicProgramming/MCM.py
|
Python
|
gpl-2.0
| 554
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta
from com.ericsson.xn.commons import CommonStatic
from com.ericsson.xn.commons.PyProperties import TrimableProps
from com.ericsson.xn.commons.osutils import get_ne_info_from_cfg, get_pm_counters_map, get_me_types_map
from com.ericsson.xn.x.ne import NeCommon
from com.ericsson.xn.x.pm.PmCommons import PmCommon
from com.ericsson.xn.commons import test_logger as test
def check_pm_accurate(ne_info_cfg, counter_info_cfg, server_info_path, str_end_time, number_of_lic, check_rounds=12,
me_counter_cfg=None, me_types_cfg=None):
ne_info = get_ne_info_from_cfg(ne_info_cfg)
counters_pm = get_pm_counters_map(counter_info_cfg)
if 12 * number_of_lic != len(counters_pm):
        test.error('The number of expected counters should equal 12 times the number of LICs.')
server_info = TrimableProps(server_info_path)
dict_browser_chrome = {
"browser_type": os.path.normpath(server_info.getProperty('browser_type')),
"browser_path": os.path.normpath(server_info.getProperty('browser_path')),
"driver_path": os.path.normpath(server_info.getProperty('driver_path'))
}
host = server_info.getProperty('host')
username = server_info.getProperty('username')
password = server_info.getProperty('password')
port = server_info.getProperty('port')
url = server_info.getProperty('url')
    dict_additional = {
'number_of_lic': number_of_lic,
'check_rounds': check_rounds
}
driver = CommonStatic.login_rsnms(dict_browser_chrome, host, username, password, port, url)
if driver:
try:
end_time = datetime.strptime(str_end_time, '%Y-%m-%d %H:%M:%S')
            rounds = len(counters_pm) / dict_additional['number_of_lic']
start_time = end_time + timedelta(minutes=-5 * check_rounds)
NeCommon.to_ne_management_page_by_url(driver, server_info)
dict_ne_info = NeCommon.check_and_add_ne(driver, ne_info)
PmCommon.to_pm_management_page_by_url(driver, ne_info['ne_type'], server_info)
PmCommon.make_in_correct_tab(driver, ne_info['tab_pre'], '')
PmCommon.wait_until_pm_date_show_up(driver, dict_ne_info['ne_name'])
PmCommon.init_and_search(driver, dict_ne_info['ne_name'], end_time, start_time)
            PmCommon.check_pm_rows_updated(driver, dict_ne_info['ne_type'], counters_pm, 10, dict_additional)
            if 'tab_me' in ne_info and me_counter_cfg is not None and me_types_cfg is not None:
test.info('Found ME Tab information, will check ME counters.')
dict_me_add = {
'rows_each_period': 1,
'check_rounds': check_rounds
}
me_counters = get_pm_counters_map(me_counter_cfg)
me_types = get_me_types_map(me_types_cfg)
if 12 * dict_me_add['rows_each_period'] != len(me_counters):
                    test.error('Expected ME counters mismatch.')
PmCommon.make_in_correct_tab(driver, ne_info['tab_me'], '')
PmCommon.wait_until_pm_date_show_up(driver, dict_ne_info['ne_name'])
PmCommon.init_and_search(driver, dict_ne_info['ne_name'], end_time, start_time)
# PmCommon.wait_until_rounds_ok(driver, len(me_counters), 10, dict_me_add)
PmCommon.check_me_counters(driver, dict_ne_info['ne_name'], me_counters, 10, dict_me_add, me_types)
CommonStatic.logout_rsnms(driver)
finally:
CommonStatic.quite_driver(driver)
def check_sbc_pm(ne_info_cfg, counter_info_cfg, server_info_path, number_of_lic, check_rounds=4, me_counter_cfg=None):
ne_info = get_ne_info_from_cfg(ne_info_cfg)
counters_pm = get_pm_counters_map(counter_info_cfg)
server_info = TrimableProps(server_info_path)
dict_browser_chrome = {
"browser_type": server_info.getProperty('browser_type'),
"browser_path": server_info.getProperty('browser_path'),
"driver_path": server_info.getProperty('driver_path')
}
host = server_info.getProperty('host')
username = server_info.getProperty('username')
password = server_info.getProperty('password')
port = server_info.getProperty('port')
url = server_info.getProperty('url')
    dict_additional = {
'number_of_lic': number_of_lic,
'check_rounds': check_rounds
}
driver = CommonStatic.login_rsnms(dict_browser_chrome, host, username, password, port, url)
if driver:
try:
NeCommon.to_ne_management_page_by_url(driver, server_info)
dict_ne_info = NeCommon.check_and_add_ne(driver, ne_info)
PmCommon.to_pm_management_page_by_url(driver, ne_info['ne_type'], server_info)
PmCommon.make_in_correct_tab(driver, ne_info['tab_pre'], '')
PmCommon.wait_until_pm_date_show_up(driver, dict_ne_info['ne_name'])
            if 12 * dict_additional['number_of_lic'] != len(counters_pm):
test.error('Expected counters file error.')
PmCommon.init_and_search(driver, dict_ne_info['ne_name'])
            # PmCommon.wait_until_rounds_ok(driver, len(counters_pm), 10, dict_additional)
            PmCommon.check_pm_rows_updated(driver, dict_ne_info['ne_type'], counters_pm, 10, dict_additional)
CommonStatic.logout_rsnms(driver)
finally:
CommonStatic.quite_driver(driver)
|
lowitty/selenium
|
com/ericsson/xn/x/pm/PmCommons/PmBaseFunc.py
|
Python
|
mit
| 5,491
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded TCP server."""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import errno
import os
import socket
from salt.ext.tornado import gen
from salt.ext.tornado.log import app_log
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.iostream import IOStream, SSLIOStream
from salt.ext.tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from salt.ext.tornado import process
from salt.ext.tornado.util import errno_from_exception
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine.
ssl = None
class TCPServer(object):
r"""A non-blocking, single-threaded TCP server.
To use `TCPServer`, define a subclass which overrides the `handle_stream`
method. For example, a simple echo server could be defined like this::
from salt.ext.tornado.tcpserver import TCPServer
from salt.ext.tornado.iostream import StreamClosedError
from salt.ext.tornado import gen
class EchoServer(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
data = yield stream.read_until(b"\n")
yield stream.write(data)
except StreamClosedError:
break
To make this server serve SSL traffic, send the ``ssl_options`` keyword
argument with an `ssl.SSLContext` object. For compatibility with older
versions of Python ``ssl_options`` may also be a dictionary of keyword
arguments for the `ssl.wrap_socket` method.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
TCPServer(ssl_options=ssl_ctx)
`TCPServer` initialization follows one of three patterns:
1. `listen`: simple single-process::
server = TCPServer()
server.listen(8888)
IOLoop.current().start()
2. `bind`/`start`: simple multi-process::
server = TCPServer()
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.current().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `TCPServer` constructor. `start` will always start
the server on the default singleton `.IOLoop`.
3. `add_sockets`: advanced multi-process::
sockets = bind_sockets(8888)
tornado.process.fork_processes(0)
server = TCPServer()
server.add_sockets(sockets)
IOLoop.current().start()
The `add_sockets` interface is more complicated, but it can be
used with `tornado.process.fork_processes` to give you more
flexibility in when the fork happens. `add_sockets` can
also be used in single-process servers if you want to create
your listening sockets in some way other than
`~tornado.netutil.bind_sockets`.
.. versionadded:: 3.1
The ``max_buffer_size`` argument.
"""
def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
read_chunk_size=None):
self.io_loop = io_loop
self.ssl_options = ssl_options
self._sockets = {} # fd -> socket object
self._pending_sockets = []
self._started = False
self._stopped = False
self.max_buffer_size = max_buffer_size
self.read_chunk_size = read_chunk_size
# Verify the SSL options. Otherwise we don't get errors until clients
# connect. This doesn't verify that the keys are legitimate, but
# the SSL module doesn't do that until there is a connected socket
# which seems like too much work
if self.ssl_options is not None and isinstance(self.ssl_options, dict):
# Only certfile is required: it can contain both keys
if 'certfile' not in self.ssl_options:
raise KeyError('missing key "certfile" in ssl_options')
if not os.path.exists(self.ssl_options['certfile']):
raise ValueError('certfile "%s" does not exist' %
self.ssl_options['certfile'])
if ('keyfile' in self.ssl_options and
not os.path.exists(self.ssl_options['keyfile'])):
raise ValueError('keyfile "%s" does not exist' %
self.ssl_options['keyfile'])
def listen(self, port, address=""):
"""Starts accepting connections on the given port.
This method may be called more than once to listen on multiple ports.
`listen` takes effect immediately; it is not necessary to call
`TCPServer.start` afterwards. It is, however, necessary to start
the `.IOLoop`.
"""
sockets = bind_sockets(port, address=address)
self.add_sockets(sockets)
def add_sockets(self, sockets):
"""Makes this server start accepting connections on the given sockets.
The ``sockets`` parameter is a list of socket objects such as
those returned by `~tornado.netutil.bind_sockets`.
`add_sockets` is typically used in combination with that
method and `tornado.process.fork_processes` to provide greater
control over the initialization of a multi-process server.
"""
if self.io_loop is None:
self.io_loop = IOLoop.current()
for sock in sockets:
self._sockets[sock.fileno()] = sock
add_accept_handler(sock, self._handle_connection,
io_loop=self.io_loop)
def add_socket(self, socket):
"""Singular version of `add_sockets`. Takes a single socket object."""
self.add_sockets([socket])
def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128,
reuse_port=False):
"""Binds this server to the given port on the given address.
To start the server, call `start`. If you want to run this server
in a single process, you can call `listen` as a shortcut to the
sequence of `bind` and `start` calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
has the same meaning as for `.bind_sockets`.
This method may be called multiple times prior to `start` to listen
on multiple ports or interfaces.
.. versionchanged:: 4.4
Added the ``reuse_port`` argument.
"""
sockets = bind_sockets(port, address=address, family=family,
backlog=backlog, reuse_port=reuse_port)
if self._started:
self.add_sockets(sockets)
else:
self._pending_sockets.extend(sockets)
def start(self, num_processes=1):
"""Starts this server in the `.IOLoop`.
By default, we run the server in this process and do not fork any
additional child process.
If num_processes is ``None`` or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If num_processes is given and > 1, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``TCPServer.start(n)``.
"""
assert not self._started
self._started = True
if num_processes != 1:
process.fork_processes(num_processes)
sockets = self._pending_sockets
self._pending_sockets = []
self.add_sockets(sockets)
def stop(self):
"""Stops listening for new connections.
Requests currently in progress may still continue after the
server is stopped.
"""
if self._stopped:
return
self._stopped = True
for fd, sock in self._sockets.items():
assert sock.fileno() == fd
self.io_loop.remove_handler(fd)
sock.close()
def handle_stream(self, stream, address):
"""Override to handle a new `.IOStream` from an incoming connection.
This method may be a coroutine; if so any exceptions it raises
asynchronously will be logged. Accepting of incoming connections
will not be blocked by this coroutine.
If this `TCPServer` is configured for SSL, ``handle_stream``
may be called before the SSL handshake has completed. Use
`.SSLIOStream.wait_for_handshake` if you need to verify the client's
certificate or use NPN/ALPN.
.. versionchanged:: 4.2
Added the option for this method to be a coroutine.
"""
raise NotImplementedError()
def _handle_connection(self, connection, address):
if self.ssl_options is not None:
assert ssl, "Python 2.6+ and OpenSSL required for SSL"
try:
connection = ssl_wrap_socket(connection,
self.ssl_options,
server_side=True,
do_handshake_on_connect=False)
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_EOF:
return connection.close()
else:
raise
except socket.error as err:
# If the connection is closed immediately after it is created
# (as in a port scan), we can get one of several errors.
# wrap_socket makes an internal call to getpeername,
# which may return either EINVAL (Mac OS X) or ENOTCONN
# (Linux). If it returns ENOTCONN, this error is
# silently swallowed by the ssl module, so we need to
# catch another error later on (AttributeError in
# SSLIOStream._do_ssl_handshake).
# To test this behavior, try nmap with the -sT flag.
# https://github.com/tornadoweb/tornado/pull/750
if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
return connection.close()
else:
raise
try:
if self.ssl_options is not None:
stream = SSLIOStream(connection, io_loop=self.io_loop,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
else:
stream = IOStream(connection, io_loop=self.io_loop,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
future = self.handle_stream(stream, address)
if future is not None:
self.io_loop.add_future(gen.convert_yielded(future),
lambda f: f.result())
except Exception:
app_log.error("Error in connection callback", exc_info=True)
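# A minimal usage sketch (an assumption for illustration, not part of the
# vendored module: port 8888 is arbitrary and the file is run directly).
# It follows the single-process `listen` pattern from the class docstring.
if __name__ == '__main__':
    class DiscardServer(TCPServer):
        def handle_stream(self, stream, address):
            # Accept the connection, then immediately drop it.
            stream.close()

    server = DiscardServer()
    server.listen(8888)
    IOLoop.current().start()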
|
saltstack/salt
|
salt/ext/tornado/tcpserver.py
|
Python
|
apache-2.0
| 12,601
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import boto.ec2
import dateutil.parser
import dateutil.tz
import datetime
import json
from pricing.instance_pricing import InstancePricing
from pricing.spot_pricing import SpotPricing
# region.name, name, instance_type, price, archdomain, env_tag, ip_address, private_ip_address ...
def sanitize(value):
if value is None or value == "":
return "unknown"
else:
return value.replace("<", "").replace(">","").replace(" ", "_")
def get_host_name(i):
# Sometimes instances transiently don't have names
if 'Name' in i.tags:
return sanitize(i.tags['Name'])
else:
return "unknown"
def get_archdomain(i):
if 'archdomain' in i.tags:
return sanitize(i.tags['archdomain'])
else:
return "unknown"
def get_env_tag(i):
if 'env' in i.tags:
return sanitize(i.tags['env'])
else:
return "unknown"
def get_all_instances(ec2):
try:
return ec2.get_all_instances()
    except Exception:
        # Regions that are unreachable with the current credentials are
        # skipped rather than aborting the whole listing.
        return []
def main(args):
instance_pricer = InstancePricing()
spot_pricer = SpotPricing()
results = []
for region in boto.ec2.regions():
ec2 = region.connect()
reservations = get_all_instances(ec2)
for reservation in reservations:
for i in reservation.instances:
name = get_host_name(i)
archdomain = get_archdomain(i)
env_tag = get_env_tag(i)
if i.ip_address and i.private_ip_address and name:
if i.spot_instance_request_id is not None:
start_time = dateutil.parser.parse(i.launch_time)
end_time = datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
hourly_price = spot_pricer.get_spot_instance_pricing(i.instance_type, start_time, end_time,
i.placement, i.spot_instance_request_id)
else:
hourly_price = instance_pricer.get_ondemand_pricing(region.name, i.instance_type)
results.append((region.name, name, i.instance_type, str(hourly_price), archdomain, env_tag, i.ip_address, i.private_ip_address))
    keys = ['region', 'name', 'type', 'price', 'archdomain', 'env', 'ip_public', 'ip_private']
    if args.format == 'tsv':
        for line in results:
            print("\t".join(line))
    elif args.format == 'json-lines':
        for line in results:
            print(json.dumps(dict(zip(keys, line))))
    elif args.format == 'json':
        # Serialize the whole list in one call; printing a trailing comma
        # before ']' on each line would produce invalid JSON.
        print(json.dumps([dict(zip(keys, line)) for line in results]))
    else:
        raise ValueError("--format tsv|json|json-lines")
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.set_defaults(func=main)
parser.add_argument('--format', default='tsv')
args = parser.parse_args()
args.func(args)
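# Example invocations (assumption: boto resolves AWS credentials through its
# usual environment/config chain; the `jq` pipe is only illustrative):
#
#     ./list-ec2-hosts-table.py --format tsv
#     ./list-ec2-hosts-table.py --format json-lines | jq -r .price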
|
leighklotz/traffic-map
|
list/list-ec2-hosts-table.py
|
Python
|
gpl-2.0
| 3,093
|
#A basic way of caching files associated with URLs
from datetime import datetime
import os
import urllib2
import tempfile
import json
import socket
import utilities
import shutil
class URLCache(object):
TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, folder):
self._folder = os.path.join(folder, 'cache')
self._file = os.path.join(folder, 'cache.json')
def __enter__(self):
if not os.path.exists(self._folder):
os.makedirs(self._folder)
try:
fyle = open(self._file, 'r')
except IOError:
#create the file and try again.
open(self._file, 'a').close()
fyle = open(self._file, 'r')
try:
self._cache = json.load(fyle)
except ValueError:
self._cache = dict()
fyle.close()
return self
def __exit__(self, typ, value, traceback):
self.flush()
with open(self._file, 'w+') as fyle:
json.dump(self._cache, fyle, indent=2)
def remove(self, url):
if url in self._cache:
entry = self._cache[url]
if os.path.isfile(entry['resource']):
os.remove(entry['resource'])
del self._cache[url]
def flush(self):
flushlist = list()
for url, entry in self._cache.iteritems():
if not os.path.isfile(entry['resource']) or utilities.strptime(entry['expiry'], self.TIME_FORMAT) < datetime.utcnow():
flushlist.append(url)
for url in flushlist:
self.remove(url)
def erase(self):
os.remove(self._file)
shutil.rmtree(self._folder)
def get(self, url, expiry_callback, resource_callback=None):
"""
        Return the local path of the cached file for ``url``, downloading
        and re-caching it first if the entry is missing or expired.
"""
try:
entry = self._cache[url]
if not os.path.isfile(entry['resource']) or utilities.strptime(entry['expiry'], self.TIME_FORMAT) < datetime.utcnow():
raise InvalidCacheError
else:
return entry['resource']
except (KeyError, InvalidCacheError):
#(src, headers) = urllib.urlretrieve(url)
try:
response = urllib2.urlopen(url)
except (socket.timeout, urllib2.URLError) as e:
e.args = (str(e), url)
raise
page = response.read()
response.close()
tmp = tempfile.NamedTemporaryFile(dir=self._folder, delete=False)
tmp.write(page)
tmp.close()
expiry = expiry_callback(tmp.name)
if resource_callback:
resource_callback(tmp.name)
self._cache[url] = {'resource': tmp.name, 'expiry': expiry.strftime(self.TIME_FORMAT)}
return tmp.name
class InvalidCacheError(Exception):
pass
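# A minimal usage sketch (the URL and the one-day expiry are hypothetical;
# the cache folder is created on __enter__ as shown above):
if __name__ == '__main__':
    from datetime import timedelta

    def expires_tomorrow(path):
        return datetime.utcnow() + timedelta(days=1)

    with URLCache('/tmp/demo-cache') as cache:
        local_path = cache.get('http://example.com/data.json', expires_tomorrow)
        print local_path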
|
aplicatii-romanesti/allinclusive-kodi-pi
|
.kodi/addons/weather.metoffice/src/metoffice/urlcache.py
|
Python
|
apache-2.0
| 2,858
|
from typing import List
class Solution:
def largestIsland(self, grid: List[List[int]]) -> int:
islands = set()
searched = set()
width = len(grid)
height = len(grid[0])
area = 0
for x in range(0, width):
for y in range(0, height):
if (x, y) in searched:
continue
# try to get island
searched.add((x, y))
if grid[x][y] == 0:
continue
                # unvisited land cell: flood-fill the whole island
island = self.getIsland(grid, x, y)
for pos in island:
searched.add(pos)
area = max(area, len(island))
return area
def getIsland(self, grid:List[List[int]], x, y):
width = len(grid)
height = len(grid[0])
wait = set()
wait.add((x, y))
searched = set()
island = set()
while wait:
temp = set()
for pos in wait:
x, y = pos[0], pos[1]
searched.add((x, y))
if grid[x][y] == 0:
continue
island.add((x, y))
if x > 0:
if (x-1, y) not in searched:
if grid[x-1][y] == 1:
temp.add((x-1, y))
island.add((x-1, y))
if x + 1 < width:
if (x+1, y) not in searched:
if grid[x+1][y] == 1:
temp.add((x+1, y))
                            island.add((x+1, y))
if y > 0:
if (x, y-1) not in searched:
if grid[x][y-1] == 1:
temp.add((x, y-1))
island.add((x, y-1))
if y + 1 < height:
if (x, y+1) not in searched:
                        if grid[x][y+1] == 1:
temp.add((x, y+1))
island.add((x, y+1))
wait.clear()
wait.update(temp)
return island
if __name__ == "__main__":
s = Solution()
print(s.largestIsland([[1,0],[0,1]]))
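    # Extra hedged sanity checks (expected values follow from the flood-fill
    # above: each call returns the largest connected area of 1s):
    print(s.largestIsland([[1, 1], [1, 0]]))  # expected 3
    print(s.largestIsland([[1, 1], [1, 1]]))  # expected 4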
|
Tsiannian/tsiannian.github.io
|
test.py
|
Python
|
mit
| 2,264
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.utils.functional import cached_property
from wagtail.wagtailcore.models import Page
from geokit_tables.models import GeoKitTable
from layers.models import Layer
from data import treeToNode, wrapJoins, wrapRasters, new_rpc_con
import json
class Variable(models.Model):
name = models.SlugField(max_length=75, blank=False)
description = models.TextField(null=True, blank=True)
temporal_domain = ArrayField(models.DateField(), null=True, blank=True)
spatial_domain = models.ForeignKey(Layer, null=True, blank=True)
saved_dimensions = models.CharField(max_length=15, null=True)
tree = JSONField()
input_variables = JSONField(null=True, default=[])
units = models.CharField(max_length=100, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True)
status = models.IntegerField(
choices=((0, 'Good'), (1, 'Working'), (3, 'Bad')), default=0
)
@cached_property
def root(self):
if not self.tree:
return None
else:
return treeToNode(self.tree)
def __init__(self, *args, **kwargs):
super(Variable, self).__init__(*args, **kwargs)
self.source_layers = None
self.current_data = None
def save(self, *args, **kwargs):
if self.tree:
self.tree = wrapJoins(self.tree)
self.tree = wrapRasters(self.tree)
try:
self.saved_dimensions = self.root.dimensions
except Exception as e:
print "Variable save, can't get dimension: {}".format(e)
self.saved_dimensions = None
return super(Variable, self).save(*args, **kwargs)
def __unicode__(self):
if self.saved_dimensions is None:
return self.name
return "{} {}{}".format(
self.name,
u'🌐' if 's' in self.saved_dimensions else '', # 🌐 S
u'🕐' if 't' in self.saved_dimensions else '' # 🕐 T
)
@cached_property
def dimensions(self):
return self.root.dimensions
@cached_property
def units(self):
'''
For now, assume that there is only one raster product, use its units.
TODO: Add `units` property to DataNode, do unit math to combine units
of all sources.
'''
rasters = self.get_rasters()
if not rasters:
return None
conn = new_rpc_con()
catalog = conn.get_catalog()
raster = list(rasters)[0].raster
units = [p['units'] for p in catalog if p['name'] == raster['id']][0]
return units
def tree_json(self):
return json.dumps(self.tree)
def input_variables_json(self):
return json.dumps(self.input_variables)
@cached_property
def layers(self):
return self.get_layers()
def get_layers(self):
return Layer.objects.filter(pk__in=self.root.get_layers())
@cached_property
def tables(self):
        return self.get_tables()
def get_tables(self):
return GeoKitTable.objects.filter(pk__in=self.root.get_tables())
@cached_property
def rasters(self):
return self.get_rasters()
def get_rasters(self):
return self.root.get_rasters()
def get_pages(self):
dependent_pages = set()
for page in Page.objects.all():
if page not in dependent_pages:
page_revision = page.get_latest_revision()
if page_revision:
page_content = json.loads(page_revision.content_json)
page_body = json.loads(page_content['body'])
for block in page_body:
if block['type'] == 'visualization':
for vis in block['value']['visualizations']:
if vis['variable'] == self.pk:
dependent_pages.add(page)
return dependent_pages
@cached_property
def bounds(self):
layer_ids = self.root.get_layers()
boxes = list(Layer.objects.filter(pk__in=layer_ids).values('bounds'))
if any(boxes):
lon_min = min([b['bounds'][0] for b in boxes])
lat_max = max([b['bounds'][1] for b in boxes])
lon_max = max([b['bounds'][2] for b in boxes])
lat_min = min([b['bounds'][3] for b in boxes])
else:
return None
class blist(list):
def __unicode__(self):
return ", ".join(map(lambda x: "{:.4f}".format(x), self))
return blist([lon_min, lat_max, lon_max, lat_min])
def data(self):
if self.current_data is None:
self.current_data = self.root.execute()
return self.current_data
class RasterRequest(models.Model):
raster_id = models.CharField(max_length=512)
dates = models.CharField(max_length=512)
vector = models.ForeignKey(Layer)
job_id = models.CharField(max_length=512)
status = models.IntegerField(
choices=((0, 'Good'), (1, 'Working'), (3, 'Bad')), default=1
)
class Meta:
unique_together = ('raster_id', 'dates', 'vector',)
|
Applied-GeoSolutions/geokit
|
variables/models.py
|
Python
|
gpl-2.0
| 5,389
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_nics
short_description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
version_added: "2.3"
author:
- Ondra Machacek (@machacekondra)
description:
- Module to manage network interfaces of Virtual Machines in oVirt/RHV.
options:
name:
description:
- Name of the network interface to manage.
required: true
vm:
description:
- Name of the Virtual Machine to manage.
- You must provide either C(vm) parameter or C(template) parameter.
template:
description:
- Name of the template to manage.
- You must provide either C(vm) parameter or C(template) parameter.
version_added: "2.4"
state:
description:
- Should the Virtual Machine NIC be present/absent/plugged/unplugged.
choices: [ absent, plugged, present, unplugged ]
default: present
network:
description:
- Logical network to which the VM network interface should use,
by default Empty network is used if network is not specified.
profile:
description:
- Virtual network interface profile to be attached to VM network interface.
interface:
description:
- "Type of the network interface."
- "It's required parameter when creating the new NIC."
choices: [ e1000, pci_passthrough, rtl8139, rtl8139_virtio, spapr_vlan, virtio ]
mac_address:
description:
- Custom MAC address of the network interface, by default it's obtained from MAC pool.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Add NIC to VM
ovirt_nics:
state: present
vm: myvm
name: mynic
interface: e1000
mac_address: 00:1a:4a:16:01:56
profile: ovirtmgmt
network: ovirtmgmt
- name: Plug NIC to VM
ovirt_nics:
state: plugged
vm: myvm
name: mynic
- name: Unplug NIC from VM
ovirt_nics:
state: unplugged
vm: myvm
name: mynic
- name: Add NIC to template
ovirt_nics:
auth: "{{ ovirt_auth }}"
state: present
template: my_template
name: nic1
interface: virtio
profile: ovirtmgmt
network: ovirtmgmt
- name: Remove NIC from VM
ovirt_nics:
state: absent
vm: myvm
name: mynic
'''
RETURN = '''
id:
description: ID of the network interface which is managed
returned: On success if network interface is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
nic:
description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
returned: On success if network interface is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class EntityNicsModule(BaseModule):
def __init__(self, *args, **kwargs):
super(EntityNicsModule, self).__init__(*args, **kwargs)
self.vnic_id = None
@property
def vnic_id(self):
return self._vnic_id
@vnic_id.setter
def vnic_id(self, vnic_id):
self._vnic_id = vnic_id
def build_entity(self):
return otypes.Nic(
name=self._module.params.get('name'),
interface=otypes.NicInterface(
self._module.params.get('interface')
) if self._module.params.get('interface') else None,
vnic_profile=otypes.VnicProfile(
id=self.vnic_id,
) if self.vnic_id else None,
mac=otypes.Mac(
address=self._module.params.get('mac_address')
) if self._module.params.get('mac_address') else None,
)
def update_check(self, entity):
if self._module.params.get('vm'):
return (
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
equal(self._module.params.get('mac_address'), entity.mac.address)
)
elif self._module.params.get('template'):
return (
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'plugged', 'present', 'unplugged']),
vm=dict(type='str'),
template=dict(type='str'),
name=dict(type='str', required=True),
interface=dict(type='str'),
profile=dict(type='str'),
network=dict(type='str'),
mac_address=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['vm', 'template']],
)
check_sdk(module)
try:
# Locate the service that manages the virtual machines and use it to
# search for the NIC:
auth = module.params.pop('auth')
connection = create_connection(auth)
entity_name = None
if module.params.get('vm'):
# Locate the VM, where we will manage NICs:
entity_name = module.params.get('vm')
collection_service = connection.system_service().vms_service()
elif module.params.get('template'):
entity_name = module.params.get('template')
collection_service = connection.system_service().templates_service()
# TODO: We have to modify the search_by_name function to accept raise_error=True/False,
entity = search_by_name(collection_service, entity_name)
if entity is None:
raise Exception("Vm/Template '%s' was not found." % entity_name)
service = collection_service.service(entity.id)
cluster_id = entity.cluster
nics_service = service.nics_service()
entitynics_module = EntityNicsModule(
connection=connection,
module=module,
service=nics_service,
)
# Find vNIC id of the network interface (if any):
profile = module.params.get('profile')
if profile and module.params['network']:
cluster_name = get_link_name(connection, cluster_id)
dcs_service = connection.system_service().data_centers_service()
dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
networks_service = dcs_service.service(dc.id).networks_service()
network = next(
(n for n in networks_service.list()
if n.name == module.params['network']),
None
)
if network is None:
raise Exception(
"Network '%s' was not found in datacenter '%s'." % (
module.params['network'],
dc.name
)
)
for vnic in connection.system_service().vnic_profiles_service().list():
if vnic.name == profile and vnic.network.id == network.id:
entitynics_module.vnic_id = vnic.id
# Handle appropriate action:
state = module.params['state']
if state == 'present':
ret = entitynics_module.create()
elif state == 'absent':
ret = entitynics_module.remove()
elif state == 'plugged':
entitynics_module.create()
ret = entitynics_module.action(
action='activate',
action_condition=lambda nic: not nic.plugged,
wait_condition=lambda nic: nic.plugged,
)
elif state == 'unplugged':
entitynics_module.create()
ret = entitynics_module.action(
action='deactivate',
action_condition=lambda nic: nic.plugged,
wait_condition=lambda nic: not nic.plugged,
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
jimi-c/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_nics.py
|
Python
|
gpl-3.0
| 9,114
|
# Generated by Django 2.2.13 on 2021-07-07 11:51
import logging
from django.conf import settings
from django.db import migrations
LIMIT_START_YEAR = 2021
logger = logging.getLogger(settings.DEFAULT_LOGGER)
def set_stage_dimona_to_true_for_internship_type(apps, schema_editor):
LearningUnitYear = apps.get_model('base', 'learningunityear')
all_internships_to_update = LearningUnitYear.objects.filter(
academic_year__year__gte=LIMIT_START_YEAR,
learning_container_year__container_type='INTERNSHIP'
).update(
stage_dimona=True
)
    logger.info('Stage-Dimona field has been updated for {} Learning Unit Years'.format(all_internships_to_update))
class Migration(migrations.Migration):
dependencies = [
('base', '0589_learningunityear_stage_dimona'),
]
operations = [
migrations.RunPython(set_stage_dimona_to_true_for_internship_type, elidable=True),
]
|
uclouvain/OSIS-Louvain
|
base/migrations/0590_set_stage_dimona_for_stage_type.py
|
Python
|
agpl-3.0
| 928
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class QuotasAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
force_tenant_isolation = True
@classmethod
def setUpClass(cls):
super(QuotasAdminNegativeTestJSON, cls).setUpClass()
cls.client = cls.os.quotas_client
cls.adm_client = cls.os_adm.quotas_client
cls.sg_client = cls.security_groups_client
        # NOTE(afazekas): these test cases should always create and use a new
        # tenant; most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.client.tenant_id
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
self.assertRaises(exceptions.Unauthorized,
self.client.update_quota_set,
self.demo_tenant_id,
ram=0)
    # TODO(afazekas): Add dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
@test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_vcpu_quota = quota_set['cores']
vcpu_quota = 0 # Set the quota to zero to conserve resources
resp, quota_set = self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
cores=vcpu_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
self.assertRaises(exceptions.Unauthorized, self.create_test_server)
@test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_mem_quota = quota_set['ram']
mem_quota = 0 # Set the quota to zero to conserve resources
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
ram=mem_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
self.assertRaises(exceptions.Unauthorized, self.create_test_server)
@test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_instances_quota = quota_set['instances']
instances_quota = 0 # Set quota to zero to disallow server creation
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
self.assertRaises(exceptions.Unauthorized, self.create_test_server)
@test.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@test.attr(type='gate')
def test_security_groups_exceed_limit(self):
        # Negative test: Creating Security Groups over the limit should FAIL
resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_sg_quota = quota_set['security_groups']
sg_quota = 0 # Set the quota to zero to conserve resources
resp, quota_set =\
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
security_groups=sg_quota)
self.addCleanup(self.adm_client.update_quota_set,
self.demo_tenant_id,
security_groups=default_sg_quota)
# Check we cannot create anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.sg_client.create_security_group,
"sg-overlimit", "sg-desc")
@test.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@test.attr(type=['negative', 'gate'])
def test_security_groups_rules_exceed_limit(self):
# Negative test: Creation of Security Group Rules should FAIL
        # when we reach the maxSecurityGroupRules limit
resp, quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_sg_rules_quota = quota_set['security_group_rules']
sg_rules_quota = 0 # Set the quota to zero to conserve resources
resp, quota_set =\
self.adm_client.update_quota_set(
self.demo_tenant_id,
force=True,
security_group_rules=sg_rules_quota)
self.addCleanup(self.adm_client.update_quota_set,
self.demo_tenant_id,
security_group_rules=default_sg_rules_quota)
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, securitygroup =\
self.sg_client.create_security_group(s_name, s_description)
self.addCleanup(self.sg_client.delete_security_group,
securitygroup['id'])
secgroup_id = securitygroup['id']
ip_protocol = 'tcp'
# Check we cannot create SG rule anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
self.sg_client.create_security_group_rule,
secgroup_id, ip_protocol, 1025, 1025)
class QuotasAdminNegativeTestXML(QuotasAdminNegativeTestJSON):
_interface = 'xml'
|
Mirantis/tempest
|
tempest/api/compute/admin/test_quotas_negative.py
|
Python
|
apache-2.0
| 7,143
|
#!/bin/python
# This file is part of INDXParse.
#
# Copyright 2011-13 Will Ballenthin <william.ballenthin@mandiant.com>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Version v.1.2
from .. import BinaryParser
from ..BinaryParser import Block
from ..BinaryParser import Nestable
class NULL_OBJECT(object):
def __init__(self):
super(NULL_OBJECT, self).__init__()
@staticmethod
def structure_size(buf, offset, parent):
return 0
def __len__(self):
return 0
null_object = NULL_OBJECT()
class SECURITY_DESCRIPTOR_CONTROL:
SE_OWNER_DEFAULTED = 1 << 0
SE_GROUP_DEFAULTED = 1 << 1
SE_DACL_PRESENT = 1 << 2
SE_DACL_DEFAULTED = 1 << 3
SE_SACL_PRESENT = 1 << 4
SE_SACL_DEFAULTED = 1 << 5
SE_SACL_UNUSED0 = 1 << 6
SE_SACL_UNUSED1 = 1 << 7
SE_DACL_AUTO_INHERIT_REQ = 1 << 8
SE_SACL_AUTO_INHERIT_REQ = 1 << 9
SE_DACL_AUTO_INHERITED = 1 << 10
SE_SACL_AUTO_INHERITED = 1 << 11
SE_DACL_PROTECTED = 1 << 12
SE_SACL_PROTECTED = 1 << 13
SE_RM_CONTROL_VALID = 1 << 14
SE_SELF_RELATIVE = 1 << 15
class SID_IDENTIFIER_AUTHORITY(Block, Nestable):
def __init__(self, buf, offset, parent):
super(SID_IDENTIFIER_AUTHORITY, self).__init__(buf, offset)
self.declare_field("word_be", "high_part", 0x0)
self.declare_field("dword_be", "low_part")
@staticmethod
def structure_size(buf, offset, parent):
return 6
def __len__(self):
return SID_IDENTIFIER_AUTHORITY.structure_size(self._buf, self.absolute_offset(0x0), None)
def __str__(self):
return "%s" % (self.high_part() << 32 + self.low_part())
class SID(Block, Nestable):
def __init__(self, buf, offset, parent):
super(SID, self).__init__(buf, offset)
self.declare_field("byte", "revision", 0x0)
self.declare_field("byte", "sub_authority_count")
self.declare_field(SID_IDENTIFIER_AUTHORITY, "identifier_authority")
self.declare_field("dword", "sub_authorities", count=self.sub_authority_count())
@staticmethod
def structure_size(buf, offset, parent):
sub_auth_count = BinaryParser.read_byte(buf, offset + 1)
auth_size = SID_IDENTIFIER_AUTHORITY.structure_size(buf, offset + 2, parent)
return 2 + auth_size + (sub_auth_count * 4)
def __len__(self):
return self._off_sub_authorities + (self.sub_authority_count() * 4)
def string(self):
ret = "S-%d-%s" % (self.revision(), self.identifier_authority())
for sub_auth in self.sub_authorities():
ret += "-%s" % (str(sub_auth))
return ret
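# An illustrative rendering of SID.string() above (the authority and
# sub-authority values here are hypothetical):
#   S-1-5-21-1004336348-1177238915-682003330-512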
class ACE_TYPES:
"""
One byte.
"""
ACCESS_MIN_MS_ACE_TYPE = 0
ACCESS_ALLOWED_ACE_TYPE = 0
ACCESS_DENIED_ACE_TYPE = 1
SYSTEM_AUDIT_ACE_TYPE = 2
SYSTEM_ALARM_ACE_TYPE = 3 # Not implemented as of Win2k.
ACCESS_MAX_MS_V2_ACE_TYPE = 3
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = 4
ACCESS_MAX_MS_V3_ACE_TYPE = 4
# The following are Win2k only.
ACCESS_MIN_MS_OBJECT_ACE_TYPE = 5
ACCESS_ALLOWED_OBJECT_ACE_TYPE = 5
ACCESS_DENIED_OBJECT_ACE_TYPE = 6
SYSTEM_AUDIT_OBJECT_ACE_TYPE = 7
SYSTEM_ALARM_OBJECT_ACE_TYPE = 8
ACCESS_MAX_MS_OBJECT_ACE_TYPE = 8
ACCESS_MAX_MS_V4_ACE_TYPE = 8
# This one is for WinNT/2k.
ACCESS_MAX_MS_ACE_TYPE = 8
class ACE_FLAGS:
"""
One byte.
"""
OBJECT_INHERIT_ACE = 0x01
CONTAINER_INHERIT_ACE = 0x02
NO_PROPAGATE_INHERIT_ACE = 0x04
INHERIT_ONLY_ACE = 0x08
INHERITED_ACE = 0x10 # Win2k only.
VALID_INHERIT_FLAGS = 0x1f
# The audit flags.
SUCCESSFUL_ACCESS_ACE_FLAG = 0x40
FAILED_ACCESS_ACE_FLAG = 0x80
class ACCESS_MASK:
"""
DWORD.
"""
FILE_READ_DATA = 0x00000001
FILE_LIST_DIRECTORY = 0x00000001
FILE_WRITE_DATA = 0x00000002
FILE_ADD_FILE = 0x00000002
FILE_APPEND_DATA = 0x00000004
FILE_ADD_SUBDIRECTORY = 0x00000004
FILE_READ_EA = 0x00000008
FILE_WRITE_EA = 0x00000010
FILE_EXECUTE = 0x00000020
FILE_TRAVERSE = 0x00000020
FILE_DELETE_CHILD = 0x00000040
FILE_READ_ATTRIBUTES = 0x00000080
FILE_WRITE_ATTRIBUTES = 0x00000100
DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
SYNCHRONIZE = 0x00100000
STANDARD_RIGHTS_READ = 0x00020000
STANDARD_RIGHTS_WRITE = 0x00020000
STANDARD_RIGHTS_EXECUTE = 0x00020000
STANDARD_RIGHTS_REQUIRED = 0x000f0000
STANDARD_RIGHTS_ALL = 0x001f0000
ACCESS_SYSTEM_SECURITY = 0x01000000
MAXIMUM_ALLOWED = 0x02000000
GENERIC_ALL = 0x10000000
GENERIC_EXECUTE = 0x20000000
GENERIC_WRITE = 0x40000000
GENERIC_READ = 0x80000000
class ACE(Block):
def __init__(self, buf, offset, parent):
super(ACE, self).__init__(buf, offset)
self.declare_field("byte", "ace_type", 0x0)
self.declare_field("byte", "ace_flags")
@staticmethod
def get_ace(buf, offset, parent):
header = ACE(buf, offset, parent)
if header.ace_type() == ACE_TYPES.ACCESS_ALLOWED_ACE_TYPE:
return ACCESS_ALLOWED_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.ACCESS_DENIED_ACE_TYPE:
return ACCESS_DENIED_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.SYSTEM_AUDIT_ACE_TYPE:
return SYSTEM_AUDIT_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.SYSTEM_ALARM_ACE_TYPE:
return SYSTEM_ALARM_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
return ACCESS_ALLOWED_OBJECT_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.ACCESS_DENIED_OBJECT_ACE_TYPE:
return ACCESS_DENIED_OBJECT_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.SYSTEM_AUDIT_OBJECT_ACE_TYPE:
return SYSTEM_AUDIT_OBJECT_ACE(buf, offset, parent)
elif header.ace_type() == ACE_TYPES.SYSTEM_ALARM_OBJECT_ACE_TYPE:
return SYSTEM_ALARM_OBJECT_ACE(buf, offset, parent)
else:
# TODO(wb): raise a custom exception type
raise BinaryParser.ParseException("unknown ACE type")
class StandardACE(ACE, Nestable):
def __init__(self, buf, offset, parent):
super(StandardACE, self).__init__(buf, offset, parent)
self.declare_field("word", "size", 0x2)
self.declare_field("dword", "access_mask")
self.declare_field(SID, "sid")
@staticmethod
def structure_size(buf, offset, parent):
return BinaryParser.read_word(buf, offset + 0x2)
def __len__(self):
return self.size()
class ACCESS_ALLOWED_ACE(StandardACE):
def __init__(self, buf, offset, parent):
super(ACCESS_ALLOWED_ACE, self).__init__(buf, offset, parent)
class ACCESS_DENIED_ACE(StandardACE):
def __init__(self, buf, offset, parent):
super(ACCESS_DENIED_ACE, self).__init__(buf, offset, parent)
class SYSTEM_AUDIT_ACE(StandardACE):
def __init__(self, buf, offset, parent):
super(SYSTEM_AUDIT_ACE, self).__init__(buf, offset, parent)
class SYSTEM_ALARM_ACE(StandardACE):
def __init__(self, buf, offset, parent):
super(SYSTEM_ALARM_ACE, self).__init__(buf, offset, parent)
class OBJECT_ACE_FLAGS:
"""
DWORD.
"""
ACE_OBJECT_TYPE_PRESENT = 1
ACE_INHERITED_OBJECT_TYPE_PRESENT = 2
class ObjectACE(ACE, Nestable):
def __init__(self, buf, offset, parent):
super(ObjectACE, self).__init__(buf, offset, parent)
self.declare_field("word", "size", 0x2)
self.declare_field("dword", "access_mask")
self.declare_field("dword", "object_flags")
self.declare_field("guid", "object_type")
self.declare_field("guid", "inherited_object_type")
@staticmethod
def structure_size(buf, offset, parent):
return BinaryParser.read_word(buf, offset + 0x2)
def __len__(self):
return self.size()
class ACCESS_ALLOWED_OBJECT_ACE(ObjectACE):
def __init__(self, buf, offset, parent):
super(ACCESS_ALLOWED_OBJECT_ACE, self).__init__(buf, offset, parent)
class ACCESS_DENIED_OBJECT_ACE(ObjectACE):
def __init__(self, buf, offset, parent):
super(ACCESS_DENIED_OBJECT_ACE, self).__init__(buf, offset, parent)
class SYSTEM_AUDIT_OBJECT_ACE(ObjectACE):
def __init__(self, buf, offset, parent):
super(SYSTEM_AUDIT_OBJECT_ACE, self).__init__(buf, offset, parent)
class SYSTEM_ALARM_OBJECT_ACE(ObjectACE):
def __init__(self, buf, offset, parent):
super(SYSTEM_ALARM_OBJECT_ACE, self).__init__(buf, offset, parent)
class ACL(Block, Nestable):
def __init__(self, buf, offset, parent):
super(ACL, self).__init__(buf, offset)
self.declare_field("byte", "revision", 0x0)
self.declare_field("byte", "alignment1")
self.declare_field("word", "size")
self.declare_field("word", "ace_count")
self.declare_field("word", "alignment2")
self._off_ACEs = self.current_field_offset()
self.add_explicit_field(self._off_ACEs, ACE, "ACEs")
@staticmethod
def structure_size(buf, offset, parent):
return BinaryParser.read_word(buf, offset + 0x2)
def __len__(self):
return self.size()
def ACEs(self):
ofs = self._off_ACEs
for _ in range(self.ace_count()):
a = ACE.get_ace(self._buf, self.offset() + ofs, self)
yield a
ofs += a.size()
ofs = BinaryParser.align(ofs, 4)
class NULL_ACL(object):
"""
TODO(wb): Not actually sure what the NULL ACL is...
just guessing at the values here.
"""
def __init__(self):
super(NULL_ACL, self).__init__()
def revision(self):
return 1
def alignment1(self):
return 0
def size(self):
return 0
def ace_count(self):
return 0
def ACEs(self):
return
@staticmethod
def structure_size(buf, offset, parent):
return 0
def __len__(self):
return 0
class SECURITY_DESCRIPTOR_RELATIVE(Block, Nestable):
def __init__(self, buf, offset, parent):
super(SECURITY_DESCRIPTOR_RELATIVE, self).__init__(buf, offset)
self.declare_field("byte", "revision", 0x0)
self.declare_field("byte", "alignment")
self.declare_field("word", "control")
self.declare_field("dword", "owner_offset")
self.declare_field("dword", "group_offset")
self.declare_field("dword", "sacl_offset")
self.declare_field("dword", "dacl_offset")
self.add_explicit_field(self.owner_offset(), "SID", "owner")
self.add_explicit_field(self.group_offset(), "SID", "group")
if self.control() & SECURITY_DESCRIPTOR_CONTROL.SE_SACL_PRESENT:
self.add_explicit_field(self.sacl_offset(), "ACL", "sacl")
if self.control() & SECURITY_DESCRIPTOR_CONTROL.SE_DACL_PRESENT:
self.add_explicit_field(self.dacl_offset(), "ACL", "dacl")
@staticmethod
def structure_size(buf, offset, parent):
return len(SECURITY_DESCRIPTOR_RELATIVE(buf, offset, parent))
def __len__(self):
ret = 20
ret += len((self.owner() or null_object))
ret += len((self.group() or null_object))
ret += len((self.sacl() or null_object))
ret += len((self.dacl() or null_object))
return ret
def owner(self):
if self.owner_offset() != 0:
return SID(self._buf, self.absolute_offset(self.owner_offset()), self)
else:
return None
def group(self):
if self.group_offset() != 0:
return SID(self._buf, self.absolute_offset(self.group_offset()), self)
else:
return None
def sacl(self):
if self.control() & SECURITY_DESCRIPTOR_CONTROL.SE_SACL_PRESENT:
if self.sacl_offset() > 0:
return ACL(self._buf, self.absolute_offset(self.sacl_offset()), self)
else:
return NULL_ACL()
else:
return None
def dacl(self):
if self.control() & SECURITY_DESCRIPTOR_CONTROL.SE_DACL_PRESENT:
if self.dacl_offset() > 0:
return ACL(self._buf, self.absolute_offset(self.dacl_offset()), self)
else:
return NULL_ACL()
else:
return None
class SDS_ENTRY(Block, Nestable):
def __init__(self, buf, offset, parent):
super(SDS_ENTRY, self).__init__(buf, offset)
self.declare_field("dword", "hash", 0x0)
self.declare_field("dword", "security_id")
self.declare_field("qword", "offset")
self.declare_field("dword", "length")
self.declare_field(SECURITY_DESCRIPTOR_RELATIVE, "sid")
@staticmethod
def structure_size(buf, offset, parent):
return BinaryParser.read_dword(buf, offset + 0x10)
def __len__(self):
return self.length()
class SDS(Block):
def __init__(self, buf, offset, parent):
super(SDS, self).__init__(buf, offset)
        self.add_explicit_field(0, SDS_ENTRY, "sds_entries")
def sds_entries(self):
ofs = 0
while len(self._buf) > self.offset() + ofs + 0x14:
s = SDS_ENTRY(self._buf, self.offset() + ofs, self)
if len(s) != 0:
yield s
ofs += len(s)
ofs = BinaryParser.align(ofs, 0x10)
else:
if ofs % 0x10000 == 0:
return
else:
ofs = BinaryParser.align(ofs, 0x10000)
def main():
import sys
import mmap
import contextlib
with open(sys.argv[1], 'r') as f:
with contextlib.closing(mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_READ)) as buf:
s = SDS(buf, 0, None)
print "SDS"
for e in s.sds_entries():
print(" SDS_ENTRY")
print(e.get_all_string(indent=2))
if __name__ == "__main__":
main()
|
ohio813/python-ntfs
|
ntfs/secure/SDS.py
|
Python
|
apache-2.0
| 14,625
|
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from __future__ import absolute_import
import argparse
import getpass
import hashlib
import logging
import os
import pwd
import ssl
import sys
import tempfile
import requests
import selinux
from . import expose
from .. import utils
from vdsm.utils import getHostUUID, tobool
class Register(object):
def __init__(self, engine_fqdn, engine_https_port=None,
fingerprint=None, ssh_port=None,
ssh_user=None, check_fqdn=True,
vdsm_port=None, node_address=None,
vdsm_uuid=None, node_name=None):
"""
Attributes:
engine_fqdn - Engine FQDN or IP address
engine_https_port - Engine https port
            fingerprint - Fingerprint to be validated
ssh_user - SSH user that will establish the connection
from Engine
ssh_port - Port of ssh daemon is running
check_fqdn - Validate Engine FQDN against CA (True or False)
Default is TRUE
vdsm_port - VDSM listen port
node_address - Specify node address or FQDN
node_name - Specify node name
            vdsm_uuid - Provide host UUID to be used instead of vdsm.utils.
Useful for hosts with blank or buggy DMI
"""
self.logger = self._set_logger()
self.logger.debug("=======================================")
self.logger.debug("Registering the node")
self.logger.debug("=======================================")
self.logger.debug("Received the following attributes:")
self.engine_fqdn = engine_fqdn
self.logger.debug("Engine FQDN: {fqdn}".format(fqdn=self.engine_fqdn))
self.engine_url = "https://{e}".format(e=engine_fqdn)
if engine_https_port is None:
self.engine_port = "443"
else:
self.engine_port = engine_https_port
self.engine_url = "https://{e}:{p}".format(e=self.engine_fqdn,
p=self.engine_port)
self.logger.debug("Engine URL: {url}".format(url=self.engine_url))
self.logger.debug("Engine https port: {hp}".format(
hp=self.engine_port))
if check_fqdn is None:
self.check_fqdn = True
else:
self.check_fqdn = tobool(check_fqdn)
self.logger.debug("Check FQDN: {v}".format(v=self.check_fqdn))
self.fprint = fingerprint
self.logger.debug("Fingerprint: {fp}".format(fp=self.fprint))
self.node_address = node_address
self.logger.debug("Node address: {nf}".format(nf=self.node_address))
self.node_name = node_name
self.logger.debug("Node name: {na}".format(na=self.node_name))
if ssh_user is None:
self.ssh_user = getpass.getuser()
else:
self.ssh_user = ssh_user
self.logger.debug("SSH User: {su}".format(su=self.ssh_user))
if ssh_port is None:
self.ssh_port = "22"
else:
self.ssh_port = ssh_port
self.logger.debug("SSH Port: {sp}".format(sp=self.ssh_port))
if vdsm_port is None:
self.vdsm_port = "54321"
else:
self.vdsm_port = vdsm_port
self.logger.debug("VDSM Port: {sp}".format(sp=self.vdsm_port))
self.vdsm_uuid = vdsm_uuid
self.logger.debug("VDSM UUID: {uuid_provided}".format(
uuid_provided=self.vdsm_uuid))
self.ca_dir = "/etc/pki/ovirt-engine/"
self.ca_engine = "{d}{f}".format(d=self.ca_dir, f="ca.pem")
self.logger.debug("Engine CA: {ca}".format(ca=self.ca_engine))
def handshake(self):
"""
Initial communication with Engine to validate
the registration.
"""
self.logger.info("Starting registration...")
ucmd = "/ovirt-engine/services/host-register?version=1&command="
__GET_VERSION = "https://{e}{u}{c}".format(e=self.engine_fqdn,
u=ucmd,
c="get-version")
self.logger.debug("Get version via: {0}".format(__GET_VERSION))
res = requests.get(__GET_VERSION, verify=False)
if res.status_code != 200:
raise RuntimeError("Cannot get registration version from Engine!")
self.url_CA = "{e}{uc}{c}".format(e=self.engine_url,
uc=ucmd,
c="get-pki-trust")
self.url_ssh_key = "{e}{uc}{c}".format(e=self.engine_url,
uc=ucmd,
c="get-ssh-trust")
ureg = "{uc}register&sshUser={sshu}&" \
"sshPort={sshp}&port={mp}".format(uc=ucmd,
sshu=self.ssh_user,
sshp=self.ssh_port,
mp=self.vdsm_port)
if self.node_name is not None:
ureg += "&name={name}".format(name=self.node_name)
if self.node_address is not None:
ureg += "&address={addr}".format(addr=self.node_address)
self.url_reg = "{e}{u}".format(e=self.engine_url, u=ureg)
self.logger.debug("Download CA via: {u}".format(u=self.url_CA))
self.logger.debug("Download SSH via: {u}".format(u=self.url_ssh_key))
def _set_logger(self):
"""
The logging settings
Saving log in: /var/log/vdsm/register.log
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.FileHandler("/var/log/vdsm/register.log")
fh.setLevel(logging.DEBUG)
debug_fmt = logging.Formatter("%(asctime)s %(message)s",
"%m/%d/%Y %I:%M:%S %p")
ih = logging.StreamHandler(stream=sys.stdout)
ih.setLevel(logging.INFO)
info_fmt = logging.Formatter("%(message)s",
"%m/%d/%Y %I:%M:%S %p")
fh.setFormatter(debug_fmt)
ih.setFormatter(info_fmt)
logger.addHandler(fh)
logger.addHandler(ih)
logging.captureWarnings(True)
return logger
def _execute_http_request(self, url, cert_validation=True):
"""
Execute http requests
url -- URL to be requested
cert_validation -- SSL cert will be verified
Returns: Content of http request
"""
if self.check_fqdn:
cert_validation = self.ca_engine
else:
cert_validation = False
res = requests.get("{u}".format(u=url), verify=cert_validation)
if res.status_code != 200:
raise requests.RequestException(
"http response was non OK, code {r}".format(r=res.status_code)
)
return res.content
def _silent_restorecon(self, path):
"""
Execute selinux restorecon cmd to determined file
Args
path -- full path to file
"""
try:
if selinux.is_selinux_enabled():
selinux.restorecon(path)
except:
self.logger.error("restorecon %s failed" % path, exc_info=True)
def _calculate_fingerprint(self, cert):
"""
Calculate fingerprint of certificate
Args
cert -- certificate file to be calculated the fingerprint
Returns
The fingerprint
"""
with open(cert, 'r') as f:
cert = f.read()
fp = hashlib.sha1(ssl.PEM_cert_to_DER_cert(cert)).hexdigest()
fp = ':'.join(fp[pos:pos + 2] for pos in range(0, len(fp), 2))
return fp
def host_uuid(self):
"""
        Determine the host UUID; if there is no existing /etc/vdsm/vdsm.id
        it will generate a UUID and save/persist it in /etc/vdsm/vdsm.id
"""
if self.vdsm_uuid:
self.uuid = self.vdsm_uuid
else:
self.uuid = getHostUUID(legacy=False)
self.url_reg += "&uniqueId={u}".format(u=self.uuid)
self.logger.debug("Registration via: {u}".format(u=self.url_reg))
__VDSM_ID = "/etc/vdsm/vdsm.id"
if self.vdsm_uuid and os.path.exists(__VDSM_ID):
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().unpersist(__VDSM_ID)
os.unlink(__VDSM_ID)
if not os.path.exists(__VDSM_ID):
with open(__VDSM_ID, 'w') as f:
f.write(self.uuid)
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().persist(__VDSM_ID)
self.logger.info("Host UUID: {u}".format(u=self.uuid))
def download_ca(self):
"""
Download CA from Engine and save self.ca_engine
"""
self.logger.info("Collecting CA data from Engine...")
        # If the engine CA dir doesn't exist, create it and download the ca.pem
temp_ca_file = None
if os.path.exists(self.ca_engine):
calculated_fprint = self._calculate_fingerprint(self.ca_engine)
else:
if not os.path.exists(self.ca_dir):
os.makedirs(self.ca_dir, 0o755)
self._silent_restorecon(self.ca_dir)
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().persist(self.ca_dir)
res = self._execute_http_request(self.url_CA,
cert_validation=False)
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(self.ca_dir),
delete=False
) as f:
f.write(res)
calculated_fprint = self._calculate_fingerprint(f.name)
temp_ca_file = True
if self.fprint and self.fprint.lower() != calculated_fprint.lower():
msg = "The fingeprints doesn't match:\n" \
"Calculated fingerprint: [{c}]\n" \
"Attribute fingerprint: [{a}]".format(c=calculated_fprint,
a=self.fprint)
self.logger.debug(msg)
if temp_ca_file:
os.unlink(f.name)
raise RuntimeError(msg)
if temp_ca_file:
os.rename(f.name, self.ca_engine)
self.fprint = calculated_fprint
self.logger.info("Calculated fingerprint: {f}".format(
f=self.fprint))
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().persist(self.ca_engine)
def download_ssh(self):
"""
Download ssh authorized keys and save it in the node
"""
self.logger.info("Collecting ssh pub key data...")
_uid = pwd.getpwnam(self.ssh_user).pw_uid
_auth_keys_dir = pwd.getpwuid(_uid).pw_dir + "/.ssh"
_auth_keys = _auth_keys_dir + "/authorized_keys"
self.logger.debug("auth_key is located {f}".format(f=_auth_keys))
if not os.path.exists(_auth_keys_dir):
os.makedirs(_auth_keys_dir, 0o700)
self._silent_restorecon(_auth_keys_dir)
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().persist(_auth_keys_dir)
os.chown(_auth_keys_dir, _uid, _uid)
res = self._execute_http_request(self.url_ssh_key)
with tempfile.NamedTemporaryFile(
dir=_auth_keys_dir,
delete=False
) as f:
f.write(res)
        # If the ssh key is new, append it to authorized_keys
with open(f.name, "r") as f_ro:
content = f_ro.read()
with open(_auth_keys, "a+") as f_w:
if content not in f_w.read():
f_w.write(content)
os.chmod(_auth_keys, 0o600)
self._silent_restorecon(_auth_keys)
os.chown(_auth_keys, _uid, _uid)
os.unlink(f.name)
if utils.isOvirtNode():
from ovirt.node.utils.fs import Config
Config().persist(_auth_keys)
def execute_registration(self):
"""
Trigger the registration command against Engine
"""
self._execute_http_request(self.url_reg)
self.logger.info("Registration completed, host is pending approval"
" on Engine: {e}".format(e=self.engine_fqdn))
@expose("register")
def main(*args):
'''
    A tool which registers the node against Engine
Note: This comment is required by vdsm-tool which
looks for a doc string.
'''
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description='Tool to register node to Engine',
epilog='Example of use:\n%(prog)s '
'--engine-fqdn engine.mydomain'
)
parser.add_argument(
'--node-address',
help="Define node FQDN or IP address."
" If not provided, will be used system host name",
)
parser.add_argument(
'--node-name',
help="Define node name."
" If not provided, will be used system short host name"
" (the name before the first dot in the system host name)",
)
parser.add_argument(
'--engine-fqdn',
help="Engine FQDN or IP address (See also: --check-fqdn)",
required=True
)
parser.add_argument(
'--engine-https-port',
help="Define engine https port."
" If not provided, will be used 443",
)
parser.add_argument(
'--ssh-user',
help="SSH username to establish the connection with Engine. "
"If not provided, the user which is "
"executing the script will catch and used",
)
parser.add_argument(
'--ssh-port',
help="SSH port to establish the connection with Engine "
"If not provided, the script will use the default "
"SSH port 22"
)
parser.add_argument(
'--check-fqdn',
help="Disable or Enable FQDN check for Engine CA, this option "
"is enabled by default (Use: True or False)",
)
parser.add_argument(
'--fingerprint',
help="Specify an existing fingerprint to be validated against "
"Engine CA fingerprint",
)
parser.add_argument(
'--vdsm-port',
help="Specify the listen port of VDSM"
" If not provided, will be used the default 54321",
)
parser.add_argument(
'--vdsm-uuid',
help="Provide host UUID to be used instead vdsm.utils"
" Useful for hosts with blank or buggy DMI",
)
# Using [1:] to remove the 'register' option from arguments
# and avoid vdsm-tool recognizing it as an unknown option
args = parser.parse_args(args=args[1:])
reg = Register(engine_fqdn=args.engine_fqdn,
engine_https_port=args.engine_https_port,
vdsm_port=args.vdsm_port,
node_address=args.node_address,
node_name=args.node_name,
ssh_user=args.ssh_user,
ssh_port=args.ssh_port,
fingerprint=args.fingerprint,
check_fqdn=args.check_fqdn,
vdsm_uuid=args.vdsm_uuid)
try:
reg.handshake()
reg.host_uuid()
reg.download_ca()
reg.download_ssh()
reg.execute_registration()
except Exception:
reg.logger.exception("Cannot connect to Engine. Does {f} match "
"the FQDN of Engine?".format(f=args.engine_fqdn))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
"""
Registration schema:
UUID
=========
- If there is UUID already generated for the system will be
available in /etc/vdsm/vdsm.id
- In case there is no UUID, use the auxiliary function from VDSM
to generate it and store in /etc/vdsm/vdsm.id
Service reg:
============
- REQUIRED_FOR: Engine >= 3.4
- Process UUID
- Download CA via get-pki-trust URL
https://ENGINE_FQDN/ovirt-engine/services/host-register?version=1
&command=get-pki-trust
- Download ssh pub key via get-ssh-trust URL
https://ENGINE_FQDN/ovirt-engine/services/host-register?version=1
&command=get-ssh-trust
- Register via URL:
https://ENGINE_FQDN/ovirt-engine/services/host-register?version=1
&command=register&name=NODE_NAME&address=NO_FQDN_OR_IP
&uniqueId=NODE_UUID&sshUser=SSH_USERNAME&sshPort=SSHD_PORT
"""
|
borisroman/vdsm
|
lib/vdsm/tool/register.py
|
Python
|
gpl-2.0
| 17,682
|
"""
Class for reading the configuration file
Uses the ConfigParser lib to return the values present in the config file
"""
__authors__ = "Claudio Marques, David Palma, Luis Cordeiro"
__copyright__ = "Copyright (c) 2014 OneSource Consultoria Informatica, Lda"
__license__ = "Apache 2"
__contact__ = "www.onesource.pt"
__date__ = "01/09/2014"
__version__ = "1.0"
import ConfigParser
class ReadConfFile:
config = None
def __init__(self, file="proxy.conf"):
"""
Load the configuration file for later option lookups
:param file: path to the configuration file
"""
self.config = ConfigParser.SafeConfigParser()
self.config.readfp(open(file))
def read_option(self, group, name):
"""
Return the value of option 'name' in section 'group'
:return: the option value
"""
value = self.config.get(group, name)
return value
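# Minimal usage sketch for the class above, assuming a "proxy.conf" file
# with a section such as [zabbix] containing an option "host" (both names
# are hypothetical):
#
#     reader = ReadConfFile("proxy.conf")
#     zabbix_host = reader.read_option("zabbix", "host")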
|
OneSourceConsult/ZabbixCeilometer-Proxy
|
readFile.py
|
Python
|
apache-2.0
| 812
|
"""Test icatdump and icatingest.
"""
from subprocess import CalledProcessError
import pytest
import icat
import icat.config
from icat.query import Query
from conftest import DummyDatafile, gettestdata, getConfig, callscript
# Test input
ds_params = str(gettestdata("ingest-ds-params.xml"))
datafiles = str(gettestdata("ingest-datafiles.xml"))
@pytest.fixture(scope="module")
def client(setupicat):
client, conf = getConfig(confSection="acord", ids="mandatory")
client.login(conf.auth, conf.credentials)
return client
@pytest.fixture(scope="module")
def cmdargs(setupicat):
_, conf = getConfig(confSection="acord", ids="mandatory")
return conf.cmdargs + ["-f", "XML"]
@pytest.fixture(scope="function")
def dataset(client):
"""A dataset to be used in the test.
The dataset is not created by the fixture; it is assumed that the
test does it. The dataset will eventually be deleted after the
test.
"""
inv = client.assertedSearch("Investigation [name='10100601-ST']")[0]
dstype = client.assertedSearch("DatasetType [name='raw']")[0]
dataset = client.new("dataset",
name="e208343", complete=False,
investigation=inv, type=dstype)
yield dataset
try:
ds = client.searchMatching(dataset)
dataset.id = ds.id
except icat.SearchResultError:
# Dataset not found, maybe the test failed, nothing to
# clean up then.
pass
else:
# If any datafile has been uploaded (i.e. the location is
# not NULL), need to delete it from IDS first. Any other
# datafile or dataset parameter will be deleted
# automatically with the dataset by cascading in the ICAT
# server.
query = Query(client, "Datafile",
conditions={"dataset.id": "= %d" % dataset.id,
"location": "IS NOT NULL"})
client.deleteData(client.search(query))
client.delete(dataset)
# Test datafiles to be created by test_ingest_datafiles:
testdatafiles = [
{
'dfname': "e208343.dat",
'size': 394,
'mtime': 1286600400,
},
{
'dfname': "e208343.nxs",
'size': 52857,
'mtime': 1286600400,
},
]
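# Orientation for the tests below: they exercise icatingest's --duplicate
# modes. Roughly, the default (THROW) fails on a duplicate object, IGNORE
# skips it, CHECK compares the attributes and fails on a mismatch, and
# OVERWRITE replaces the existing values.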
def verify_dataset_params(client, dataset, params):
query = Query(client, "DatasetParameter",
conditions={"dataset.id": "= %d" % dataset.id},
includes={"type"})
ps = client.search(query)
assert len(ps) == len(params)
values = { (p.type.name, p.numericValue, p.type.units) for p in ps }
assert values == params
def test_ingest_dataset_params(client, dataset, cmdargs):
"""Ingest a file setting some dataset parameters.
"""
dataset.create()
args = cmdargs + ["-i", ds_params]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_throw(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now place a duplicate object in the way.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params]
# FIXME: should inspect stderr and verify ICATObjectExistsError.
with pytest.raises(CalledProcessError) as err:
callscript("icatingest.py", args)
# Verify that the params have been set. The exception should
# have been raised while trying to ingest the second parameter.
# The first one (Magnetic field) should have been created and
# Reactor power should still have the value set above.
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW")
})
def test_ingest_duplicate_ignore(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now ignore the duplicate.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "IGNORE"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_check_err(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but use CHECK which fails due to mismatch.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "CHECK"]
# FIXME: should inspect stderr and verify ICATObjectExistsError.
with pytest.raises(CalledProcessError) as err:
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW")
})
def test_ingest_duplicate_check_ok(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now it matches, so CHECK should return ok.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=10.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "CHECK"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_overwrite(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now overwrite the old value.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "OVERWRITE"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
# Minimal example, a Datafile featuring a string.
ingest_data_string = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datafile>
<name>dup_test_str.dat</name>
<dataset ref="Dataset_001"/>
</datafile>
</data>
</icatdata>
"""
# A Datafile featuring an int.
ingest_data_int = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datafile>
<fileSize>42</fileSize>
<name>dup_test_int.dat</name>
<dataset ref="Dataset_001"/>
</datafile>
</data>
</icatdata>
"""
# A Dataset featuring a boolean.
ingest_data_boolean = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<dataset id="Dataset_001">
<complete>false</complete>
<name>e208343</name>
<investigation name="10100601-ST" visitId="1.1-N"/>
<type name="raw"/>
</dataset>
</data>
</icatdata>
"""
# A DatasetParameter featuring a float.
ingest_data_float = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datasetParameter>
<numericValue>5.3</numericValue>
<dataset ref="Dataset_001"/>
<type name="Magnetic field" units="T"/>
</datasetParameter>
</data>
</icatdata>
"""
# A Datafile featuring a date.
ingest_data_date = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datafile>
<datafileCreateTime>2008-06-18T09:31:11+02:00</datafileCreateTime>
<name>dup_test_date.dat</name>
<dataset ref="Dataset_001"/>
</datafile>
</data>
</icatdata>
"""
@pytest.mark.parametrize("inputdata", [
ingest_data_string,
ingest_data_int,
ingest_data_boolean,
ingest_data_float,
ingest_data_date,
])
def test_ingest_duplicate_check_types(tmpdirsec, dataset, cmdargs, inputdata):
"""Ingest with a collision of a duplicate object.
Similar to test_ingest_duplicate_check_ok(), but trying several
input datasets that test different data types. Issue #9.
"""
# Most input data create a datafile or a dataset parameter related
# to dataset and thus assume the dataset to already exist. Only
# ingest_data_boolean creates the dataset itself.
if inputdata is not ingest_data_boolean:
dataset.create()
# We simply ingest the same data twice, using duplicate=CHECK the
# second time. This obviously leads to matching duplicates.
inpfile = tmpdirsec / "ingest.xml"
with inpfile.open("wt") as f:
f.write(inputdata)
args = cmdargs + ["-i", str(inpfile)]
callscript("icatingest.py", args)
callscript("icatingest.py", args + ["--duplicate", "CHECK"])
def test_ingest_datafiles(tmpdirsec, client, dataset, cmdargs):
"""Ingest a dataset with some datafiles.
"""
dummyfiles = [ f['dfname'] for f in testdatafiles ]
args = cmdargs + ["-i", datafiles]
callscript("icatingest.py", args)
# Verify that the datafiles have been uploaded.
dataset = client.searchMatching(dataset)
for fname in dummyfiles:
query = Query(client, "Datafile", conditions={
"name": "= '%s'" % fname,
"dataset.id": "= %d" % dataset.id,
})
df = client.assertedSearch(query)[0]
assert df.location is None
def test_ingest_datafiles_upload(tmpdirsec, client, dataset, cmdargs):
"""Upload datafiles to IDS from icatingest.
Same as last test, but set the --upload-datafiles flag so that
icatingest will not create the datafiles as objects in the ICAT,
but upload the files to IDS instead.
"""
dummyfiles = [ DummyDatafile(tmpdirsec, f['dfname'], f['size'], f['mtime'])
for f in testdatafiles ]
args = cmdargs + ["-i", datafiles, "--upload-datafiles",
"--datafile-dir", str(tmpdirsec)]
callscript("icatingest.py", args)
# Verify that the datafiles have been uploaded.
dataset = client.searchMatching(dataset)
for f in dummyfiles:
query = Query(client, "Datafile", conditions={
"name": "= '%s'" % f.name,
"dataset.id": "= %d" % dataset.id,
})
df = client.assertedSearch(query)[0]
assert df.location is not None
assert df.fileSize == f.size
assert df.checksum == f.crc32
if f.mtime:
assert df.datafileModTime == f.mtime
|
icatproject/python-icat
|
tests/test_06_ingest.py
|
Python
|
apache-2.0
| 11,631
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_hostlink import CommandShowHostlink
class CommandShowHostlinkHostlink(CommandShowHostlink):
required_parameters = ["hostlink"]
|
stdweird/aquilon
|
lib/python2.6/aquilon/worker/commands/show_hostlink_hostlink.py
|
Python
|
apache-2.0
| 951
|
import time
import uuid
import six
from tooz import coordination
ALIVE_TIME = 1
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()
# Join a group
request = coordinator.join_group(group)
request.get()
def when_i_am_elected_leader(event):
# event is a LeaderElected event
print(event.group_id, event.member_id)
# Propose to be a leader for the group
coordinator.watch_elected_as_leader(group, when_i_am_elected_leader)
start = time.time()
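# The loop below is what keeps the membership alive: heartbeat() refreshes
# this member's liveness in the group, while run_watchers() dispatches any
# registered callbacks (such as when_i_am_elected_leader above).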
while time.time() - start < ALIVE_TIME:
coordinator.heartbeat()
coordinator.run_watchers()
time.sleep(0.1)
coordinator.stop()
|
citrix-openstack-build/tooz
|
examples/leader_election.py
|
Python
|
apache-2.0
| 770
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_snapshot_info
short_description: Gather information about ec2 volume snapshots in AWS
description:
- Gather information about ec2 volume snapshots in AWS
- This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
snapshot_ids:
description:
- If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
required: false
default: []
owner_ids:
description:
- If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
access are returned.
required: false
default: []
restorable_by_user_ids:
description:
- If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
returned.
required: false
default: []
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
names and values are case sensitive.
required: false
default: {}
notes:
- By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
the account use the filter 'owner-id'.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all snapshots, including public ones
- ec2_snapshot_info:
# Gather information about all snapshots owned by the account 0123456789
- ec2_snapshot_info:
filters:
owner-id: 0123456789
# Or alternatively...
- ec2_snapshot_info:
owner_ids:
- 0123456789
# Gather information about a particular snapshot using ID
- ec2_snapshot_info:
filters:
snapshot-id: snap-00112233
# Or alternatively...
- ec2_snapshot_info:
snapshot_ids:
- snap-00112233
# Gather information about any snapshot with a tag key Name and value Example
- ec2_snapshot_info:
filters:
"tag:Name": Example
# Gather information about any snapshot with an error status
- ec2_snapshot_info:
filters:
status: error
'''
RETURN = '''
snapshot_id:
description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
type: str
returned: always
sample: snap-01234567
volume_id:
description: The ID of the volume that was used to create the snapshot.
type: str
returned: always
sample: vol-01234567
state:
description: The snapshot state (completed, pending or error).
type: str
returned: always
sample: completed
state_message:
description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
error occurred.
type: str
returned: always
sample:
start_time:
description: The time stamp when the snapshot was initiated.
type: str
returned: always
sample: "2015-02-12T02:14:02+00:00"
progress:
description: The progress of the snapshot, as a percentage.
type: str
returned: always
sample: "100%"
owner_id:
description: The AWS account ID of the EBS snapshot owner.
type: str
returned: always
sample: "099720109477"
description:
description: The description for the snapshot.
type: str
returned: always
sample: "My important backup"
volume_size:
description: The size of the volume, in GiB.
type: int
returned: always
sample: 8
owner_alias:
description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
type: str
returned: always
sample: "033440102211"
tags:
description: Any tags assigned to the snapshot.
type: dict
returned: always
sample: "{ 'my_tag_key': 'my_tag_value' }"
encrypted:
description: Indicates whether the snapshot is encrypted.
type: bool
returned: always
sample: "True"
kms_key_id:
description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
protect the volume encryption key for the parent volume.
type: str
returned: always
sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
data_encryption_key_id:
description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
type: str
returned: always
sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def list_ec2_snapshots(connection, module):
snapshot_ids = module.params.get("snapshot_ids")
owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)
except ClientError as e:
if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
if len(snapshot_ids) > 1:
module.warn("Some of your snapshots may exist, but %s" % str(e))
snapshots = {'Snapshots': []}
else:
module.fail_json(msg="Failed to describe snapshots: %s" % str(e))
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_snapshots = []
for snapshot in snapshots['Snapshots']:
snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
# Turn the boto3 result into an ansible friendly tag dictionary
for snapshot in snaked_snapshots:
if 'tags' in snapshot:
snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
module.exit_json(snapshots=snaked_snapshots)
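# For reference (illustrative, not taken from the helper's source):
# ansible_dict_to_boto3_filter_list turns a plain dict such as
# {"tag:Name": "Example"} into the boto3 filter shape
# [{'Name': 'tag:Name', 'Values': ['Example']}] expected by
# describe_snapshots above.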
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
snapshot_ids=dict(default=[], type='list'),
owner_ids=dict(default=[], type='list'),
restorable_by_user_ids=dict(default=[], type='list'),
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
]
)
if module._name == 'ec2_snapshot_facts':
module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_ec2_snapshots(connection, module)
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py
|
Python
|
mit
| 8,441
|
#!/usr/bin/env python
#
# kdump.py - Copyright (C) 2010 Red Hat, Inc.
# Written by Joey Boggs <jboggs@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import ovirtnode.ovirtfunctions as _functions
def write_kdump_config(config, type="net"):
assert type in ["nfs", "ssh", "net"]
kdump_config_file = open("/etc/kdump.conf", "w")
kdump_config_file.write("default reboot\n")
# NOTE: no delay is added here; a link delay (e.g. 60 secs) may be needed to make sure the nic is up
kdump_config_file.write(type + " " + config + "\n")
kdump_config_file.close()
_functions.ovirt_store_config("/etc/kdump.conf")
return True
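# Usage sketch (the NFS target below is a hypothetical example):
#
#     write_kdump_config("192.0.2.1:/var/crash", "nfs")
#
# would leave /etc/kdump.conf containing:
#
#     default reboot
#     nfs 192.0.2.1:/var/crash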
def restore_kdump_config():
kdump_config_file = open("/etc/kdump.conf", "w")
kdump_config_file.write("default reboot\n")
kdump_config_file.write("ext4 /dev/HostVG/Data\n")
kdump_config_file.write("path /core\n")
kdump_config_file.close()
return True
def kdump_auto():
try:
if "OVIRT_KDUMP_NFS" in _functions.OVIRT_VARS:
write_kdump_config(_functions.OVIRT_VARS["OVIRT_KDUMP_NFS"], "nfs")
_functions.ovirt_store_config("/etc/kdump.conf")
_functions.logger.info("Syslog Configuration Completed")
return True
except Exception:
_functions.logger.error("KDump Configuration Failed")
return False
|
haiyangd/python-porject
|
src/ovirtnode/kdump.py
|
Python
|
gpl-2.0
| 1,990
|
"""Package to test the get_accessory method."""
from unittest.mock import patch, Mock
import pytest
from homeassistant.core import State
import homeassistant.components.cover as cover
import homeassistant.components.climate as climate
import homeassistant.components.media_player as media_player
from homeassistant.components.homekit import get_accessory, TYPES
from homeassistant.components.homekit.const import (
CONF_FEATURE_LIST, FEATURE_ON_OFF, TYPE_OUTLET, TYPE_SWITCH)
from homeassistant.const import (
ATTR_CODE, ATTR_DEVICE_CLASS, ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT, CONF_NAME, CONF_TYPE, TEMP_CELSIUS,
TEMP_FAHRENHEIT)
def test_not_supported(caplog):
"""Test if none is returned if entity isn't supported."""
# not supported entity
assert get_accessory(None, None, State('demo.demo', 'on'), 2, {}) \
is None
# invalid aid
assert get_accessory(None, None, State('light.demo', 'on'), None, None) \
is None
assert caplog.records[0].levelname == 'WARNING'
assert 'invalid aid' in caplog.records[0].msg
def test_not_supported_media_player():
"""Test if mode isn't supported and if no supported modes."""
# selected mode for entity not supported
config = {CONF_FEATURE_LIST: {FEATURE_ON_OFF: None}}
entity_state = State('media_player.demo', 'on')
assert get_accessory(None, None, entity_state, 2, config) is None
# no supported modes for entity
entity_state = State('media_player.demo', 'on')
assert get_accessory(None, None, entity_state, 2, {}) is None
@pytest.mark.parametrize('config, name', [
({CONF_NAME: 'Customize Name'}, 'Customize Name'),
])
def test_customize_options(config, name):
"""Test with customized options."""
mock_type = Mock()
with patch.dict(TYPES, {'Light': mock_type}):
entity_state = State('light.demo', 'on')
get_accessory(None, None, entity_state, 2, config)
mock_type.assert_called_with(None, None, name,
'light.demo', 2, config)
@pytest.mark.parametrize('type_name, entity_id, state, attrs, config', [
('Fan', 'fan.test', 'on', {}, {}),
('Light', 'light.test', 'on', {}, {}),
('Lock', 'lock.test', 'locked', {}, {ATTR_CODE: '1234'}),
('MediaPlayer', 'media_player.test', 'on',
{ATTR_SUPPORTED_FEATURES: media_player.SUPPORT_TURN_ON |
media_player.SUPPORT_TURN_OFF}, {CONF_FEATURE_LIST:
{FEATURE_ON_OFF: None}}),
('SecuritySystem', 'alarm_control_panel.test', 'armed_away', {},
{ATTR_CODE: '1234'}),
('Thermostat', 'climate.test', 'auto', {}, {}),
('Thermostat', 'climate.test', 'auto',
{ATTR_SUPPORTED_FEATURES: climate.SUPPORT_TARGET_TEMPERATURE_LOW |
climate.SUPPORT_TARGET_TEMPERATURE_HIGH}, {}),
])
def test_types(type_name, entity_id, state, attrs, config):
"""Test if types are associated correctly."""
mock_type = Mock()
with patch.dict(TYPES, {type_name: mock_type}):
entity_state = State(entity_id, state, attrs)
get_accessory(None, None, entity_state, 2, config)
assert mock_type.called
if config:
assert mock_type.call_args[0][-1] == config
@pytest.mark.parametrize('type_name, entity_id, state, attrs', [
('GarageDoorOpener', 'cover.garage_door', 'open',
{ATTR_DEVICE_CLASS: 'garage',
ATTR_SUPPORTED_FEATURES: cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE}),
('WindowCovering', 'cover.set_position', 'open',
{ATTR_SUPPORTED_FEATURES: 4}),
('WindowCoveringBasic', 'cover.open_window', 'open',
{ATTR_SUPPORTED_FEATURES: 3}),
])
def test_type_covers(type_name, entity_id, state, attrs):
"""Test if cover types are associated correctly."""
mock_type = Mock()
with patch.dict(TYPES, {type_name: mock_type}):
entity_state = State(entity_id, state, attrs)
get_accessory(None, None, entity_state, 2, {})
assert mock_type.called
@pytest.mark.parametrize('type_name, entity_id, state, attrs', [
('BinarySensor', 'binary_sensor.opening', 'on',
{ATTR_DEVICE_CLASS: 'opening'}),
('BinarySensor', 'device_tracker.someone', 'not_home', {}),
('AirQualitySensor', 'sensor.air_quality_pm25', '40', {}),
('AirQualitySensor', 'sensor.air_quality', '40',
{ATTR_DEVICE_CLASS: 'pm25'}),
('CarbonMonoxideSensor', 'sensor.airmeter', '2',
{ATTR_DEVICE_CLASS: 'co'}),
('CarbonDioxideSensor', 'sensor.airmeter_co2', '500', {}),
('CarbonDioxideSensor', 'sensor.airmeter', '500',
{ATTR_DEVICE_CLASS: 'co2'}),
('HumiditySensor', 'sensor.humidity', '20',
{ATTR_DEVICE_CLASS: 'humidity', ATTR_UNIT_OF_MEASUREMENT: '%'}),
('LightSensor', 'sensor.light', '900', {ATTR_DEVICE_CLASS: 'illuminance'}),
('LightSensor', 'sensor.light', '900', {ATTR_UNIT_OF_MEASUREMENT: 'lm'}),
('LightSensor', 'sensor.light', '900', {ATTR_UNIT_OF_MEASUREMENT: 'lx'}),
('TemperatureSensor', 'sensor.temperature', '23',
{ATTR_DEVICE_CLASS: 'temperature'}),
('TemperatureSensor', 'sensor.temperature', '23',
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}),
('TemperatureSensor', 'sensor.temperature', '74',
{ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT}),
])
def test_type_sensors(type_name, entity_id, state, attrs):
"""Test if sensor types are associated correctly."""
mock_type = Mock()
with patch.dict(TYPES, {type_name: mock_type}):
entity_state = State(entity_id, state, attrs)
get_accessory(None, None, entity_state, 2, {})
assert mock_type.called
@pytest.mark.parametrize('type_name, entity_id, state, attrs, config', [
('Outlet', 'switch.test', 'on', {}, {CONF_TYPE: TYPE_OUTLET}),
('Switch', 'automation.test', 'on', {}, {}),
('Switch', 'input_boolean.test', 'on', {}, {}),
('Switch', 'remote.test', 'on', {}, {}),
('Switch', 'script.test', 'on', {}, {}),
('Switch', 'switch.test', 'on', {}, {}),
('Switch', 'switch.test', 'on', {}, {CONF_TYPE: TYPE_SWITCH}),
])
def test_type_switches(type_name, entity_id, state, attrs, config):
"""Test if switch types are associated correctly."""
mock_type = Mock()
with patch.dict(TYPES, {type_name: mock_type}):
entity_state = State(entity_id, state, attrs)
get_accessory(None, None, entity_state, 2, config)
assert mock_type.called
|
persandstrom/home-assistant
|
tests/components/homekit/test_get_accessories.py
|
Python
|
apache-2.0
| 6,341
|
# -*- coding: utf-8 -*-
"""Installer for the vfu.sitecontent package."""
from setuptools import find_packages
from setuptools import setup
import os
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = read('README.rst')
setup(
name='vfu.sitecontent',
version='1.0.0',
description="Sitecontent package containing folderish content pages",
long_description=long_description,
# Get more from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Plone Dexterity',
author='Serge Davidov',
author_email='sd@kreativkombinat.de',
url='http://pypi.python.org/pypi/vfu.sitecontent',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['vfu'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.api',
'plone.app.dexterity [relations]',
'plone.app.relationfield',
'plone.namedfile [blobs]',
'plone.formwidget.contenttree',
],
extras_require={
'test': [
'mock',
'plone.app.testing',
'unittest2',
],
'develop': [
'coverage',
'flake8',
'jarn.mkrelease',
'plone.app.debugtoolbar',
'plone.reload',
'Products.Clouseau',
'Products.DocFinderTab',
'Products.PDBDebugMode',
'Products.PrintingMailHost',
'Sphinx',
'zest.releaser',
'zptlint',
],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
|
a25kk/vfu
|
src/vfu.sitecontent/setup.py
|
Python
|
mit
| 1,772
|
# -*- coding: utf-8 -*-
"""The widget of 'Structure' tab."""
from __future__ import annotations
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2019"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
import ast
from typing import TYPE_CHECKING, List, Tuple, Sequence, Dict, Iterable
from qtpy.QtCore import (
Signal,
Slot,
Qt,
QSize,
QPointF,
QCoreApplication,
)
from qtpy.QtWidgets import (
QMessageBox,
QProgressDialog,
QListWidgetItem,
QInputDialog,
QWidget,
QApplication,
)
from qtpy.QtGui import QImage, QPainter, QPixmap
from pyslvs import (
Graph,
link_assortment,
contracted_link_assortment,
labeled_enumerate,
is_planar,
external_loop_layout,
)
from pyslvs_ui.qt_patch import qt_image_format
from pyslvs_ui.graphics import graph2icon, engine_picker, engines
from .dialogs.targets import TargetsDialog
from .structure_widget_ui import Ui_Form
if TYPE_CHECKING:
from pyslvs_ui.widgets import MainWindowBase
class StructureWidget(QWidget, Ui_Form):
"""Structure widget.
Preview the structures that have been added to the collection list by the user.
"""
layout_sender = Signal(Graph, dict)
def __init__(self, parent: MainWindowBase) -> None:
"""Get IO dialog functions from parent."""
super(StructureWidget, self).__init__(parent)
self.setupUi(self)
self.output_to = parent.output_to
self.save_reply_box = parent.save_reply_box
self.input_from_multiple = parent.input_from_multiple
self.add_points_by_graph = parent.add_points_by_graph
self.project_no_save = parent.project_no_save
self.prefer = parent.prefer
# Data structures
self.collections: List[Graph] = []
self.collections_layouts: List[Dict[int, Tuple[float, float]]] = []
self.collections_grounded: List[Graph] = []
# Engine list
self.graph_engine.addItems(engines)
def clear(self) -> None:
"""Clear all sub-widgets."""
for button in (
self.merge_button,
self.configure_button,
self.duplicate_button,
):
button.setEnabled(False)
self.collections.clear()
self.collection_list.clear()
self.__clear_selection()
@Slot(name='on_clear_button_clicked')
def __user_clear(self) -> None:
"""Ask user before clear."""
if not self.collections:
return
if QMessageBox.question(
self,
"Delete",
"Sure to remove all your collections?"
) != QMessageBox.Yes:
return
self.clear()
self.project_no_save()
@Slot(name='on_reload_atlas_clicked')
@Slot(bool, name='on_graph_link_as_node_toggled')
@Slot(bool, name='on_graph_show_label_toggled')
@Slot(int, name='on_graph_engine_currentIndexChanged')
def __reload_atlas(self) -> None:
"""Reload atlas with the engine."""
current_pos = self.collection_list.currentRow()
self.collections_layouts.clear()
self.collection_list.clear()
self.__clear_selection()
if not self.collections:
return
dlg = QProgressDialog(
"Drawing atlas...",
"Cancel",
0,
len(self.collections),
self
)
dlg.setWindowTitle("Type synthesis")
dlg.resize(400, dlg.height())
dlg.setModal(True)
dlg.show()
engine_str = self.graph_engine.currentText()
for i, g in enumerate(self.collections):
QCoreApplication.processEvents()
if dlg.wasCanceled():
dlg.deleteLater()
return
item = QListWidgetItem(f"No. {i + 1}")
pos = engine_picker(g, engine_str, self.graph_link_as_node.isChecked())
item.setIcon(graph2icon(
g,
self.collection_list.iconSize().width(),
self.graph_link_as_node.isChecked(),
self.graph_show_label.isChecked(),
self.prefer.monochrome_option,
pos=pos
))
self.collections_layouts.append(pos)
item.setToolTip(f"{g.edges}")
self.collection_list.addItem(item)
dlg.setValue(i + 1)
dlg.deleteLater()
if current_pos > -1:
self.collection_list.setCurrentRow(current_pos)
self.__set_selection(self.collection_list.currentItem())
def __is_valid_graph(self, edges: Iterable[Tuple[int, int]]) -> str:
"""Test graph and return True if it is valid."""
try:
g = Graph(edges)
except (TypeError, ValueError):
return "wrong format"
if not g.edges:
return "is an empty graph"
if not g.is_connected():
return "is not a close chain"
if not is_planar(g):
return "is not a planar chain"
if g.has_cut_link():
return "has cut link"
try:
external_loop_layout(g, True)
except ValueError as error:
return str(error)
for h in self.collections:
if g.is_isomorphic(h):
return f"is isomorphic with: {h.edges}"
return ""
def add_collection(self, edges: Iterable[Tuple[int, int]], *, reload: bool = True) -> None:
"""Add collection by in put edges."""
error = self.__is_valid_graph(edges)
if error:
QMessageBox.warning(self, "Add Collection Error", f"Error: {error}")
return
self.collections.append(Graph(edges))
self.project_no_save()
if reload:
self.__reload_atlas()
def add_collections(self, collections: Sequence[Sequence[Tuple[int, int]]]) -> None:
"""Add collections."""
for edges in collections:
self.add_collection(edges)
@Slot(name='on_add_by_edges_button_clicked')
def __add_from_edges(self) -> None:
"""Add collection by input string."""
edges_str = ""
while not edges_str:
edges_str, ok = QInputDialog.getText(
self,
"Add by edges",
"Please enter a connection expression:\n"
"Example: [(0, 1), (1, 2), (2, 3), (3, 0)]"
)
if not ok:
return
try:
edges = ast.literal_eval(edges_str)  # safer than eval() for user input
if any(len(edge) != 2 for edge in edges):
raise ValueError("wrong format")
except (SyntaxError, ValueError) as error:
QMessageBox.warning(self, str(error), f"Error: {error}")
return
else:
self.add_collection(edges)
@Slot(name='on_add_by_files_button_clicked')
def __add_from_files(self) -> None:
"""Append atlas by text files."""
file_names = self.input_from_multiple(
"edges data",
["Text File (*.txt)"]
)
if not file_names:
return
read_data = []
for file_name in file_names:
with open(file_name, 'r', encoding='utf-8') as f:
for line in f:
read_data.append(line)
errors = []
for edges_str in read_data:
try:
edges = ast.literal_eval(edges_str)  # safer than eval() for file input
if any(len(edge) != 2 for edge in edges):
raise ValueError("wrong format")
except (SyntaxError, ValueError) as error:
errors.append(str(error))
else:
self.add_collection(edges, reload=False)
if errors:
QMessageBox.warning(self, "Loaded Error", "Error:" + '\n'.join(errors))
self.__reload_atlas()
@Slot(name='on_capture_graph_clicked')
def __save_graph(self) -> None:
"""Save the current graph."""
if self.selection_window.count() != 1:
return
file_name = self.output_to("atlas image", qt_image_format)
if not file_name:
return
pixmap: QPixmap = self.selection_window.item(0).icon().pixmap(self.selection_window.iconSize())
pixmap.save(file_name)
self.save_reply_box("Graph", file_name)
@Slot(name='on_save_atlas_clicked')
def __save_atlas(self) -> None:
"""Save function as same as type synthesis widget."""
count = self.collection_list.count()
if count < 1:
return
lateral, ok = QInputDialog.getInt(
self,
"Atlas",
"The number of lateral:",
5, 1
)
if not ok:
return
file_name = self.output_to("atlas image", qt_image_format)
if not file_name:
return
icon_size = self.collection_list.iconSize()
width = icon_size.width()
image = self.collection_list.item(0).icon().pixmap(icon_size).toImage()
image_main = QImage(QSize(
lateral if count > lateral else count,
(count // lateral) + bool(count % lateral)
) * width, image.format())
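# e.g. count=12 with lateral=5 yields a 5 x 3 grid of icon-sized tiles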
image_main.fill(Qt.transparent)
painter = QPainter(image_main)
for row in range(count):
image = self.collection_list.item(row).icon().pixmap(icon_size).toImage()
painter.drawImage(QPointF(row % lateral, row // lateral) * width, image)
painter.end()
pixmap = QPixmap()
pixmap.convertFromImage(image_main)
pixmap.save(file_name)
self.save_reply_box("Atlas", file_name)
@Slot(name='on_save_edges_clicked')
def __save_edges(self) -> None:
"""Save function as same as type synthesis widget."""
count = self.collection_list.count()
if count < 1:
return
file_name = self.output_to("atlas edges expression", ["Text file (*.txt)"])
if not file_name:
return
with open(file_name, 'w', encoding='utf-8') as f:
f.write('\n'.join(str(g.edges) for g in self.collections))
self.save_reply_box("edges expression", file_name)
@Slot(QListWidgetItem, name='on_collection_list_itemClicked')
def __set_selection(self, item: QListWidgetItem) -> None:
"""Show the data of collection.
Save the layout position so that the graphs
keep the same appearance.
"""
for button in (
self.delete_button,
self.configure_button,
self.duplicate_button,
):
button.setEnabled(item is not None)
self.selection_window.clear()
if item is None:
return
# Preview item
link_is_node = self.graph_link_as_node.isChecked()
item_preview = QListWidgetItem(item.text())
row = self.collection_list.row(item)
g = self.collections[row]
self.ground_engine = self.collections_layouts[row]
item_preview.setIcon(graph2icon(
g,
self.selection_window.iconSize().width(),
link_is_node,
self.graph_show_label.isChecked(),
self.prefer.monochrome_option,
pos=self.ground_engine
))
self.selection_window.addItem(item_preview)
# Set attributes
self.edges_text.setText(str(list(g.edges)))
self.nl_label.setText(str(len(g.vertices)))
self.nj_label.setText(str(len(g.edges)))
self.dof_label.setText(str(g.dof()))
self.is_degenerate_label.setText(str(g.is_degenerate()))
self.link_assortment_label.setText(str(link_assortment(g)))
self.contracted_link_assortment_label.setText(str(contracted_link_assortment(g)))
# Buttons
self.duplicate_button.setEnabled(link_is_node)
self.configure_button.setEnabled(not link_is_node)
self.merge_button.setEnabled(not link_is_node)
self.__grounded()
def __clear_selection(self) -> None:
"""Clear the selection preview data."""
self.grounded_list.clear()
self.selection_window.clear()
self.edges_text.clear()
self.nl_label.setText('0')
self.nj_label.setText('0')
self.dof_label.setText('0')
self.is_degenerate_label.setText("N/A")
self.link_assortment_label.setText("N/A")
self.contracted_link_assortment_label.setText("N/A")
@Slot(name='on_expr_copy_clicked')
def __copy_expr(self) -> None:
"""Copy the expression."""
string = self.edges_text.text()
if string:
QApplication.clipboard().setText(string)
self.edges_text.selectAll()
@Slot(name='on_delete_button_clicked')
def __delete_collection(self) -> None:
"""Delete the selected collection."""
row = self.collection_list.currentRow()
if not row > -1:
return
if QMessageBox.question(
self,
"Delete",
f"Sure to remove #{row} from your collections?"
) != QMessageBox.Yes:
return
self.collection_list.takeItem(row)
self.collections.pop(row)
self.collections_layouts.pop(row)
self.__clear_selection()
self.project_no_save()
@Slot(name='on_duplicate_button_clicked')
def __make_duplicate(self) -> None:
"""Make current graph symmetric."""
row = self.collection_list.currentRow()
if not row > -1:
return
graph = self.collections[row]
dlg = TargetsDialog(
"Select the vertices (links) you want to copy.\n"
"The duplication will keep adjacency",
"",
graph.vertices,
(),
self
)
dlg.show()
if not dlg.exec_():
dlg.deleteLater()
return
targets = dlg.targets()
dlg.deleteLater()
times, ok = QInputDialog.getInt(
self,
"Make duplicate",
"The count of duplication:",
1, 1
)
if not ok:
return
new_graph = graph.duplicate(targets, times)
self.add_collection(new_graph.edges)
@Slot(name='on_configure_button_clicked')
def __configuration(self) -> None:
"""Triangular iteration."""
self.layout_sender.emit(
self.collections[self.collection_list.currentRow()],
self.ground_engine.copy()
)
def __grounded(self) -> None:
"""Grounded combinations."""
current_item = self.collection_list.currentItem()
self.collections_grounded.clear()
self.grounded_list.clear()
g = self.collections[self.collection_list.row(current_item)]
item = QListWidgetItem("Released")
icon = graph2icon(
g,
self.grounded_list.iconSize().width(),
self.graph_link_as_node.isChecked(),
self.graph_show_label.isChecked(),
self.prefer.monochrome_option,
pos=self.ground_engine
)
item.setIcon(icon)
self.collections_grounded.append(g)
self.grounded_list.addItem(item)
for node, graph_ in labeled_enumerate(g):
item = QListWidgetItem(f"link_{node}")
icon = graph2icon(
g,
self.grounded_list.iconSize().width(),
self.graph_link_as_node.isChecked(),
self.graph_show_label.isChecked(),
self.prefer.monochrome_option,
except_node=node,
pos=self.ground_engine
)
item.setIcon(icon)
self.collections_grounded.append(graph_)
self.grounded_list.addItem(item)
@Slot(name='on_merge_button_clicked')
def __grounded_merge(self) -> None:
"""Merge the grounded result."""
item = self.grounded_list.currentItem()
if not item:
return
graph = self.collections_grounded[0]
text = item.text()
if text == "Released":
ground_link = None
else:
ground_link = int(text.split("_")[1])
if QMessageBox.question(
self,
"Message",
f"Merge \"{text}\" chain to your canvas?"
) == QMessageBox.Yes:
self.add_points_by_graph(
graph,
self.ground_engine,
ground_link
)
|
KmolYuan/Pyslvs-PyQt5
|
pyslvs_ui/synthesis/collections/structure_widget.py
|
Python
|
agpl-3.0
| 16,405
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from avatar.models import Avatar
from avatar.templatetags.avatar_tags import avatar
class AvatarAdmin(admin.ModelAdmin):
list_display = ('get_avatar', 'user', 'primary', "date_uploaded")
list_filter = ('primary',)
search_fields = ('user__username',)
list_per_page = 50
def get_avatar(self, avatar_in):
return avatar(avatar_in.user, 80)
get_avatar.short_description = _('Avatar')
get_avatar.allow_tags = True
admin.site.register(Avatar, AvatarAdmin)
|
ofri/django-avatar
|
avatar/admin.py
|
Python
|
bsd-3-clause
| 580
|
extensions = []
master_doc = 'index'
project = u'Org\\Heigl\\Hyphenator'
copyright = u'2011-2012, Andreas Heigl <andreas@heigl.org>'
exclude_patterns = ['_build']
highlight_language = 'php'
html_theme = 'nature'
|
fxbenard/typofr
|
vendor/Org_Heigl/hyphenator/docs/conf.py
|
Python
|
mit
| 210
|
from django.test import SimpleTestCase, RequestFactory, override_settings
from rest_framework.reverse import reverse
from program_management.api.serializers.prerequisite import ProgramTreePrerequisitesSerializer, \
NodeBaseSerializer
from program_management.ddd.domain.program_tree import ProgramTree
from program_management.tests.ddd.factories.domain.prerequisite.prerequisite import PrerequisitesFactory
from program_management.tests.ddd.factories.link import LinkFactory
from program_management.tests.ddd.factories.node import NodeGroupYearFactory, NodeLearningUnitYearFactory
@override_settings(LANGUAGES=[('fr', 'Français'), ], LANGUAGE_CODE='fr')
class TestEducationGroupPrerequisitesSerializer(SimpleTestCase):
def setUp(self):
"""
root_node
|-----common_core
|---- LDROI100A (UE) Prerequisites: LDROI1300 AND LAGRO2400
|-----subgroup1
|---- LDROI1300 (UE)
|---- LAGRO2400 (UE)
:return:
"""
self.root_node = NodeGroupYearFactory(node_id=1, code="LBIR100B", title="Bachelier en droit", year=2018)
self.common_core = NodeGroupYearFactory(node_id=2, code="LGROUP100A", title="Tronc commun", year=2018)
self.subgroup1 = NodeGroupYearFactory(node_id=3, code="LGROUP101A", title="Sous-groupe 1", year=2018)
self.ldroi100a = NodeLearningUnitYearFactory(node_id=4,
code="LDROI100A",
common_title_fr="Introduction",
specific_title_fr="Partie 1",
year=2018)
self.ldroi1300 = NodeLearningUnitYearFactory(node_id=5,
code="LDROI1300",
common_title_fr="Introduction droit",
specific_title_fr="Partie 1",
year=2018)
self.lagro2400 = NodeLearningUnitYearFactory(node_id=6,
code="LAGRO2400",
common_title_fr="Séminaire agro",
specific_title_fr="Partie 1",
year=2018)
LinkFactory(parent=self.root_node, child=self.common_core)
LinkFactory(parent=self.common_core, child=self.ldroi100a)
LinkFactory(parent=self.root_node, child=self.subgroup1)
LinkFactory(parent=self.subgroup1, child=self.ldroi1300)
LinkFactory(parent=self.subgroup1, child=self.lagro2400)
self.tree = ProgramTree(root_node=self.root_node)
PrerequisitesFactory.produce_inside_tree(
context_tree=self.tree,
node_having_prerequisite=self.ldroi100a.entity_id,
nodes_that_are_prequisites=[self.ldroi1300, self.lagro2400]
)
url = reverse('program_management_api_v1:training-prerequisites_official',
kwargs={'year': self.root_node.year, 'acronym': self.root_node.code})
self.request = RequestFactory().get(url)
self.serializer = ProgramTreePrerequisitesSerializer(self.ldroi100a, context={
'request': self.request,
'language': 'fr',
'tree': self.tree
})
def test_contains_expected_fields(self):
expected_fields = [
'title',
'title_en',
'url',
'code',
'prerequisites_string',
'prerequisites',
]
self.assertListEqual(expected_fields, list(self.serializer.data.keys()))
def test_read_prerequisite_on_training(self):
with self.subTest('title'):
self.assertEqual(self.ldroi100a.common_title_fr + ' - ' + self.ldroi100a.specific_title_fr,
self.serializer.data.get('title'))
with self.subTest('url'):
url = reverse('learning_unit_api_v1:learningunits_read',
kwargs={'year': self.ldroi100a.year, 'acronym': self.ldroi100a.code},
request=self.request)
self.assertEqual(url, self.serializer.data.get('url'))
with self.subTest('code'):
self.assertEqual("LDROI100A", self.serializer.data.get('code'))
with self.subTest('prerequisites_string'):
self.assertEqual(
self.tree.get_prerequisite(self.ldroi100a).get_prerequisite_expression(translate=False),
self.serializer.data.get('prerequisites_string')
)
class TestLearningUnitBaseSerializer(SimpleTestCase):
def setUp(self):
self.ldroi1300 = NodeLearningUnitYearFactory(node_id=7,
code="LDROI1300",
common_title_fr="Introduction droit",
specific_title_fr="Partie 1",
year=2018)
url = reverse('program_management_api_v1:training-prerequisites_official',
kwargs={'year': 2018, 'acronym': 'LDROI1300'})
self.request = RequestFactory().get(url)
self.serializer = NodeBaseSerializer(self.ldroi1300, context={
'request': self.request,
'language': 'fr',
})
def test_title_with_only_common_title_if_no_specific(self):
node_lu = NodeLearningUnitYearFactory(node_id=7,
code="LDROI1302",
common_title_fr="Introduction droit",
year=2018)
url = reverse('program_management_api_v1:training-prerequisites_official',
kwargs={'year': 2018, 'acronym': 'LDROI1302'})
request = RequestFactory().get(url)
serializer = NodeBaseSerializer(node_lu, context={
'request': request,
'language': 'fr',
})
self.assertEqual(serializer.data['title'], node_lu.common_title_fr)
def test_contains_expected_fields(self):
expected_fields = [
'title',
'title_en',
'url',
'code',
]
self.assertListEqual(expected_fields, list(self.serializer.data.keys()))
def test_read(self):
with self.subTest('title'):
self.assertEqual(self.ldroi1300.common_title_fr + ' - ' + self.ldroi1300.specific_title_fr,
self.serializer.data.get('title'))
with self.subTest('url'):
url = reverse('learning_unit_api_v1:learningunits_read',
kwargs={'year': self.ldroi1300.year, 'acronym': self.ldroi1300.code},
request=self.request)
self.assertEqual(url, self.serializer.data.get('url'))
with self.subTest('code'):
self.assertEqual(self.ldroi1300.code, self.serializer.data.get('code'))
|
uclouvain/OSIS-Louvain
|
program_management/tests/api/serializers/test_prerequisite.py
|
Python
|
agpl-3.0
| 7,192
|
"""This module provides plugins used in the hairball paper."""
from collections import defaultdict, Counter
from hairball.plugins import HairballPlugin
class Animation(HairballPlugin):
"""Plugin that checks for instances of 'complex animation'.
Animation should include loops, motion, timing, and costume changes.
"""
COSTUME = frozenset(['switch to costume %s', 'next costume'])
LOOP = frozenset(['repeat %s', 'repeat until %s%s', 'forever',
'forever if %s%s'])
MOTION = frozenset(['change y by %s', 'change x by %s',
'glide %s secs to x:%s y:%s',
'move %s steps', 'go to x:%s y:%s'])
ROTATE = frozenset(['turn cw %s degrees', 'turn ccw %s degrees',
'point in direction %s'])
SIZE = frozenset(['change size by %s', 'set size to %s%%'])
TIMING = frozenset(['wait %s secs', 'glide %s secs to x:%s y:%s'])
ANIMATION = COSTUME | LOOP | MOTION | ROTATE | SIZE | TIMING
@staticmethod
def check_results(tmp_):
"""Return a 3 tuple for something."""
# TODO: Fix this to work with more meaningful names
if tmp_['t'] > 0:
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 1, 3, tmp_
return 3
elif tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 2, 3, tmp_
return 3
elif tmp_['mr'] > 0 or tmp_['ma'] > 1:
print 3, 2, tmp_
return 2
if tmp_['cr'] > 1 or tmp_['ca'] > 2:
print 4, 2, tmp_
return 2
if tmp_['mr'] > 0 or tmp_['ma'] > 1:
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 6, 0, tmp_
return 0
if tmp_['rr'] > 1 or tmp_['ra'] > 2:
print 7, 0, tmp_
return 0
if tmp_['sr'] > 1 or tmp_['sa'] > 2:
print 8, 0, tmp_
return 0
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 9, 2, tmp_
return 2
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 10, 0, tmp_
return 0
return -1
def _check_animation(self, last, last_level, gen):
"""Internal helper function to check the animation."""
tmp_ = Counter()
results = Counter()
name, level, block = last, last_level, last
others = False
while name in self.ANIMATION and level >= last_level:
if name in self.LOOP:
if block != last:
count = self.check_results(tmp_)
if count > -1:
results[count] += 1
tmp_.clear()
tmp_['last'] += 1
for attribute in ('costume', 'orientation', 'position', 'size'):
if (name, 'relative') in self.BLOCKMAPPING[attribute]:
tmp_[(attribute, 'relative')] += 1
elif (name, 'absolute') in self.BLOCKMAPPING[attribute]:
tmp_[(attribute, 'absolute')] += 1
if name in self.TIMING:
tmp_['timing'] += 1
last_level = level
name, level, block = next(gen, ('', 0, ''))
# allow some exceptions
if name not in self.ANIMATION and name != '':
if not others:
if block.type.shape != 'stack':
last_level = level
(name, level, block) = next(gen, ('', 0, ''))
others = True
count = self.check_results(tmp_)
if count > -1:
results[count] += 1
return gen, results
def analyze(self, scratch, **kwargs):
"""Run and return the results from the Animation plugin."""
results = Counter()
for script in self.iter_scripts(scratch):
gen = self.iter_blocks(script.blocks)
name = 'start'
level = None
while name != '':
if name in self.ANIMATION:
gen, count = self._check_animation(name, level, gen)
results.update(count)
name, level, _ = next(gen, ('', 0, ''))
return {'animation': results}
class BroadcastReceive(HairballPlugin):
"""Plugin that checks for proper usage of broadcast and receive blocks."""
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
return events
def analyze(self, scratch, **kwargs):
"""Run and return the results from the BroadcastReceive plugin."""
all_scripts = list(self.iter_scripts(scratch))
results = defaultdict(set)
broadcast = dict((x, self.get_broadcast_events(x)) # Events by script
for x in all_scripts)
correct = self.get_receive(all_scripts)
results['never broadcast'] = set(correct.keys())
for script, events in broadcast.items():
for event in events.keys():
if event is True: # Remove dynamic broadcasts
results['dynamic broadcast'].add(script.morph.name)
del events[event]
elif event in correct:
results['never broadcast'].discard(event)
else:
results['never received'].add(event)
# remove events from correct dict that were never broadcast
for event in correct.keys():
if event in results['never broadcast']:
del correct[event]
# Find scripts that have more than one broadcast event on any possible
# execution path through the program
# TODO: Permit mutually exclusive broadcasts
for events in broadcast.values():
if len(events) > 1:
for event in events:
if event in correct:
results['parallel broadcasts'].add(event)
del correct[event]
# Find events that have two (or more) receivers in which one of the
# receivers has a "delay" block
for event, scripts in correct.items():
if len(scripts) > 1:
for script in scripts:
for _, _, block in self.iter_blocks(script.blocks):
if block.type.shape == 'stack':
results['multiple receivers with delay'].add(event)
if event in correct:
del correct[event]
results['success'] = set(correct.keys())
return {'broadcast': results}
class SaySoundSync(HairballPlugin):
"""Plugin that checks for synchronization between say and sound blocks.
The order should be:
Say "___",
Play sound "___" until done,
Say ""
"""
CORRECT = -1
ERROR = 0
INCORRECT = 1
HACKISH = 2
SAY_THINK = ('say %s', 'think %s')
SAY_THINK_DURATION = ('say %s for %s secs', 'think %s for %s secs')
ALL_SAY_THINK = SAY_THINK + SAY_THINK_DURATION
@staticmethod
def is_blank(word):
"""Return True if the string is empty, or only whitespace."""
return not word or word.isspace()
def analyze(self, scratch, **kwargs):
"""Categorize instances of attempted say and sound synchronization."""
errors = Counter()
for script in self.iter_scripts(scratch):
prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
gen = self.iter_blocks(script.blocks)
for name, depth, block in gen:
if prev_depth == depth:
if prev_name in self.SAY_THINK:
if name == 'play sound %s until done':
if not self.is_blank(prev_block.args[0]):
errors += self.check(gen)
# TODO: What about play sound?
elif prev_name in self.SAY_THINK_DURATION and \
'play sound %s' in name:
                        errors[self.INCORRECT] += 1
elif prev_name == 'play sound %s':
if name in self.SAY_THINK:
errors[self.INCORRECT] += 1
elif name in self.SAY_THINK_DURATION:
if self.is_blank(block.args[0]):
errors[self.ERROR] += 1
else:
errors[self.HACKISH] += 1
elif prev_name == 'play sound %s until done' and \
name in self.ALL_SAY_THINK:
if not self.is_blank(block.args[0]):
errors[self.INCORRECT] += 1
# TODO: Should there be an else clause here?
prev_name, prev_depth, prev_block = name, depth, block
return {'sound': errors}
def check(self, gen):
"""Check that the last part of the chain matches.
TODO: Fix to handle the following situation that appears to not work
say 'message 1'
play sound until done
say 'message 2'
say 'message 3'
play sound until done
say ''
"""
retval = Counter()
name, _, block = next(gen, ('', 0, ''))
if name in self.SAY_THINK:
if self.is_blank(block.args[0]):
retval[self.CORRECT] += 1
else:
name, _, block = next(gen, ('', 0, ''))
if name == 'play sound %s until done':
# Increment the correct count because we have at least
# one successful instance
retval[self.CORRECT] += 1
# This block represents the beginning of a second
retval += self.check(gen)
else:
retval[self.INCORRECT] += 1
else:
retval[self.INCORRECT] += 1
return retval
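# Illustrative block sequence (sketch) that check() above tallies as CORRECT:
#   say "Hello!"                      <- non-blank message
#   play sound "meow" until done
#   say ""                            <- blank say clears the speech bubble
# A trailing non-blank say that is not followed by another
# "play sound ... until done", or a missing final say, counts as INCORRECT.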
|
thsunmy/hairball
|
hairball/plugins/checks.py
|
Python
|
bsd-2-clause
| 10,450
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import gym
import numpy as np
import tensorflow as tf
from agents.cem import CEM
from networks.linear import LinearModel
from networks.softmax_linear import SoftmaxLinearModel
# Gym params
EXPERIMENT_DIR = './cartpole-experiment-1'
if __name__ == "__main__":
np.random.seed(0)
env = gym.make('CartPole-v0')
env.monitor.start(EXPERIMENT_DIR, force=True)
num_features = env.observation_space.shape[0]
with tf.Session() as sess:
linear_network = LinearModel(sess, num_features)
cem = CEM(num_features = num_features, \
sess = sess, \
pred_network = linear_network, \
env = env, \
batch_size = 30, \
max_num_steps = 200, \
elite_frac = .2, \
n_iter = 20)
cem.train()
print(cem.perf_hist)
softmax_linear_network = SoftmaxLinearModel(sess, num_features)
cem = CEM(num_features = num_features, \
sess = sess, \
pred_network = softmax_linear_network, \
env = env, \
batch_size = 30, \
max_num_steps = 200, \
elite_frac = .2, \
n_iter = 20)
cem.train()
print(cem.perf_hist)
|
evancasey/tensorflow-policy-gradient
|
run_cem.py
|
Python
|
mit
| 1,337
|
#!/usr/bin/env python
# encoding: utf-8
'''
IMPORTANT: requires python3 (>=3.4.0) and python2 (>=2.7) installed
MainAnaly -- shortdesc
MainAnaly is a description
It defines classes_and_methods
@author: Carlos Gonzalez Sesmero
@copyright: 2017
@license:
@contact: gcarlosonza@gmail.com
@deffield updated: Updated
@requires: pep8 pylint BeautifulSoup4
* For future releases.
'''
import sys
sys.path.append('/home/chilli-mint/Dropbox/MiStuRe/misture_core/MISTURE')
import utils.utilidades as Mutils
from funcionalityx.general import mainActividad
from funcionalityx.elemxml import FuncXML
def main(argv=None):
    # If main isn't called with args, take them from the console argv.
if argv is None:
argv = sys.argv[1:]
if __name__ == "__main__":
    # Configure the git/local submission
actividad = {
'profesor': ('grex',
'gcarlosonza@gmail.com',
'/home/chilli-mint/tmp/misture/p6ptavi/profesor/'),
'descripcion': 'Práctica 6 de PTAVI',
'ruta': '/home/chilli-mint/tmp/misture/p6ptavi/ejecucion/'
}
# lista_login_github = [("opedraza", "olallasanchez")]
lista_login_github = [
("iarranz", "igarag"),
("smarin", "silviamaa"),
("miriammz", "miriammz"),
("rgalan", "raquelgalan"),
("jmarugan", "jfernandezmaru"),
("jcdb", "jcdb"),
("maferna", "mghfdez"),
("mtejedor", "mtejedorg"),
("apavo", "apavo"),
("oterino", "aoterinoc"),
("ndiaz", "nathdiaza"),
("crodrigu", "crodriguezgarci"),
("ilope", "ilope236"),
("opedraza", "olallasanchez"),
("calvarez", "calvarezpe"),
("dpascual", "dpascualhe"),
("avera", "Abel-V"),
("amoles", "alvaromv83"),
("aramas", "aramas"),
("jbaos", "JaviBM11"),
("rsierra", "rsierrangulo"),
("imalo", "nmalo5"),
("mireya", "mireepink"),
("albagc", "albagcs"),
("rpablos", "raquelpt"),
("cgarcia", "celiagarcia"),
("lyanezgu", "lyanezgu"),
("omarled", "auronff10"),
("roger", "rogerurrutia"),
# "lsoria", "lsoriai"),
("zhiyuan", "ziyua"),
("mcapitan", "mcapitan"),
("juanmis", "Jmita"),
("molina", "jmartinezmolina"),
("afrutos", "alejandrodefrutos"),
# "carlosjloh", "CarlosJLoH"),
("sagun", "caarrieta")
]
# github_dict = collections.OrderedDict(lista_login_github)
lista_entrega = [(x[0], 'http://github.com/' + x[1] + '/ptavi-p6/')
for x in lista_login_github]
#####################################################################
time = Mutils.Chrono() # Mutils.Chrono()
time.start('Principal')
    # Build the activity and submissions from the initial information.
time.start('INICIAR')
actividad = mainActividad(lista_entrega, actividad)
'''
actividad.iniciar_rutas(borrarbase=True) # Borramos base e inicializamos.
actividad.iniciar_rutas_login()
time.finish('INICIAR')
print('INICIAR', time.t_count('INICIAR'), 'seconds.')
    # DOWNLOAD THE REPOSITORIES
time.start('COPIADO')
actividad.descargar_repos()
actividad.preparar_entorno_pruebas()
time.finish('COPIADO')
print('COPIADO', time.t_count('COPIADO'), 'seconds.')
    # ANALYSIS OF THE REPOSITORY FILES
time.start('FICHEROS')
actividad.fun_fuentes()
time.finish('FICHEROS')
print('FICHEROS', time.t_count('FICHEROS'), 'seconds.')
    # GIT ANALYSIS
time.start('GIT')
actividad.fun_git()
time.finish('GIT')
print('GIT', time.t_count('GIT'), 'seconds.')
    # ANALYSIS OF PYTHON CODE ELEMENTS
time.start('PYTHON')
actividad.fun_pyelements()
time.finish('PYTHON')
print('PYTHON', time.t_count('PYTHON'), 'seconds.')
    # ANALYSIS OF PYTHON CODE CHECKS
time.start('CHECKS')
actividad.fun_codecheck(codigos_exc='E125,E127,E128,E999')
time.finish('CHECKS')
print('CHECKS', time.t_count('CHECKS'), 'seconds.')
    # XML-PDML ANALYSIS
    time.start('XML')
    actividad.fun_xml(FuncXML.PDML)  # Selected TRACE type for PDML
time.finish('XML')
print('XML', time.t_count('XML'), 'seconds.')
'''
    # GENERATE EXAM
time.start('EXAMEN')
actividad.fun_examen()
time.finish('EXAMEN')
print('EXAMEN', time.t_count('EXAMEN'), 'seconds.')
    # TESTS
'''time.start('PRUEBAS')
actividad.fun_pruebas()
time.finish('PRUEBAS')
print('PRUEBAS', time.t_count('PRUEBAS'), 'seconds.')
'''
    # PRINT STUDENT SUMMARY AND TIMING.
time.finish('Principal')
print('Checked in {} seconds.'.format(time.t_count('Principal')))
"""
for name in sorted(conf.students):
# Finishing, mailing.
'''Mutils.sendMail('c.gonzalez@openmailbox.org',
'Resultados para ' + name,
'{}\n'
.format(Mutils.readFileFull(al.resultPath)))'''
    # Kill everything. Thanks, bmuma!
for python_file in PYFILES:
ejec = "ps aux | grep " + python_file + " | awk '{print $2}'"
for ident in subprocess_getoutput(ejec).split():
pass
os_system('kill -9 ' + ident + ' > /dev/null 2>&1')
"""
sys.exit()
|
carlos-gs/MiStuRe
|
misture_core/MainAnaly.py
|
Python
|
gpl-3.0
| 5,447
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils
from neutron.common import config
from neutron.conf.agent import cmd as command
LOG = logging.getLogger(__name__)
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
conf = cfg.CONF
command.register_cmd_opts(command.ip_opts, conf)
return conf
def remove_iptables_reference(ipset):
# Remove any iptables reference to this IPset
cmd = ['iptables-save'] if 'IPv4' in ipset else ['ip6tables-save']
iptables_save = utils.execute(cmd, run_as_root=True)
if ipset in iptables_save:
cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables']
cmd += ['-w', '10'] # wait for xlock release
LOG.info("Removing iptables rule for IPset: %s", ipset)
for rule in iptables_save.splitlines():
if '--match-set %s ' % ipset in rule and rule.startswith('-A'):
# change to delete
params = rule.split()
params[0] = '-D'
try:
utils.execute(cmd + params, run_as_root=True)
except Exception:
LOG.exception('Error, unable to remove iptables rule '
'for IPset: %s', ipset)
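# Illustrative rewrite performed above (sketch; the chain and set names are
# made up): a saved '-A' rule referencing the set is re-issued as a delete:
#   saved rule: -A neutron-openvswi-i1234 -m set --match-set NIPv4abcd src -j RETURN
#   executed:   iptables -w 10 -D neutron-openvswi-i1234 -m set --match-set NIPv4abcd src -j RETURN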
def destroy_ipset(conf, ipset):
# If there is an iptables reference and we don't remove it, the
# IPset removal will fail below
if conf.force:
remove_iptables_reference(ipset)
LOG.info("Destroying IPset: %s", ipset)
cmd = ['ipset', 'destroy', ipset]
try:
utils.execute(cmd, run_as_root=True)
except Exception:
LOG.exception('Error, unable to destroy IPset: %s', ipset)
def cleanup_ipsets(conf):
# Identify ipsets for destruction.
LOG.info("Destroying IPsets with prefix: %s", conf.prefix)
cmd = ['ipset', '-L', '-n']
ipsets = utils.execute(cmd, run_as_root=True)
for ipset in ipsets.split('\n'):
if conf.allsets or ipset.startswith(conf.prefix):
destroy_ipset(conf, ipset)
LOG.info("IPset cleanup completed successfully")
def main():
"""Main method for cleaning up IPsets.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --allsets flag should only be used as part of the cleanup of a devstack
installation as it will blindly destroy all IPsets.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_ipsets(conf)
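# Hedged usage sketch (assumption: exposed as a console script named
# neutron-ipset-cleanup; option names follow the conf attributes used above):
#   neutron-ipset-cleanup --force --prefix N
#   neutron-ipset-cleanup --allsets    # devstack teardown only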
|
noironetworks/neutron
|
neutron/cmd/ipset_cleanup.py
|
Python
|
apache-2.0
| 3,298
|
import time
from watchdog.events import LoggingEventHandler
from watchdog.observers import Observer
from config import *
from FSEventHandler import FSEventHandler
if __name__ == "__main__":
observer = Observer()
observer.schedule(FSEventHandler(api_base_uri=API_BASE_URI,
api_secret=API_SECRET, path_prefix=PATH_PREFIX), path='.',
recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
dash1291/teleport-client
|
teleport.py
|
Python
|
mit
| 523
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install/copy the image to the device."""
import cStringIO
import logging
import os
import shutil
import tempfile
import time
import urlparse
from chromite import cros
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import dev_server_wrapper as ds_wrapper
from chromite.lib import osutils
from chromite.lib import remote_access
DEVSERVER_STATIC_DIR = cros_build_lib.FromChrootPath(
os.path.join(constants.CHROOT_SOURCE_ROOT, 'devserver', 'static'))
IMAGE_NAME_TO_TYPE = {
'chromiumos_test_image.bin': 'test',
'chromiumos_image.bin': 'dev',
'chromiumos_base_image.bin': 'base',
'recovery_image.bin': 'recovery',
}
IMAGE_TYPE_TO_NAME = {
'test': 'chromiumos_test_image.bin',
'dev': 'chromiumos_image.bin',
'base': 'chromiumos_base_image.bin',
'recovery': 'recovery_image.bin',
}
XBUDDY_REMOTE = 'remote'
XBUDDY_LOCAL = 'local'
def ConvertTranslatedPath(original_path, translated_path):
"""Converts a translated xbuddy path to an xbuddy path.
Devserver/xbuddy does not accept requests with translated xbuddy
path (build-id/version/image-name). This function converts such a
translated path to an xbuddy path that is suitable to used in
devserver requests.
Args:
original_path: the xbuddy path before translation.
(e.g., remote/peppy/latest-canary).
translated_path: the translated xbuddy path
(e.g., peppy-release/R36-5760.0.0).
Returns:
    An xbuddy path that uniquely identifies a build and can be used in devserver
requests: {local|remote}/build-id/version/image_type
"""
chunks = translated_path.split(os.path.sep)
chunks[-1] = IMAGE_NAME_TO_TYPE[chunks[-1]]
if _GetXbuddyPath(original_path).startswith(XBUDDY_REMOTE):
chunks = [XBUDDY_REMOTE] + chunks
else:
chunks = [XBUDDY_LOCAL] + chunks
return os.path.sep.join(chunks)
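# Example (sketch; assumes the translated path ends with the image file name,
# as required by the IMAGE_NAME_TO_TYPE lookup above):
#   ConvertTranslatedPath(
#       'remote/peppy/latest-canary',
#       'peppy-release/R36-5760.0.0/chromiumos_test_image.bin')
#   -> 'remote/peppy-release/R36-5760.0.0/test'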
def _GetXbuddyPath(path):
"""A helper function to parse an xbuddy path.
Args:
    path: Either a path with no scheme or an xbuddy://path/for/xbuddy
Returns:
path/for/xbuddy if |path| is xbuddy://path/for/xbuddy; otherwise,
returns |path|.
Raises:
ValueError if |path| uses any scheme other than xbuddy://.
"""
parsed = urlparse.urlparse(path)
# pylint: disable=E1101
if parsed.scheme == 'xbuddy':
return '%s%s' % (parsed.netloc, parsed.path)
elif parsed.scheme == '':
logging.debug('Assuming %s is an xbuddy path.', path)
return path
else:
raise ValueError('Do not support scheme %s.', parsed.scheme)
def TranslateImagePath(path, board, debug=False):
"""Start devserver to translate the xbuddy |path|.
Args:
path: The xbuddy path.
board: The default board to use if board is not specified in |path|.
debug: If True, prints the devserver log on response error.
Returns:
A translated path that uniquely identifies one build:
build-id/version/image_name
"""
ds = ds_wrapper.DevServerWrapper(static_dir=DEVSERVER_STATIC_DIR,
board=board)
req = GenerateXbuddyRequest(path, 'translate')
logging.info('Starting local devserver to get image path...')
try:
ds.Start()
return ds.OpenURL(ds.GetURL(sub_dir=req), timeout=60 * 15)
except ds_wrapper.DevServerResponseError as e:
logging.error('Unable to translate the image path: %s. Are you sure the '
'image path is correct? The board %s is used when no board '
'name is included in the image path.', path, board)
if debug:
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise ValueError('Cannot locate image %s: %s' % (path, e))
except ds_wrapper.DevServerException:
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
finally:
ds.Stop()
def GenerateXbuddyRequest(path, req_type):
"""Generate an xbuddy request used to retreive payloads.
This function generates a xbuddy request based on |path| and
|req_type|, which can be used to query the devserver. For request
type 'image' ('update'), the devserver will repond with a URL
pointing to the folder where the image (update payloads) is stored.
Args:
path: An xbuddy path (with or without xbuddy://).
req_type: xbuddy request type ('update', 'image', or 'translate').
Returns:
    An xbuddy request.
"""
if req_type == 'update':
return 'xbuddy/%s?for_update=true&return_dir=true' % _GetXbuddyPath(path)
elif req_type == 'image':
return 'xbuddy/%s?return_dir=true' % _GetXbuddyPath(path)
elif req_type == 'translate':
return 'xbuddy_translate/%s' % _GetXbuddyPath(path)
else:
raise ValueError('Does not support xbuddy request type %s' % req_type)
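# Examples of the requests generated above (sketch; paths are illustrative):
#   GenerateXbuddyRequest('xbuddy://remote/peppy/latest', 'update')
#     -> 'xbuddy/remote/peppy/latest?for_update=true&return_dir=true'
#   GenerateXbuddyRequest('remote/peppy/latest', 'image')
#     -> 'xbuddy/remote/peppy/latest?return_dir=true'
#   GenerateXbuddyRequest('peppy-release/R36-5760.0.0', 'translate')
#     -> 'xbuddy_translate/peppy-release/R36-5760.0.0'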
def DevserverURLToLocalPath(url, static_dir, file_type):
"""Convert the devserver returned URL to a local path.
Devserver returns only the directory where the files are. This
function converts such a URL to a local path based on |file_type| so
that we can access the file without downloading it.
Args:
url: The URL returned by devserver (when return_dir=true).
static_dir: The static directory used by the devserver.
file_type: The image (in IMAGE_TYPE_TO_NAME) that we want to access.
Returns:
A local path to the file.
"""
# pylint: disable=E1101
# Example URL: http://localhost:8080/static/peppy-release/R33-5116.87.0
relative_path = urlparse.urlparse(url).path[len('/static/'):]
# Defaults to test image because that is how Xbuddy handles the path.
filename = IMAGE_TYPE_TO_NAME.get(file_type, IMAGE_TYPE_TO_NAME['test'])
# Expand the path because devserver may use symlinks.
real_path = osutils.ExpandPath(
os.path.join(static_dir, relative_path, filename))
# If devserver uses a symlink within chroot, and we are running
# outside of chroot, we need to convert the path.
if os.path.exists(real_path):
return real_path
else:
return cros_build_lib.FromChrootPath(real_path)
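# Example (sketch, using the URL from the comment above with file_type='test'):
#   DevserverURLToLocalPath(
#       'http://localhost:8080/static/peppy-release/R33-5116.87.0',
#       DEVSERVER_STATIC_DIR, 'test')
#   -> <static_dir>/peppy-release/R33-5116.87.0/chromiumos_test_image.bin
# (symlinks expanded, and mapped through FromChrootPath when the expanded
# path does not exist outside the chroot)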
class USBImager(object):
"""Copy image to the target removable device."""
def __init__(self, device, board, image, debug=False, yes=False):
"""Initalizes USBImager."""
self.device = device
self.board = board if board else cros_build_lib.GetDefaultBoard()
self.image = image
self.debug = debug
self.debug_level = logging.DEBUG if debug else logging.INFO
self.yes = yes
def DeviceNameToPath(self, device_name):
return '/dev/%s' % device_name
def GetRemovableDeviceDescription(self, device):
"""Returns a informational description of the removable |device|.
Args:
device: the device name (e.g. sdc).
Returns:
A string describing |device| (e.g. Patriot Memory 7918 MB).
"""
desc = []
desc.append(osutils.GetDeviceInfo(device, keyword='manufacturer'))
desc.append(osutils.GetDeviceInfo(device, keyword='product'))
desc.append(osutils.GetDeviceSize(self.DeviceNameToPath(device)))
return ' '.join([x for x in desc if x])
def ListAllRemovableDevices(self):
"""Returns a list of removable devices.
Returns:
A list of device names (e.g. ['sdb', 'sdc']).
"""
devices = osutils.ListBlockDevices()
removable_devices = []
for d in devices:
if d.TYPE == 'disk' and d.RM == '1':
removable_devices.append(d.NAME)
return removable_devices
def ChooseRemovableDevice(self, devices):
"""Lists all removable devices and asks user to select/confirm.
Args:
devices: a list of device names (e.g. ['sda', 'sdb']).
Returns:
The device name chosen by the user.
"""
idx = cros_build_lib.GetChoice(
'Removable device(s) found. Please select/confirm to continue:',
[self.GetRemovableDeviceDescription(x) for x in devices])
return devices[idx]
def CopyImageToDevice(self, image, device):
"""Copies |image| to the removable |device|.
Args:
image: Path to the image to copy.
device: Device to copy to.
"""
# Use pv to display progress bar if possible.
cmd_base = 'pv -pretb'
try:
cros_build_lib.RunCommand(['pv', '--version'], print_cmd=False,
capture_output=True)
except cros_build_lib.RunCommandError:
cmd_base = 'cat'
cmd = '%s %s | dd of=%s bs=4M iflag=fullblock oflag=sync' % (
cmd_base, image, device)
cros_build_lib.SudoRunCommand(cmd, shell=True)
cros_build_lib.SudoRunCommand(['sync'], debug_level=self.debug_level)
def GetImagePathFromDevserver(self, path):
"""Gets image path from devserver.
Asks devserver to stage the image and convert the returned URL to a
local path to the image.
Args:
path: An xbuddy path with or without (xbuddy://).
Returns:
A local path to the image.
"""
ds = ds_wrapper.DevServerWrapper(static_dir=DEVSERVER_STATIC_DIR,
board=self.board)
req = GenerateXbuddyRequest(path, 'image')
logging.info('Starting a local devserver to stage image...')
try:
ds.Start()
url = ds.OpenURL(ds.GetURL(sub_dir=req), timeout=60 * 15)
except ds_wrapper.DevServerResponseError:
logging.warning('Could not download %s.', path)
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
else:
# Print out the log when debug is on.
logging.debug(ds.TailLog() or 'No devserver log is available.')
finally:
ds.Stop()
return DevserverURLToLocalPath(url, DEVSERVER_STATIC_DIR,
path.rsplit(os.path.sep)[-1])
def ChooseImageFromDirectory(self, dir_path):
"""Lists all image files in |dir_path| and ask user to select one."""
images = [x for x in os.listdir(dir_path) if
os.path.isfile(os.path.join(dir_path, x)) and x.endswith(".bin")]
idx = 0
if len(images) == 0:
raise ValueError('No image found in %s.' % dir_path)
elif len(images) > 1:
idx = cros_build_lib.GetChoice(
'Multiple images found in %s. Please select one to continue:' % (
dir_path), images)
return os.path.join(dir_path, images[idx])
def _GetImagePath(self):
"""Returns the image path to use."""
image_path = translated_path = None
if os.path.isfile(self.image):
image_path = self.image
elif os.path.isdir(self.image):
# Ask user which image (*.bin) in the folder to use.
image_path = self.ChooseImageFromDirectory(self.image)
else:
# Translate the xbuddy path to get the exact image to use.
translated_path = TranslateImagePath(self.image, self.board,
debug=self.debug)
# Convert the translated path to be used in a request.
xbuddy_path = ConvertTranslatedPath(self.image, translated_path)
image_path = self.GetImagePathFromDevserver(xbuddy_path)
logging.info('Using image %s', translated_path or image_path)
return image_path
def Run(self):
"""Image the removable device."""
devices = self.ListAllRemovableDevices()
if self.device:
# If user specified a device path, check if it exists.
if not os.path.exists(self.device):
cros_build_lib.Die('Device path %s does not exist.' % self.device)
# Then check if it is removable.
if self.device not in [self.DeviceNameToPath(x) for x in devices]:
msg = '%s is not a removable device.' % self.device
if not (self.yes or cros_build_lib.BooleanPrompt(
default=False, prolog=msg)):
cros_build_lib.Die('You can specify usb:// to choose from a list of '
'removable devices.')
target = None
if self.device:
# Get device name from path (e.g. sdc in /dev/sdc).
target = self.device.rsplit(os.path.sep, 1)[-1]
elif devices:
# Ask user to choose from the list.
target = self.ChooseRemovableDevice(devices)
else:
cros_build_lib.Die('No removable devices detected.')
image_path = self._GetImagePath()
try:
self.CopyImageToDevice(image_path, self.DeviceNameToPath(target))
except cros_build_lib.RunCommandError:
logging.error('Failed copying image to device %s',
self.DeviceNameToPath(target))
class FileImager(USBImager):
"""Copy image to the target path."""
def Run(self):
"""Copy the image to the path specified by self.device."""
if not os.path.exists(self.device):
cros_build_lib.Die('Path %s does not exist.' % self.device)
image_path = self._GetImagePath()
if os.path.isdir(self.device):
logging.info('Copying to %s',
os.path.join(self.device, os.path.basename(image_path)))
else:
logging.info('Copying to %s', self.device)
try:
shutil.copy(image_path, self.device)
except IOError:
logging.error('Failed to copy image %s to %s', image_path, self.device)
class DeviceUpdateError(Exception):
"""Thrown when there is an error during device update."""
class RemoteDeviceUpdater(object):
"""Performs update on a remote device."""
ROOTFS_FILENAME = 'update.gz'
STATEFUL_FILENAME = 'stateful.tgz'
DEVSERVER_PKG_DIR = os.path.join(constants.SOURCE_ROOT, 'src/platform/dev')
DEVSERVER_FILENAME = 'devserver.py'
STATEFUL_UPDATE_BIN = '/usr/bin/stateful_update'
UPDATE_ENGINE_BIN = 'update_engine_client'
UPDATE_CHECK_INTERVAL = 10
# Root working directory on the device. This directory is in the
# stateful partition and thus has enough space to store the payloads.
DEVICE_BASE_DIR = '/mnt/stateful_partition/cros-flash'
def __init__(self, ssh_hostname, ssh_port, image, stateful_update=True,
rootfs_update=True, clobber_stateful=False, reboot=True,
board=None, src_image_to_delta=None, wipe=True, debug=False,
yes=False, ping=True):
"""Initializes RemoteDeviceUpdater"""
if not stateful_update and not rootfs_update:
cros_build_lib.Die('No update operation to perform. Use -h to see usage.')
self.tempdir = tempfile.mkdtemp(prefix='cros-flash')
self.ssh_hostname = ssh_hostname
self.ssh_port = ssh_port
self.image = image
self.board = board
self.src_image_to_delta = src_image_to_delta
self.do_stateful_update = stateful_update
self.do_rootfs_update = rootfs_update
self.clobber_stateful = clobber_stateful
self.reboot = reboot
self.debug = debug
self.ping = ping
# Do not wipe if debug is set.
self.wipe = wipe and not debug
self.yes = yes
# The variables below are set if user passes an local image path.
# Used to store a copy of the local image.
self.image_tempdir = None
# Used to store a symlink in devserver's static_dir.
self.static_tempdir = None
@classmethod
def GetUpdateStatus(cls, device, keys=None):
"""Returns the status of the update engine on the |device|.
Retrieves the status from update engine and confirms all keys are
in the status.
Args:
device: A ChromiumOSDevice object.
keys: the keys to look for in the status result (defaults to
['CURRENT_OP']).
Returns:
A list of values in the order of |keys|.
"""
keys = ['CURRENT_OP'] if not keys else keys
result = device.RunCommand([cls.UPDATE_ENGINE_BIN, '--status'],
capture_output=True)
if not result.output:
raise Exception('Cannot get update status')
try:
status = cros_build_lib.LoadKeyValueFile(
cStringIO.StringIO(result.output))
except ValueError:
raise ValueError('Cannot parse update status')
values = []
for key in keys:
if key not in status:
        raise ValueError('Missing %s in the update engine status' % key)
values.append(status.get(key))
return values
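  # Example (sketch): UpdateRootfs below uses this same call to poll both the
  # operation and its progress in one round trip:
  #   op, progress = self.GetUpdateStatus(device, ['CURRENT_OP', 'PROGRESS'])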
def UpdateStateful(self, device, payload, clobber=False):
"""Update the stateful partition of the device.
Args:
device: The ChromiumOSDevice object to update.
payload: The path to the update payload.
clobber: Clobber stateful partition (defaults to False).
"""
# Copy latest stateful_update to device.
stateful_update_bin = cros_build_lib.FromChrootPath(
self.STATEFUL_UPDATE_BIN)
device.CopyToWorkDir(stateful_update_bin)
msg = 'Updating stateful partition'
logging.info('Copying stateful payload to device...')
device.CopyToWorkDir(payload)
cmd = ['sh',
os.path.join(device.work_dir,
os.path.basename(self.STATEFUL_UPDATE_BIN)),
os.path.join(device.work_dir, os.path.basename(payload))]
if clobber:
cmd.append('--stateful_change=clean')
msg += ' with clobber enabled'
logging.info('%s...', msg)
try:
device.RunCommand(cmd)
except cros_build_lib.RunCommandError:
      logging.error('Failed to perform stateful partition update.')
def _CopyDevServerPackage(self, device, tempdir):
"""Copy devserver package to work directory of device.
Args:
device: The ChromiumOSDevice object to copy the package to.
tempdir: The directory to temporarily store devserver package.
"""
logging.info('Copying devserver package to device...')
src_dir = os.path.join(tempdir, 'src')
osutils.RmDir(src_dir, ignore_missing=True)
shutil.copytree(
self.DEVSERVER_PKG_DIR, src_dir,
ignore=shutil.ignore_patterns('*.pyc', 'tmp*', '.*', 'static', '*~'))
device.CopyToWorkDir(src_dir)
return os.path.join(device.work_dir, os.path.basename(src_dir))
def SetupRootfsUpdate(self, device):
"""Makes sure |device| is ready for rootfs update."""
logging.info('Checking if update engine is idle...')
status, = self.GetUpdateStatus(device)
if status == 'UPDATE_STATUS_UPDATED_NEED_REBOOT':
logging.info('Device needs to reboot before updating...')
device.Reboot()
status, = self.GetUpdateStatus(device)
if status != 'UPDATE_STATUS_IDLE':
raise DeviceUpdateError('Update engine is not idle. Status: %s' % status)
def UpdateRootfs(self, device, payload, tempdir):
"""Update the rootfs partition of the device.
Args:
device: The ChromiumOSDevice object to update.
payload: The path to the update payload.
tempdir: The directory to store temporary files.
"""
# Setup devserver and payload on the target device.
static_dir = os.path.join(device.work_dir, 'static')
payload_dir = os.path.join(static_dir, 'pregenerated')
src_dir = self._CopyDevServerPackage(device, tempdir)
device.RunCommand(['mkdir', '-p', payload_dir])
logging.info('Copying rootfs payload to device...')
device.CopyToDevice(payload, payload_dir)
devserver_bin = os.path.join(src_dir, self.DEVSERVER_FILENAME)
ds = ds_wrapper.RemoteDevServerWrapper(
device, devserver_bin, static_dir=static_dir, log_dir=device.work_dir)
logging.info('Updating rootfs partition')
try:
ds.Start()
omaha_url = ds.GetURL(sub_dir='update/pregenerated')
cmd = [self.UPDATE_ENGINE_BIN, '-check_for_update',
'-omaha_url=%s' % omaha_url]
device.RunCommand(cmd)
# Loop until update is complete.
while True:
op, progress = self.GetUpdateStatus(device, ['CURRENT_OP', 'PROGRESS'])
logging.info('Waiting for update...status: %s at progress %s',
op, progress)
if op == 'UPDATE_STATUS_UPDATED_NEED_REBOOT':
break
if op == 'UPDATE_STATUS_IDLE':
raise DeviceUpdateError(
'Update failed with unexpected update status: %s' % op)
time.sleep(self.UPDATE_CHECK_INTERVAL)
ds.Stop()
except Exception:
logging.error('Rootfs update failed.')
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
finally:
ds.Stop()
device.CopyFromDevice(ds.log_file,
os.path.join(tempdir, 'target_devserver.log'),
error_code_ok=True)
device.CopyFromDevice('/var/log/update_engine.log', tempdir,
follow_symlinks=True,
error_code_ok=True)
def ConvertLocalPathToXbuddyPath(self, path):
"""Converts |path| to an xbuddy path.
    This function copies the image into a temporary directory in chroot
and creates a symlink in static_dir for devserver/xbuddy to
access.
Args:
path: Path to an image.
Returns:
The xbuddy path for |path|.
"""
self.image_tempdir = osutils.TempDir(
base_dir=cros_build_lib.FromChrootPath('/tmp'),
prefix='cros_flash_local_image',
sudo_rm=True)
tempdir_path = self.image_tempdir.tempdir
logging.info('Copying image to temporary directory %s', tempdir_path)
# Devserver only knows the image names listed in IMAGE_TYPE_TO_NAME.
# Rename the image to chromiumos_test_image.bin when copying.
TEMP_IMAGE_TYPE = 'test'
shutil.copy(path,
os.path.join(tempdir_path, IMAGE_TYPE_TO_NAME[TEMP_IMAGE_TYPE]))
chroot_path = cros_build_lib.ToChrootPath(tempdir_path)
# Create and link static_dir/local_imagexxxx/link to the image
# folder, so that xbuddy/devserver can understand the path.
    # Alternatively, we could pass '--image' at devserver startup,
# but this flag is deprecated.
self.static_tempdir = osutils.TempDir(base_dir=DEVSERVER_STATIC_DIR,
prefix='local_image',
sudo_rm=True)
relative_dir = os.path.join(os.path.basename(self.static_tempdir.tempdir),
'link')
symlink_path = os.path.join(DEVSERVER_STATIC_DIR, relative_dir)
logging.info('Creating a symlink %s -> %s', symlink_path, chroot_path)
os.symlink(chroot_path, symlink_path)
return os.path.join(relative_dir, TEMP_IMAGE_TYPE)
def GetUpdatePayloads(self, path, payload_dir, board=None,
src_image_to_delta=None, timeout=60 * 15):
"""Launch devserver to get the update payloads.
Args:
path: The xbuddy path.
payload_dir: The directory to store the payloads.
board: The default board to use when |path| is None.
src_image_to_delta: Image used as the base to generate the delta payloads.
timeout: Timeout for launching devserver (seconds).
"""
ds = ds_wrapper.DevServerWrapper(static_dir=DEVSERVER_STATIC_DIR,
src_image=src_image_to_delta, board=board)
req = GenerateXbuddyRequest(path, 'update')
logging.info('Starting local devserver to generate/serve payloads...')
try:
ds.Start()
url = ds.OpenURL(ds.GetURL(sub_dir=req), timeout=timeout)
ds.DownloadFile(os.path.join(url, self.ROOTFS_FILENAME), payload_dir)
ds.DownloadFile(os.path.join(url, self.STATEFUL_FILENAME), payload_dir)
except ds_wrapper.DevServerException:
logging.warning(ds.TailLog() or 'No devserver log is available.')
raise
else:
logging.debug(ds.TailLog() or 'No devserver log is available.')
finally:
ds.Stop()
if os.path.exists(ds.log_file):
shutil.copyfile(ds.log_file,
os.path.join(payload_dir, 'local_devserver.log'))
else:
logging.warning('Could not find %s', ds.log_file)
def _CheckPayloads(self, payload_dir):
"""Checks that all update payloads exists in |payload_dir|."""
filenames = []
filenames += [self.ROOTFS_FILENAME] if self.do_rootfs_update else []
filenames += [self.STATEFUL_FILENAME] if self.do_stateful_update else []
for fname in filenames:
payload = os.path.join(payload_dir, fname)
if not os.path.exists(payload):
cros_build_lib.Die('Payload %s does not exist!' % payload)
def Verify(self, old_root_dev, new_root_dev):
"""Verifies that the root deivce changed after reboot."""
assert new_root_dev and old_root_dev
if new_root_dev == old_root_dev:
raise DeviceUpdateError(
'Failed to boot into the new version. Possibly there was a '
'signing problem, or an automated rollback occurred because '
'your new image failed to boot.')
@classmethod
def GetRootDev(cls, device):
"""Get the current root device on |device|."""
rootdev = device.RunCommand(
['rootdev', '-s'], capture_output=True).output.strip()
logging.debug('Current root device is %s', rootdev)
return rootdev
def Cleanup(self):
"""Cleans up the temporary directory."""
if self.image_tempdir:
self.image_tempdir.Cleanup()
if self.static_tempdir:
self.static_tempdir.Cleanup()
if self.wipe:
logging.info('Cleaning up temporary working directory...')
osutils.RmDir(self.tempdir)
else:
logging.info('You can find the log files and/or payloads in %s',
self.tempdir)
def _CanRunDevserver(self, device, tempdir):
"""We can run devserver on |device|.
If the stateful partition is corrupted, Python or other packages
(e.g. cherrypy) that Cros Flash needs for rootfs update may be
missing on |device|.
Args:
device: A ChromiumOSDevice object.
tempdir: A temporary directory to store files.
Returns:
True if we can start devserver; False otherwise.
"""
logging.info('Checking if we can run devserver on the device.')
src_dir = self._CopyDevServerPackage(device, tempdir)
devserver_bin = os.path.join(src_dir, self.DEVSERVER_FILENAME)
try:
device.RunCommand(['python', devserver_bin, '--help'])
except cros_build_lib.RunCommandError as e:
logging.warning('Cannot start devserver: %s', e)
return False
return True
def Run(self):
"""Performs remote device update."""
old_root_dev, new_root_dev = None, None
try:
with remote_access.ChromiumOSDeviceHandler(
self.ssh_hostname, port=self.ssh_port,
base_dir=self.DEVICE_BASE_DIR, ping=self.ping) as device:
board = cros_build_lib.GetBoard(device_board=device.board,
override_board=self.board,
force=self.yes)
logging.info('Board is %s', board)
if os.path.isdir(self.image):
# If the given path is a directory, we use the provided
# update payload(s) in the directory.
payload_dir = self.image
logging.info('Using provided payloads in %s', payload_dir)
else:
if os.path.isfile(self.image):
# If the given path is an image, make sure devserver can
# access it and generate payloads.
logging.info('Using image %s', self.image)
image_path = self.ConvertLocalPathToXbuddyPath(self.image)
else:
# For xbuddy paths, we should do a sanity check / confirmation
# when the xbuddy board doesn't match the board on the
# device. Unfortunately this isn't currently possible since we
# don't want to duplicate xbuddy code. TODO(sosa):
# crbug.com/340722 and use it to compare boards.
# Translate the xbuddy path to get the exact image to use.
translated_path = TranslateImagePath(self.image, board,
debug=self.debug)
logging.info('Using image %s', translated_path)
# Convert the translated path to be used in the update request.
image_path = ConvertTranslatedPath(self.image, translated_path)
# Launch a local devserver to generate/serve update payloads.
payload_dir = self.tempdir
self.GetUpdatePayloads(image_path, payload_dir,
board=board,
src_image_to_delta=self.src_image_to_delta)
# Verify that all required payloads are in the payload directory.
self._CheckPayloads(payload_dir)
restore_stateful = False
if (not self._CanRunDevserver(device, self.tempdir) and
self.do_rootfs_update):
msg = ('Cannot start devserver! The stateful partition may be '
'corrupted. Cros Flash can try to restore the stateful '
'partition first.')
restore_stateful = self.yes or cros_build_lib.BooleanPrompt(
default=False, prolog=msg)
if not restore_stateful:
cros_build_lib.Die('Cannot continue to perform rootfs update!')
if restore_stateful:
logging.warning('Restoring the stateful partition...')
payload = os.path.join(payload_dir, self.STATEFUL_FILENAME)
self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
device.Reboot()
if self._CanRunDevserver(device, self.tempdir):
logging.info('Stateful partition restored.')
else:
cros_build_lib.Die('Unable to restore stateful partition. Exiting.')
# Perform device updates.
if self.do_rootfs_update:
self.SetupRootfsUpdate(device)
# Record the current root device. This must be done after
# SetupRootfsUpdate because SetupRootfsUpdate may reboot the
# device if there is a pending update, which changes the
# root device.
old_root_dev = self.GetRootDev(device)
payload = os.path.join(payload_dir, self.ROOTFS_FILENAME)
self.UpdateRootfs(device, payload, self.tempdir)
logging.info('Rootfs update completed.')
if self.do_stateful_update and not restore_stateful:
payload = os.path.join(payload_dir, self.STATEFUL_FILENAME)
self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
logging.info('Stateful update completed.')
if self.reboot:
          logging.info('Rebooting device...')
device.Reboot()
if self.clobber_stateful:
# --clobber-stateful wipes the stateful partition and the
# working directory on the device no longer exists. To
# remedy this, we recreate the working directory here.
device.BaseRunCommand(['mkdir', '-p', device.work_dir])
if self.do_rootfs_update and self.reboot:
new_root_dev = self.GetRootDev(device)
self.Verify(old_root_dev, new_root_dev)
except Exception:
logging.error('Device update failed.')
raise
else:
logging.info('Update performed successfully.')
finally:
self.Cleanup()
@cros.CommandDecorator('flash')
class FlashCommand(cros.CrosCommand):
"""Update the device with an image.
  This command updates a remote device with the image
  (ssh://<hostname>[:port]), copies an image to a removable device
  (usb://<device_path>), or copies an xbuddy path to a local
  file path (file://file_path).
For device update, it assumes that device is able to accept ssh
connections.
For rootfs partition update, this command may launch a devserver to
generate payloads. As a side effect, it may create symlinks in
static_dir/others used by the devserver.
"""
EPILOG = """
To update/image the device with the latest locally built image:
cros flash device latest
cros flash device
To update/image the device with an xbuddy path:
cros flash device xbuddy://{local, remote}/<board>/<version>
Common xbuddy version aliases are 'latest' (alias for 'latest-stable')
latest-{dev, beta, stable, canary}, and latest-official.
To update/image the device with a local image path:
cros flash device /path/to/image.bin
Examples:
cros flash 192.168.1.7 xbuddy://remote/x86-mario/latest-canary
cros flash 192.168.1.7 xbuddy://remote/x86-mario-paladin/R32-4830.0.0-rc1
cros flash usb:// xbuddy://remote/trybot-x86-mario-paladin/R32-5189.0.0-b100
cros flash usb:///dev/sde xbuddy://peppy/latest
cros flash file:///~/images xbuddy://peppy/latest
For more information and known problems/fixes, please see:
http://dev.chromium.org/chromium-os/build/cros-flash
"""
SSH_MODE = 'ssh'
USB_MODE = 'usb'
FILE_MODE = 'file'
# Override base class property to enable stats upload.
upload_stats = True
@classmethod
def AddParser(cls, parser):
"""Add parser arguments."""
super(FlashCommand, cls).AddParser(parser)
parser.add_argument(
'device', help='ssh://device_hostname[:port] or usb://{device_path}. '
'If no device_path is given (i.e. usb://), user will be prompted to '
'choose from a list of removable devices.')
parser.add_argument(
'image', nargs='?', default='latest', help="A local path or an xbuddy "
"path: xbuddy://{local|remote}/board/version/{image_type} image_type "
"can be: 'test', 'dev', 'base', or 'recovery'. Note any strings that "
"do not map to a real file path will be converted to an xbuddy path "
"i.e., latest, will map to xbuddy://latest.")
parser.add_argument(
'--clear-cache', default=False, action='store_true',
help='Clear the devserver static directory. This deletes all the '
'downloaded images and payloads, and also payloads generated by '
'the devserver. Default is not to clear.')
update = parser.add_argument_group('Advanced device update options')
update.add_argument(
'--board', default=None, help='The board to use. By default it is '
'automatically detected. You can override the detected board with '
'this option')
update.add_argument(
'--yes', default=False, action='store_true',
help='Force yes to any prompt. Use with caution.')
update.add_argument(
'--no-reboot', action='store_false', dest='reboot', default=True,
help='Do not reboot after update. Default is always reboot.')
update.add_argument(
'--no-wipe', action='store_false', dest='wipe', default=True,
help='Do not wipe the temporary working directory. Default '
'is always wipe.')
update.add_argument(
'--no-stateful-update', action='store_false', dest='stateful_update',
help='Do not update the stateful partition on the device. '
'Default is always update.')
update.add_argument(
'--no-rootfs-update', action='store_false', dest='rootfs_update',
help='Do not update the rootfs partition on the device. '
'Default is always update.')
update.add_argument(
'--src-image-to-delta', type='path',
help='Local path to an image to be used as the base to generate '
'delta payloads.')
update.add_argument(
'--clobber-stateful', action='store_true', default=False,
help='Clobber stateful partition when performing update.')
update.add_argument(
'--no-ping', dest='ping', action='store_false', default=True,
help='Do not ping the device before attempting to connect to it.')
def __init__(self, options):
"""Initializes cros flash."""
cros.CrosCommand.__init__(self, options)
self.run_mode = None
self.ssh_hostname = None
self.ssh_port = None
self.usb_dev = None
self.copy_path = None
self.any = False
def _ParseDevice(self, device):
"""Parse |device| and set corresponding variables ."""
# pylint: disable=E1101
if urlparse.urlparse(device).scheme == '':
# For backward compatibility, prepend ssh:// ourselves.
device = 'ssh://%s' % device
parsed = urlparse.urlparse(device)
if parsed.scheme == self.SSH_MODE:
self.run_mode = self.SSH_MODE
self.ssh_hostname = parsed.hostname
self.ssh_port = parsed.port
elif parsed.scheme == self.USB_MODE:
self.run_mode = self.USB_MODE
self.usb_dev = device[len('%s://' % self.USB_MODE):]
elif parsed.scheme == self.FILE_MODE:
self.run_mode = self.FILE_MODE
self.copy_path = device[len('%s://' % self.FILE_MODE):]
else:
cros_build_lib.Die('Does not support device %s' % device)
# pylint: disable=E1101
def Run(self):
"""Perfrom the cros flash command."""
self.options.Freeze()
if self.options.clear_cache:
logging.info('Clearing the cache...')
ds_wrapper.DevServerWrapper.WipeStaticDirectory(DEVSERVER_STATIC_DIR)
try:
osutils.SafeMakedirsNonRoot(DEVSERVER_STATIC_DIR)
except OSError:
logging.error('Failed to create %s', DEVSERVER_STATIC_DIR)
self._ParseDevice(self.options.device)
try:
if self.run_mode == self.SSH_MODE:
logging.info('Preparing to update the remote device %s',
self.options.device)
updater = RemoteDeviceUpdater(
self.ssh_hostname,
self.ssh_port,
self.options.image,
board=self.options.board,
src_image_to_delta=self.options.src_image_to_delta,
rootfs_update=self.options.rootfs_update,
stateful_update=self.options.stateful_update,
clobber_stateful=self.options.clobber_stateful,
reboot=self.options.reboot,
wipe=self.options.wipe,
debug=self.options.debug,
yes=self.options.yes,
ping=self.options.ping)
# Perform device update.
updater.Run()
elif self.run_mode == self.USB_MODE:
path = osutils.ExpandPath(self.usb_dev) if self.usb_dev else ''
logging.info('Preparing to image the removable device %s', path)
imager = USBImager(path,
self.options.board,
self.options.image,
debug=self.options.debug,
yes=self.options.yes)
imager.Run()
elif self.run_mode == self.FILE_MODE:
path = osutils.ExpandPath(self.copy_path) if self.copy_path else ''
logging.info('Preparing to copy image to %s', path)
imager = FileImager(path,
self.options.board,
self.options.image,
debug=self.options.debug,
yes=self.options.yes)
imager.Run()
except (Exception, KeyboardInterrupt) as e:
logging.error(e)
logging.error('Cros Flash failed before completing.')
if self.options.debug:
raise
else:
logging.info('Cros Flash completed successfully.')
|
chadversary/chromiumos.chromite
|
cros/commands/cros_flash.py
|
Python
|
bsd-3-clause
| 38,621
|
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cloudengine import ce_is_is_instance
from units.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from units.modules.utils import set_module_args
class TestCloudEngineLacpModule(TestCloudEngineModule):
module = ce_is_is_instance
def setUp(self):
super(TestCloudEngineLacpModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.get_nc_config')
self.get_nc_config = self.mock_get_config.start()
self.mock_set_config = patch('ansible.modules.network.cloudengine.ce_is_is_instance.set_nc_config')
self.set_nc_config = self.mock_set_config.start()
self.set_nc_config.return_value = None
def tearDown(self):
super(TestCloudEngineLacpModule, self).tearDown()
self.mock_set_config.stop()
self.mock_get_config.stop()
def test_isis_instance_present(self):
xml_existing = load_fixture('ce_is_is_instance', 'before.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'after.txt')
update = ['isis 100', 'vpn-instance __public__']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='present')
set_module_args(config)
result = self.execute_module(changed=True)
self.assertEquals(sorted(result['updates']), sorted(update))
    def test_isis_instance_absent(self):
xml_existing = load_fixture('ce_is_is_instance', 'after.txt')
xml_end_state = load_fixture('ce_is_is_instance', 'before.txt')
update = ['undo isis 100']
self.get_nc_config.side_effect = (xml_existing, xml_end_state)
config = dict(
instance_id=100,
vpn_name='__public__',
state='absent')
set_module_args(config)
result = self.execute_module(changed=True)
self.assertEquals(sorted(result['updates']), sorted(update))
|
roadmapper/ansible
|
test/units/modules/network/cloudengine/test_ce_is_is_instance.py
|
Python
|
gpl-3.0
| 2,883
|
from main import app
from flask import render_template, request
from config import FREEZER_BASE_URL
@app.route('/')
def index():
page_title = "Timeline: A Year Of 'Systemic Failure' At DCF"
page_url = FREEZER_BASE_URL.rstrip('/') + request.path
social = {
'title': "Timeline: A Year Of 'Systemic Failure' At DCF",
'subtitle': "This year has brought a series of tragedies and revelations about problems at Vermont's Department for Children and Families. VPR looks back over the months.",
'img': "http://www.vpr.net/apps/timeline-dcf-systemic-failure/static/img/timeline-social-snap.png",
'description': "This year has brought a series of tragedies and revelations about problems at Vermont's Department for Children and Families. VPR looks back over the months.",
'twitter_text': "Timeline: A Year Of 'Systemic Failure' At DCF",
'creator': "Taylor Dobbs and Angela Evancie",
'twitter_hashtag': "VT"
}
return render_template('content.html',
page_title=page_title,
page_url=page_url,
social=social)
|
vprnet/timeline-dcf-systemic-failure
|
main/views.py
|
Python
|
apache-2.0
| 1,100
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
__version__ = "3.0.0"
__bibtex__ = """
@article{emcee,
author = {{Foreman-Mackey}, D. and {Hogg}, D.~W. and {Lang}, D. and {Goodman}, J.},
title = {emcee: The MCMC Hammer},
journal = {PASP},
year = 2013,
volume = 125,
pages = {306-312},
eprint = {1202.3665},
doi = {10.1086/670067}
}
""" # NOQA
try:
__EMCEE_SETUP__
except NameError:
__EMCEE_SETUP__ = False
if not __EMCEE_SETUP__:
from .ensemble import EnsembleSampler
from .state import State
from . import moves
from . import autocorr
from . import backends
__all__ = ["EnsembleSampler", "State", "moves", "autocorr", "backends"]
|
farr/emcee
|
emcee/__init__.py
|
Python
|
mit
| 731
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011, 2012 Pablo Barenbaum <foones@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import pygobstoneslang.common.position as position
import bnf_parser
import ast
import gbs_builtins
import pygobstoneslang.common.i18n as i18n
from pygobstoneslang.common.utils import *
import grammar.i18n
from gbs_api import GobstonesOptions
from grammar import XGbsGrammarFile
#### Parser of Gobstones programs.
####
#### Complements gbs_grammar.bnf solving conflicts and
#### checking for errors.
class GbsParserException(bnf_parser.ParserException):
pass
TOKEN_BUFFER_SIZE = 4
class GbsLexer(bnf_parser.Lexer):
"""Lexical analyzer that identifies common lexical errors in Gobstones
source files and solves the conflictive situation in the LL(1) grammar
    by inserting an extra semicolon (;) in the appropriate case."""
def __init__(self, tokens, reserved, *args, **kwargs):
bnf_parser.Lexer.__init__(self, tokens, reserved, *args, **kwargs)
self.reserved_lower = {}
for x in reserved:
self.reserved_lower[x.lower()] = x
for x in gbs_builtins.get_correct_names():
self.reserved_lower[x.lower()] = x
def _tokenize_solve_conflict(self, string, filename):
self.supertok = bnf_parser.Lexer.tokenize(self, string, filename)
self.token_queue = []
q = []
try:
self.token_mem = [self._read()]
yield self.token_mem[0]
while True:
curr_tok = self._read()
self.token_mem.append(curr_tok)
if self._conflictive_case():
# scan input, balancing parentheses
op = 1
q = [curr_tok]
while op > 0:
tok = self._read()
q.append(tok)
if tok.type == '(':
op += 1
elif tok.type == ')':
op -= 1
tok = self._read()
q.append(tok)
if tok.type == ':=': # insert a semicolon
yield bnf_parser.Token(';', ';', curr_tok.pos_begin, curr_tok.pos_end)
self.token_queue = q + self.token_queue
q = []
else:
yield curr_tok
if len(self.token_mem) > TOKEN_BUFFER_SIZE:
self.token_mem.pop(0)
except StopIteration:
for tok in q: yield tok
def _read(self):
if self.token_queue == []:
return next(self.supertok)
else:
return self.token_queue.pop(0)
def _conflictive_case(self):
return len(self.token_mem) >= 3 and \
self.token_mem[-3].type == ':=' and \
self.token_mem[-2].type == 'lowerid' and \
self.token_mem[-1].type == '('
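    # Example of the conflict resolved above (sketch). The fragment
    #   x := f (a, b) := g(1)
    # is ambiguous in LL(1): 'f(a, b)' could be a function call, or 'f'
    # could end one assignment with '(a, b)' starting a tuple assignment.
    # After 'x := f (' the lexer scans to the matching ')'; if the next
    # token is ':=', it emits a ';' so the parser sees two statements:
    #   x := f ; (a, b) := g(1)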
def tokenize(self, string, filename='...'):
supertok = self._tokenize_solve_conflict(string, filename)
previous_token = next(supertok)
yield previous_token
open_parens = []
for tok in supertok:
area = position.ProgramAreaNear(tok)
if tok.type in ['upperid', 'lowerid']:
self.warn_if_similar_to_reserved(tok.value, area)
if tok.type == 'ERROR':
msg = i18n.i18n('Malformed input - unrecognized symbol')
raise GbsParserException(msg, position.ProgramAreaNear(tok))
elif tok.type == 'string_start':
msg = i18n.i18n('Unterminated string')
raise GbsParserException(msg, position.ProgramAreaNear(tok))
""" [TODO] Replace this checking by similar check for 'procedure lowerid.upperid' """
if False and previous_token.type in ['procedure'] and tok.type not in ['upperid']:
l1 = i18n.i18n('Found: %s') % tok
l2 = i18n.i18n('procedure name should be an uppercase identifier')
raise GbsParserException('\n'.join([l1, l2]), area)
elif previous_token.type in ['function'] and tok.type not in ['lowerid']:
l1 = i18n.i18n('Found: %s') % tok
l2 = i18n.i18n('function name should be a lowercase identifier')
raise GbsParserException('\n'.join([l1, l2]), area)
elif tok.type in [',', ';', 'not', 'num', 'string'] and previous_token.type == tok.type:
raise GbsParserException(i18n.i18n('Repeated symbol: %s') % (tok,), area)
elif (previous_token.type, tok.type) in [(',', ')'),
(';', ')'),
('(', ','),
]:
msg = i18n.i18n('%s cannot be followed by %s') % (previous_token, tok,)
raise GbsParserException(msg, area)
elif previous_token.type == 'return' and tok.type != '(':
raise GbsParserException(i18n.i18n('return must be followed by "("'), area)
elif len(self.token_mem) >= 3 and self.token_mem[-3].type == 'THROW_ERROR' and self.token_mem[-1].type != 'string':
raise GbsParserException(i18n.i18n('THROW_ERROR can only accept a string'), area)
# check opening/closing parens and braces
if tok.type in ['(', '{']:
open_parens.append(tok)
elif tok.type in [')', '}']:
# check if there is an opening token
if len(open_parens) == 0:
if tok.type == ')':
msg = 'Found closing ")" with no matching open paren'
else:
msg = 'Found closing "}" with no matching open brace'
raise GbsParserException(i18n.i18n(msg), area)
# check if the opening token is of the right kind
opening = open_parens.pop()
if opening.type == '(' and tok.type != ')':
open_area = position.ProgramAreaNear(opening)
l1 = i18n.i18n('Found open "(" with no matching closing paren')
l2 = i18n.i18n('Maybe there is an extra "%s" at %s') % (
tok.type, tok.pos_begin.row_col(),)
raise GbsParserException(i18n.i18n('\n'.join([l1, l2])), open_area)
elif opening.type == '{' and tok.type != '}':
open_area = position.ProgramAreaNear(opening)
l1 = i18n.i18n('Found open "{" with no matching closing brace')
l2 = i18n.i18n('Maybe there is an extra "%s" at %s') % (
tok.type, tok.pos_begin.row_col(),)
raise GbsParserException(i18n.i18n('\n'.join([l1, l2])), open_area)
# check there are no open parens at EOF
if tok.type == 'EOF' and len(open_parens) > 0:
opening = open_parens[-1]
open_area = position.ProgramAreaNear(opening)
if opening.type == '(':
msg = i18n.i18n('Found end of file but there are open parens yet')
raise GbsParserException(i18n.i18n(msg), open_area)
elif opening.type == '{':
msg = i18n.i18n('Found end of file but there are open braces yet')
raise GbsParserException(i18n.i18n(msg), open_area)
yield tok
previous_token = tok
def warn_if_similar_to_reserved(self, value, area):
tl = value.lower()
if tl in self.reserved_lower and self.reserved_lower[tl] != value:
raise GbsParserException(i18n.i18n('Found: %s\nMaybe should be: %s') % (
value, self.reserved_lower[tl]), area)
class GbsParser(bnf_parser.Parser):
"Parser that identifies common parsing errors in Gobstones source files."
def __init__(self, syntax, *args, **kwargs):
bnf_parser.Parser.__init__(self, syntax, *args, **kwargs)
def parse_error(self, nonterminal, previous_token, token):
"Raises a GbstonesParserException describing a parse error."
area = position.ProgramAreaNear(token)
if previous_token.type == 'lowerid' and token.type == '(':
raise GbsParserException(i18n.i18n('Cannot call a function here'), area)
elif previous_token.type == 'upperid' and token.type == '(':
raise GbsParserException(i18n.i18n('Cannot call a procedure here'), area)
elif previous_token.type == 'upperid' and token.type != '(':
msg = i18n.i18n('Procedure name "%s" is missing a "("') % (previous_token.value,)
raise GbsParserException(msg, area)
elif token.type == 'EOF':
raise GbsParserException(i18n.i18n('Premature end of input'), area)
bnf_parser.Parser.parse_error(self, nonterminal, previous_token, token)
class GbsAnalyzer(bnf_parser.Analyzer):
def __init__(self, grammar, warn=std_warn):
bnf_parser.Analyzer.__init__(self, GbsLexer, GbsParser, bnf_contents=grammar, warn=warn)
def create_analizer(grammar_file):
bnf = grammar.i18n.translate(read_file(grammar_file))
return GbsAnalyzer(bnf)
def check_grammar_conflicts(grammar_file):
"""Checks if the BNF grammar has any conflict (an LL(1) prediction
with two productions)."""
create_analizer(grammar_file).parser.check_conflicts()
def parse_string(string, filename='...', toplevel_filename=None, grammar_file=XGbsGrammarFile):
"Parse a string and return an abstract syntax tree."
analyzer = create_analizer(grammar_file)
parsing_stream = analyzer.parse(string, filename)
start_pos = position.Position(string, filename)
tree = ast.ASTBuilder(start_pos).build_ast_from(parsing_stream)
tree.source_filename = filename
if toplevel_filename is None:
tree.toplevel_filename = tree.source_filename
else:
tree.toplevel_filename = toplevel_filename
return tree
def prelude_for_file(filename):
prelude_basename = i18n.i18n('Prelude') + '.gbs'
prelude_filename = os.path.join(os.path.dirname(filename), prelude_basename)
if os.path.exists(prelude_filename) and os.path.basename(filename) != prelude_basename:
return prelude_filename
else:
return None
def get_names(program_text):
regexp = re.compile("(?:(type)\s*([A-Z][A-Za-z_']*)|(?:(procedure)\s*([A-Z][A-Za-z_']*)|(function)[\s]*([a-z][A-Za-z_']*))\s*\(\s*([^)]*?)\s*\))")
matches = regexp.findall(program_text)
names = {}
for parts in matches:
nametype, name = filter(lambda x: x != '', parts[0:6])
names[name] = { "type" : nametype}
if nametype in ["function", "procedure"]:
parameters = parts[6].replace(" ", "").replace("\n", "").replace("\t", "").split(",")
names[name]["parameters"] = parameters
return names
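# Example (sketch) of what the regexp above extracts:
#   get_names("procedure Foo(x, y) {...} function bar(z) {...} type Dir is ...")
#   -> {'Foo': {'type': 'procedure', 'parameters': ['x', 'y']},
#       'bar': {'type': 'function', 'parameters': ['z']},
#       'Dir': {'type': 'type'}}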
def parse_names(string, filename, toplevel_filename=None, grammar_file=XGbsGrammarFile):
names = get_names(string)
prelude_filename = prelude_for_file(filename)
if prelude_filename is not None:
names.update(get_names(open(prelude_filename).read()))
return names
def parse_string_try_prelude(string, filename, toplevel_filename=None, grammar_file=XGbsGrammarFile):
main_program = parse_string(string, filename, toplevel_filename, grammar_file)
prelude_filename = prelude_for_file(filename)
if prelude_filename is not None:
prelude_barename = i18n.i18n('Prelude')
pos = position.Position(string, filename)
main_imports = main_program.children[1].children
main_imports.insert(0, ast.ASTNode([
'import',
bnf_parser.Token('upperid', prelude_barename, pos, pos),
ast.ASTNode([
bnf_parser.Token('lowerid', '*', pos, pos),
], pos, pos)
], pos, pos))
return main_program
def parse_file(filename, grammar_file=XGbsGrammarFile):
"Parse a file and return an abstract syntax tree."
return parse_string_try_prelude(read_file(filename), filename, grammar_file)
def token_stream(string, grammar_file):
return create_analizer(grammar_file).lexer.pure_tokenize(string)
|
gobstones/PyGobstones-Lang
|
pygobstoneslang/lang/gbs_parser.py
|
Python
|
gpl-3.0
| 12,958
|