"""RL advantage estimators."""
import gin
import numpy as np
from trax import fastmath
common_args = ['gamma', 'margin']
def mask_discount(discount, discount_mask):
"""Computes a discount to apply at a given timestep, based on the mask."""
return fastmath.numpy.where(discount_mask, discount, 1.0)
def discounted_returns(rewards, gammas):
"""Computes discounted returns for a trajectory or a batch of them."""
returns = np.zeros_like(rewards)
ret = 0.0
for i in reversed(range(rewards.shape[-1])):
ret = rewards[..., i] + gammas[..., i] * ret
returns[..., i] = ret
return returns
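# Worked example (illustrative addition, not part of the original module):
# for rewards = [[1., 1., 1.]] and a constant gamma of 0.9,
# discounted_returns gives [[1 + .9 * (1 + .9 * 1), 1 + .9 * 1, 1.]]
# = [[2.71, 1.9, 1.0]].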
@gin.configurable(denylist=common_args)
def monte_carlo(gamma, margin):
"""Calculate Monte Carlo advantage.
We assume the values are a tensor of shape [batch_size, length] and this
is the same shape as rewards and returns.
Args:
gamma: float, gamma parameter for TD from the underlying task
margin: number of extra steps in the sequence
Returns:
Function (rewards, returns, values, dones, discount_mask) -> advantages,
where advantages is an array of shape [batch_size, length - margin].
"""
del gamma
def estimator(rewards, returns, values, dones, discount_mask):
del discount_mask
(_, length) = returns.shape
# Make sure that the future returns and values at "done" states are zero.
returns[dones] = rewards[dones]
values[dones] = 0
return (returns - values)[:, :(length - margin)]
return estimator
@gin.configurable(denylist=common_args)
def td_k(gamma, margin):
"""Calculate TD-k advantage.
The k parameter is assumed to be the same as margin.
We calculate advantage(s_i) as:
gamma^k * value(s_{i + k}) - value(s_i) + discounted_rewards
where k = margin and discounted_rewards is the sum of the rewards over
these k steps, discounted by powers of gamma.
Args:
gamma: float, gamma parameter for TD from the underlying task
margin: number of extra steps in the sequence
Returns:
Function (rewards, returns, values, dones, discount_mask) -> advantages,
where advantages is an array of shape [batch_size, length - margin].
"""
def estimator(rewards, returns, values, dones, discount_mask):
del returns
gammas = mask_discount(gamma, discount_mask)
# Here we calculate advantage with TD-k, where k=margin.
k = margin
assert k > 0
advantages = np.zeros_like(values[:, k:])
discount = 1.0
for i in range(margin):
advantages += discount * rewards[:, i:-(margin - i)]
discount *= gammas[:, i:-(margin - i)]
advantages += discount * values[:, k:]
# Zero out the future returns at "done" states.
dones = dones[:, :-k]
# TPU friendly version of the formula
# advantages[dones] = rewards[:, :-k][dones]
advantages = fastmath.index_update(advantages,
dones,
rewards[:, :-k][dones])
# Subtract the baseline (value).
advantages -= values[:, :-k]
return advantages
return estimator
@gin.configurable(denylist=common_args)
def td_lambda(gamma, margin, lambda_=0.95):
"""Calculate TD-lambda advantage.
The estimated return is an exponentially-weighted average of different TD-k
returns.
Args:
gamma: float, gamma parameter for TD from the underlying task
margin: number of extra steps in the sequence
lambda_: float, the lambda parameter of TD-lambda
Returns:
Function (rewards, returns, values, dones, discount_mask) -> advantages,
where advantages is an array of shape [batch_size, length - margin].
"""
def estimator(rewards, returns, values, dones, discount_mask):
gammas = mask_discount(gamma, discount_mask)
lambdas = mask_discount(lambda_, discount_mask)
td_returns = np.zeros_like(returns)
(_, length) = returns.shape
td_returns[:, -1] = values[:, -1]
for i in reversed(range(length - 1)):
lambda_i = lambdas[:, i]
td_returns[:, i] = rewards[:, i] + (1 - dones[:, i]) * gammas[:, i] * (
(1 - lambda_i) * values[:, i + 1] + lambda_i * td_returns[:, i + 1]
)
return (td_returns - values)[:, :(returns.shape[1] - margin)]
return estimator
@gin.configurable(denylist=common_args)
def gae(gamma, margin, lambda_=0.95):
"""Calculate Generalized Advantage Estimation.
Estimates advantages by bootstrapping off subsequent state values; see
Generalized Advantage Estimation, https://arxiv.org/abs/1506.02438.
Args:
gamma: float, gamma parameter for TD from the underlying task
margin: number of extra steps in the sequence
lambda_: float, the lambda parameter of GAE
Returns:
Function (rewards, returns, values, dones, discount_mask) -> advantages,
where advantages is an array of shape [batch_size, length - margin].
"""
def estimator(rewards, returns, values, dones, discount_mask):
del returns
gammas = mask_discount(gamma, discount_mask)
lambdas = mask_discount(lambda_, discount_mask)
advantages = np.zeros_like(rewards)
(_, length) = rewards.shape
for i in reversed(range(length - 1)):
bellman_delta = rewards[:, i] - values[:, i] + (1 - dones[:, i]) * (
gammas[:, i] * values[:, i + 1]
)
advantages[:, i] = bellman_delta + (1 - dones[:, i]) * (
gammas[:, i] * lambdas[:, i] * advantages[:, i + 1]
)
return advantages[:, :(rewards.shape[1] - margin)]
return estimator
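if __name__ == '__main__':
  # Minimal smoke test (an illustrative addition, not part of the original
  # file): run the GAE estimator on a toy batch of one 4-step trajectory.
  # All `toy_*` arrays below are hypothetical example data.
  toy_rewards = np.array([[1.0, 0.0, 1.0, 0.0]])
  toy_values = np.array([[0.5, 0.4, 0.3, 0.2]])
  toy_dones = np.zeros((1, 4), dtype=bool)
  toy_mask = np.ones((1, 4), dtype=bool)
  toy_gammas = np.asarray(mask_discount(0.99, toy_mask))
  toy_returns = discounted_returns(toy_rewards, toy_gammas)
  toy_advantages = gae(gamma=0.99, margin=1)(
      toy_rewards, toy_returns, toy_values, toy_dones, toy_mask)
  assert toy_advantages.shape == (1, 3)  # [batch_size, length - margin]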
|
{
"content_hash": "baa678503037197b99aad49ffb914f81",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 78,
"avg_line_length": 33.67701863354037,
"alnum_prop": 0.6575064551825894,
"repo_name": "google/trax",
"id": "3825a856a902d8abc6a297d4c3507acc658f7252",
"size": "6019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trax/rl/advantages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2441406"
},
{
"name": "Python",
"bytes": "2582897"
},
{
"name": "Shell",
"bytes": "6619"
}
],
"symlink_target": ""
}
|
import importlib
from selenium import webdriver
from SeleniumLibrary import BrowserManagementKeywords
from SeleniumLibrary.utils import is_truthy, is_falsy
from SeleniumLibrary.keywords.webdrivertools import WebDriverCreator, SeleniumOptions
from msedge.selenium_tools import Edge as EdgePluginDriver
class EdgePlugin(BrowserManagementKeywords):
"""
This plugin is used to adapt SeleniumLibrary to run Edge browser
(even chromium based version) with stable selenium version (3.141).
It uses special msedge-selenium-tools
that allows driving the new Microsoft Edge (Chromium) browser
and use the latest functionality with no need to update to the alpha 4th selenium version.
"""
def __init__(self, ctx):
BrowserManagementKeywords.__init__(self, ctx)
self._webdriver_creator = _EdgePluginWebDriverCreator(self.log_dir)
class _EdgePluginWebDriverCreator(WebDriverCreator):
def __init__(self, log_dir):
super().__init__(log_dir)
self.log_dir = log_dir
self.selenium_options = _EdgePluginSeleniumOptions()
def create_edge(self, desired_capabilities, remote_url, options=None, service_log_path=None,
executable_path='msedgedriver.exe'):
if is_truthy(remote_url):
default_caps = webdriver.DesiredCapabilities.EDGE.copy()
desired_capabilities = self._remote_capabilities_resolver(
desired_capabilities, default_caps)
return self._remote(desired_capabilities, remote_url)
if is_falsy(executable_path):
executable_path = self._get_executable_path(EdgePluginDriver)
return EdgePluginDriver(options=options,
service_log_path=service_log_path,
executable_path=executable_path,
**desired_capabilities)
class _EdgePluginSeleniumOptions(SeleniumOptions):
def _import_options(self, browser):
if browser == 'edge':
options = importlib.import_module('msedge.selenium_tools.options')
return options.Options
return super(_EdgePluginSeleniumOptions, self)._import_options(browser)
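# Usage sketch (illustrative addition): SeleniumLibrary loads plugins via
# its documented `plugins` import-time argument, e.g. in a Robot Framework
# suite:
#
#   Library    SeleniumLibrary    plugins=Zoomba.Helpers.EdgePlugin
#
# after which `Open Browser    about:blank    edge` is routed through
# _EdgePluginWebDriverCreator.create_edge above and drives Chromium Edge
# through msedge-selenium-tools instead of stock Selenium 3.141.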
|
{
"content_hash": "bb523cead90537d9d5f759cfd4ec5638",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 45.666666666666664,
"alnum_prop": 0.6884124087591241,
"repo_name": "Accruent/zoomba",
"id": "ee2ca5e12d07f7b9d04c5527b6f491ca0e51f25c",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Zoomba/Helpers/EdgePlugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "205191"
},
{
"name": "RobotFramework",
"bytes": "24677"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="isosurface", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs,
)
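# Usage sketch (illustrative addition): validators are normally invoked by
# plotly's generated graph objects, but the base-class `validate_coerce`
# entry point can be exercised directly:
#
#   v = OpacityValidator()
#   v.validate_coerce(0.5)  # accepted: within [min=0, max=1]
#   v.validate_coerce(1.5)  # rejected: raises ValueError (out of range)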
|
{
"content_hash": "b9d583c444da07b413f2debfb5df2a52",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 36.61538461538461,
"alnum_prop": 0.592436974789916,
"repo_name": "plotly/plotly.py",
"id": "a3357c3ab92e77d2c7b98d8e2f6d7ac18d4eaa28",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/_opacity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""Utilities for testing `LinearOperator` and sub-classes."""
import abc
import itertools
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load as load_model
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import save as save_model
from tensorflow.python.util import nest
class OperatorShapesInfo:
"""Object encoding expected shape for a test.
Encodes the expected shape of a matrix for a test. Also
allows additional metadata for the test harness.
"""
def __init__(self, shape, **kwargs):
self.shape = shape
self.__dict__.update(kwargs)
class CheckTapeSafeSkipOptions:
# Skip checking this particular method.
DETERMINANT = "determinant"
DIAG_PART = "diag_part"
LOG_ABS_DETERMINANT = "log_abs_determinant"
TRACE = "trace"
class LinearOperatorDerivedClassTest(test.TestCase, metaclass=abc.ABCMeta):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y, check_dtype=False):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
if check_dtype:
self.assertDTypeEqual(x, y.dtype)
@staticmethod
def adjoint_options():
return [False, True]
@staticmethod
def adjoint_arg_options():
return [False, True]
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@staticmethod
def use_placeholder_options():
return [False, True]
@staticmethod
def use_blockwise_arg():
return False
@staticmethod
def operator_shapes_infos():
"""Returns list of OperatorShapesInfo, encapsulating the shape to test."""
raise NotImplementedError("operator_shapes_infos has not been implemented.")
@abc.abstractmethod
def operator_and_matrix(
self, shapes_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
together, and is used by tests.
Args:
shapes_info: `OperatorShapesInfo`, encoding shape information about the
operator.
dtype: Numpy dtype. Data type of returned array/operator.
use_placeholder: Python bool. If True, initialize the operator with a
placeholder of undefined shape and correct dtype.
ensure_self_adjoint_and_pd: If `True`,
construct this operator to be Hermitian Positive Definite, as well
as ensuring the hints `is_positive_definite` and `is_self_adjoint`
are set.
This is useful for testing methods such as `cholesky`.
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
raise NotImplementedError("Not implemented yet.")
@abc.abstractmethod
def make_rhs(self, operator, adjoint, with_batch=True):
"""Make a rhs appropriate for calling operator.solve(rhs).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `rhs` with the same batch
shape as operator, and otherwise create a matrix without any batch
shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_rhs is not defined.")
@abc.abstractmethod
def make_x(self, operator, adjoint, with_batch=True):
"""Make an 'x' appropriate for calling operator.matmul(x).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making an 'x' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `x` with the same batch shape
as operator, and otherwise create a matrix without any batch shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_x is not defined.")
@staticmethod
def skip_these_tests():
"""List of test names to skip."""
# Subclasses should over-ride if they want to skip some tests.
# To skip "test_foo", add "foo" to this list.
return []
@staticmethod
def optional_tests():
"""List of optional test names to run."""
# Subclasses should over-ride if they want to add optional tests.
# To add "test_foo", add "foo" to this list.
return []
def assertRaisesError(self, msg):
"""assertRaisesRegexp or OpError, depending on context.executing_eagerly."""
if context.executing_eagerly():
return self.assertRaisesRegexp(Exception, msg)
return self.assertRaisesOpError(msg)
def check_convert_variables_to_tensors(self, operator):
"""Checks that internal Variables are correctly converted to Tensors."""
self.assertIsInstance(operator, composite_tensor.CompositeTensor)
tensor_operator = composite_tensor.convert_variables_to_tensors(operator)
self.assertIs(type(operator), type(tensor_operator))
self.assertEmpty(tensor_operator.variables)
self._check_tensors_equal_variables(operator, tensor_operator)
def _check_tensors_equal_variables(self, obj, tensor_obj):
"""Checks that Variables in `obj` have equivalent Tensors in `tensor_obj."""
if isinstance(obj, variables.Variable):
self.assertAllClose(ops.convert_to_tensor(obj),
ops.convert_to_tensor(tensor_obj))
elif isinstance(obj, composite_tensor.CompositeTensor):
params = getattr(obj, "parameters", {})
tensor_params = getattr(tensor_obj, "parameters", {})
self.assertAllEqual(params.keys(), tensor_params.keys())
self._check_tensors_equal_variables(params, tensor_params)
elif nest.is_mapping(obj):
for k, v in obj.items():
self._check_tensors_equal_variables(v, tensor_obj[k])
elif nest.is_nested(obj):
for x, y in zip(obj, tensor_obj):
self._check_tensors_equal_variables(x, y)
else:
# We only check Tensor, CompositeTensor, and nested structure parameters.
pass
def check_tape_safe(self, operator, skip_options=None):
"""Check gradients are not None w.r.t. operator.variables.
Meant to be called from the derived class.
This ensures grads are not None w.r.t. every variable in
operator.variables. If more fine-grained testing is needed, a custom test
should be written.
Args:
operator: LinearOperator. Exact checks done will depend on hints.
skip_options: Optional list of CheckTapeSafeSkipOptions.
Makes this test skip particular checks.
"""
skip_options = skip_options or []
if not operator.variables:
raise AssertionError("`operator.variables` was empty")
def _assert_not_none(iterable):
for item in iterable:
self.assertIsNotNone(item)
# Tape tests that can be run on every operator below.
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.adjoint().to_dense(), operator.variables))
x = math_ops.cast(
array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype)
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.matvec(x), operator.variables))
# Tests for square, but possibly non-singular operators below.
if not operator.is_square:
return
for option in [
CheckTapeSafeSkipOptions.DETERMINANT,
CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT,
CheckTapeSafeSkipOptions.DIAG_PART,
CheckTapeSafeSkipOptions.TRACE,
]:
with backprop.GradientTape() as tape:
if option not in skip_options:
_assert_not_none(
tape.gradient(getattr(operator, option)(), operator.variables))
# Tests for non-singular operators below.
if operator.is_non_singular is False: # pylint: disable=g-bool-id-comparison
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.inverse().to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.solvevec(x), operator.variables))
# Tests for SPD operators below.
if not (operator.is_self_adjoint and operator.is_positive_definite):
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.cholesky().to_dense(), operator.variables))
# pylint:disable=missing-docstring
def _test_slicing(use_placeholder, shapes_info, dtype):
def test_slicing(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
batch_shape = shapes_info.shape[:-2]
# Don't bother slicing for uninteresting batch shapes.
if not batch_shape or batch_shape[0] <= 1:
return
slices = [slice(1, -1)]
if len(batch_shape) > 1:
# Slice out the last member.
slices += [..., slice(0, 1)]
sliced_operator = operator[slices]
matrix_slices = slices + [slice(None), slice(None)]
sliced_matrix = mat[matrix_slices]
sliced_op_dense = sliced_operator.to_dense()
op_dense_v, mat_v = sess.run([sliced_op_dense, sliced_matrix])
self.assertAC(op_dense_v, mat_v)
return test_slicing
def _test_to_dense(use_placeholder, shapes_info, dtype):
def test_to_dense(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_dense.shape)
op_dense_v, mat_v = sess.run([op_dense, mat])
self.assertAC(op_dense_v, mat_v)
return test_to_dense
def _test_det(use_placeholder, shapes_info, dtype):
def test_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape[:-2], op_det.shape)
op_det_v, mat_det_v = sess.run(
[op_det, linalg_ops.matrix_determinant(mat)])
self.assertAC(op_det_v, mat_det_v)
return test_det
def _test_log_abs_det(use_placeholder, shapes_info, dtype):
def test_log_abs_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
if not use_placeholder:
self.assertAllEqual(
shapes_info.shape[:-2], op_log_abs_det.shape)
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
[op_log_abs_det, mat_log_abs_det])
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
return test_log_abs_det
def _test_operator_matmul_with_same_type(use_placeholder, shapes_info, dtype):
"""op_a.matmul(op_b), in the case where the same type is returned."""
def test_operator_matmul_with_same_type(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator_a, mat_a = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
operator_b, mat_b = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
mat_matmul = math_ops.matmul(mat_a, mat_b)
op_matmul = operator_a.matmul(operator_b)
mat_matmul_v, op_matmul_v = sess.run([mat_matmul, op_matmul.to_dense()])
self.assertIsInstance(op_matmul, operator_a.__class__)
self.assertAC(mat_matmul_v, op_matmul_v)
return test_operator_matmul_with_same_type
def _test_operator_solve_with_same_type(use_placeholder, shapes_info, dtype):
"""op_a.solve(op_b), in the case where the same type is returned."""
def test_operator_solve_with_same_type(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator_a, mat_a = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
operator_b, mat_b = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(mat_a, mat_b)
op_solve = operator_a.solve(operator_b)
mat_solve_v, op_solve_v = sess.run([mat_solve, op_solve.to_dense()])
self.assertIsInstance(op_solve, operator_a.__class__)
self.assertAC(mat_solve_v, op_solve_v)
return test_operator_solve_with_same_type
def _test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
x = self.make_x(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, compute A X^H^H = A X.
if adjoint_arg:
op_matmul = operator.matmul(
linalg.adjoint(x),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_matmul = operator.matmul(x, adjoint=adjoint)
mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
if not use_placeholder:
self.assertAllEqual(op_matmul.shape,
mat_matmul.shape)
# If the operator is blockwise, test both blockwise `x` and `Tensor` `x`;
# else test only `Tensor` `x`. In both cases, evaluate all results in a
# single `sess.run` call to avoid re-sampling the random `x` in graph mode.
if blockwise_arg and len(operator.operators) > 1:
# pylint: disable=protected-access
block_dimensions = (
operator._block_range_dimensions() if adjoint else
operator._block_domain_dimensions())
block_dimensions_fn = (
operator._block_range_dimension_tensors if adjoint else
operator._block_domain_dimension_tensors)
# pylint: enable=protected-access
split_x = linear_operator_util.split_arg_into_blocks(
block_dimensions,
block_dimensions_fn,
x, axis=-2)
if adjoint_arg:
split_x = [linalg.adjoint(y) for y in split_x]
split_matmul = operator.matmul(
split_x, adjoint=adjoint, adjoint_arg=adjoint_arg)
self.assertEqual(len(split_matmul), len(operator.operators))
split_matmul = linear_operator_util.broadcast_matrix_batch_dims(
split_matmul)
fused_block_matmul = array_ops.concat(split_matmul, axis=-2)
op_matmul_v, mat_matmul_v, fused_block_matmul_v = sess.run([
op_matmul, mat_matmul, fused_block_matmul])
# Check that the operator applied to blockwise input gives the same result
# as matrix multiplication.
self.assertAC(fused_block_matmul_v, mat_matmul_v)
else:
op_matmul_v, mat_matmul_v = sess.run([op_matmul, mat_matmul])
# Check that the operator applied to a `Tensor` gives the same result as
# matrix multiplication.
self.assertAC(op_matmul_v, mat_matmul_v)
def _test_matmul(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg):
def test_matmul(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch=True)
return test_matmul
def _test_matmul_with_broadcast(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg):
def test_matmul_with_broadcast(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch=False)
return test_matmul_with_broadcast
def _test_adjoint(use_placeholder, shapes_info, dtype):
def test_adjoint(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_adjoint = operator.adjoint().to_dense()
op_adjoint_h = operator.H.to_dense()
mat_adjoint = linalg.adjoint(mat)
op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
[op_adjoint, op_adjoint_h, mat_adjoint])
self.assertAC(mat_adjoint_v, op_adjoint_v)
self.assertAC(mat_adjoint_v, op_adjoint_h_v)
return test_adjoint
def _test_cholesky(use_placeholder, shapes_info, dtype):
def test_cholesky(self):
with self.test_session(graph=ops.Graph()) as sess:
# This test fails to pass for float32 type by a small margin if we use
# random_seed.DEFAULT_GRAPH_SEED. The correct fix would be relaxing the
# test tolerance but the tolerance in this test is configured universally
# depending on its type. So instead of lowering tolerance for all tests
# or special casing this, just use a seed, +2, that makes this test pass.
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + 2
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
op_chol = operator.cholesky().to_dense()
mat_chol = linalg_ops.cholesky(mat)
op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol])
self.assertAC(mat_chol_v, op_chol_v)
return test_cholesky
def _test_eigvalsh(use_placeholder, shapes_info, dtype):
def test_eigvalsh(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
# Eigenvalues are real, so we'll cast these to float64 and sort
# for comparison.
op_eigvals = sort_ops.sort(
math_ops.cast(operator.eigvals(), dtype=dtypes.float64), axis=-1)
if dtype.is_complex:
mat = math_ops.cast(mat, dtype=dtypes.complex128)
else:
mat = math_ops.cast(mat, dtype=dtypes.float64)
mat_eigvals = sort_ops.sort(
math_ops.cast(
linalg_ops.self_adjoint_eigvals(mat), dtype=dtypes.float64),
axis=-1)
op_eigvals_v, mat_eigvals_v = sess.run([op_eigvals, mat_eigvals])
atol = self._atol[dtype] # pylint: disable=protected-access
rtol = self._rtol[dtype] # pylint: disable=protected-access
if dtype == dtypes.float32 or dtype == dtypes.complex64:
atol = 2e-4
rtol = 2e-4
self.assertAllClose(op_eigvals_v, mat_eigvals_v, atol=atol, rtol=rtol)
return test_eigvalsh
def _test_cond(use_placeholder, shapes_info, dtype):
def test_cond(self):
with self.test_session(graph=ops.Graph()) as sess:
# svd does not work with zero dimensional matrices, so we'll
# skip
if 0 in shapes_info.shape[-2:]:
return
# ROCm platform does not yet support complex types
if test.is_built_with_rocm() and \
((dtype == dtypes.complex64) or (dtype == dtypes.complex128)):
return
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
# Ensure self-adjoint and PD so we get finite condition numbers.
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
# Compute the condition number from the singular values of the dense
# matrix for comparison.
op_cond = operator.cond()
s = math_ops.abs(linalg_ops.svd(mat, compute_uv=False))
mat_cond = math_ops.reduce_max(s, axis=-1) / math_ops.reduce_min(
s, axis=-1)
op_cond_v, mat_cond_v = sess.run([op_cond, mat_cond])
atol_override = {
dtypes.float16: 1e-2,
dtypes.float32: 1e-3,
dtypes.float64: 1e-6,
dtypes.complex64: 1e-3,
dtypes.complex128: 1e-6,
}
rtol_override = {
dtypes.float16: 1e-2,
dtypes.float32: 1e-3,
dtypes.float64: 1e-4,
dtypes.complex64: 1e-3,
dtypes.complex128: 1e-6,
}
atol = atol_override[dtype]
rtol = rtol_override[dtype]
self.assertAllClose(op_cond_v, mat_cond_v, atol=atol, rtol=rtol)
return test_cond
def _test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
rhs = self.make_rhs(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, solve A X = (rhs^H)^H = rhs.
if adjoint_arg:
op_solve = operator.solve(
linalg.adjoint(rhs),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_solve = operator.solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(
mat, rhs, adjoint=adjoint)
if not use_placeholder:
self.assertAllEqual(op_solve.shape,
mat_solve.shape)
# If the operator is blockwise, test both blockwise rhs and `Tensor` rhs;
# else test only `Tensor` rhs. In both cases, evaluate all results in a
# single `sess.run` call to avoid re-sampling the random rhs in graph mode.
if blockwise_arg and len(operator.operators) > 1:
# pylint: disable=protected-access
block_dimensions = (
operator._block_range_dimensions() if adjoint else
operator._block_domain_dimensions())
block_dimensions_fn = (
operator._block_range_dimension_tensors if adjoint else
operator._block_domain_dimension_tensors)
# pylint: enable=protected-access
split_rhs = linear_operator_util.split_arg_into_blocks(
block_dimensions,
block_dimensions_fn,
rhs, axis=-2)
if adjoint_arg:
split_rhs = [linalg.adjoint(y) for y in split_rhs]
split_solve = operator.solve(
split_rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
self.assertEqual(len(split_solve), len(operator.operators))
split_solve = linear_operator_util.broadcast_matrix_batch_dims(
split_solve)
fused_block_solve = array_ops.concat(split_solve, axis=-2)
op_solve_v, mat_solve_v, fused_block_solve_v = sess.run([
op_solve, mat_solve, fused_block_solve])
# Check that the operator and matrix give the same solution when the rhs
# is blockwise.
self.assertAC(mat_solve_v, fused_block_solve_v)
else:
op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
# Check that the operator and matrix give the same solution when the rhs is
# a `Tensor`.
self.assertAC(op_solve_v, mat_solve_v)
def _test_solve(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg):
def test_solve(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch=True)
return test_solve
def _test_solve_with_broadcast(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg):
def test_solve_with_broadcast(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
blockwise_arg,
with_batch=False)
return test_solve_with_broadcast
def _test_inverse(use_placeholder, shapes_info, dtype):
def test_inverse(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_inverse_v, mat_inverse_v = sess.run([
operator.inverse().to_dense(), linalg.inv(mat)])
self.assertAC(op_inverse_v, mat_inverse_v, check_dtype=True)
return test_inverse
def _test_trace(use_placeholder, shapes_info, dtype):
def test_trace(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_trace = operator.trace()
mat_trace = math_ops.trace(mat)
if not use_placeholder:
self.assertAllEqual(op_trace.shape, mat_trace.shape)
op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace])
self.assertAC(op_trace_v, mat_trace_v)
return test_trace
def _test_add_to_tensor(use_placeholder, shapes_info, dtype):
def test_add_to_tensor(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_plus_2mat = operator.add_to_tensor(2 * mat)
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_plus_2mat.shape)
op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat])
self.assertAC(op_plus_2mat_v, 3 * mat_v)
return test_add_to_tensor
def _test_diag_part(use_placeholder, shapes_info, dtype):
def test_diag_part(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_diag_part = operator.diag_part()
mat_diag_part = array_ops.matrix_diag_part(mat)
if not use_placeholder:
self.assertAllEqual(mat_diag_part.shape,
op_diag_part.shape)
op_diag_part_, mat_diag_part_ = sess.run(
[op_diag_part, mat_diag_part])
self.assertAC(op_diag_part_, mat_diag_part_)
return test_diag_part
def _test_composite_tensor(use_placeholder, shapes_info, dtype):
def test_composite_tensor(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
self.assertIsInstance(operator, composite_tensor.CompositeTensor)
flat = nest.flatten(operator, expand_composites=True)
unflat = nest.pack_sequence_as(operator, flat, expand_composites=True)
self.assertIsInstance(unflat, type(operator))
# Input the operator to a `tf.function`.
x = self.make_x(operator, adjoint=False)
op_y = def_function.function(lambda op: op.matmul(x))(unflat)
mat_y = math_ops.matmul(mat, x)
if not use_placeholder:
self.assertAllEqual(mat_y.shape, op_y.shape)
# Test while_loop.
def body(op):
return type(op)(**op.parameters),
op_out, = while_v2.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(operator,),
maximum_iterations=3)
loop_y = op_out.matmul(x)
op_y_, loop_y_, mat_y_ = sess.run([op_y, loop_y, mat_y])
self.assertAC(op_y_, mat_y_)
self.assertAC(loop_y_, mat_y_)
# Ensure that the `TypeSpec` can be encoded.
nested_structure_coder.encode_structure(operator._type_spec) # pylint: disable=protected-access
return test_composite_tensor
def _test_saved_model(use_placeholder, shapes_info, dtype):
def test_saved_model(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
x = self.make_x(operator, adjoint=False)
class Model(module.Module):
def __init__(self, init_x):
self.x = nest.map_structure(
lambda x_: variables.Variable(x_, shape=None),
init_x)
@def_function.function(input_signature=(operator._type_spec,)) # pylint: disable=protected-access
def do_matmul(self, op):
return op.matmul(self.x)
saved_model_dir = self.get_temp_dir()
m1 = Model(x)
sess.run([v.initializer for v in m1.variables])
sess.run(m1.x.assign(m1.x + 1.))
save_model.save(m1, saved_model_dir)
m2 = load_model.load(saved_model_dir)
sess.run(m2.x.initializer)
sess.run(m2.x.assign(m2.x + 1.))
y_op = m2.do_matmul(operator)
y_mat = math_ops.matmul(mat, m2.x)
y_op_, y_mat_ = sess.run([y_op, y_mat])
self.assertAC(y_op_, y_mat_)
return test_saved_model
# pylint:enable=missing-docstring
def add_tests(test_cls):
"""Add tests for LinearOperator methods."""
test_name_dict = {
# All test classes should be added here.
"add_to_tensor": _test_add_to_tensor,
"adjoint": _test_adjoint,
"cholesky": _test_cholesky,
"cond": _test_cond,
"composite_tensor": _test_composite_tensor,
"det": _test_det,
"diag_part": _test_diag_part,
"eigvalsh": _test_eigvalsh,
"inverse": _test_inverse,
"log_abs_det": _test_log_abs_det,
"operator_matmul_with_same_type": _test_operator_matmul_with_same_type,
"operator_solve_with_same_type": _test_operator_solve_with_same_type,
"matmul": _test_matmul,
"matmul_with_broadcast": _test_matmul_with_broadcast,
"saved_model": _test_saved_model,
"slicing": _test_slicing,
"solve": _test_solve,
"solve_with_broadcast": _test_solve_with_broadcast,
"to_dense": _test_to_dense,
"trace": _test_trace,
}
optional_tests = [
# Test classes need to explicitly add these to cls.optional_tests.
"operator_matmul_with_same_type",
"operator_solve_with_same_type",
]
tests_with_adjoint_args = [
"matmul",
"matmul_with_broadcast",
"solve",
"solve_with_broadcast",
]
if set(test_cls.skip_these_tests()).intersection(test_cls.optional_tests()):
raise ValueError(
"Test class {test_cls} had intersecting 'skip_these_tests' "
f"{test_cls.skip_these_tests()} and 'optional_tests' "
f"{test_cls.optional_tests()}.")
for name, test_template_fn in test_name_dict.items():
if name in test_cls.skip_these_tests():
continue
if name in optional_tests and name not in test_cls.optional_tests():
continue
for dtype, use_placeholder, shape_info in itertools.product(
test_cls.dtypes_to_test(),
test_cls.use_placeholder_options(),
test_cls.operator_shapes_infos()):
base_test_name = "_".join([
"test", name, "_shape={},dtype={},use_placeholder={}".format(
shape_info.shape, dtype, use_placeholder)])
if name in tests_with_adjoint_args:
for adjoint in test_cls.adjoint_options():
for adjoint_arg in test_cls.adjoint_arg_options():
test_name = base_test_name + ",adjoint={},adjoint_arg={}".format(
adjoint, adjoint_arg)
if hasattr(test_cls, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(
test_cls,
test_name,
test_util.run_deprecated_v1(
test_template_fn( # pylint: disable=too-many-function-args
use_placeholder, shape_info, dtype, adjoint,
adjoint_arg, test_cls.use_blockwise_arg())))
else:
if hasattr(test_cls, base_test_name):
raise RuntimeError("Test %s defined more than once" % base_test_name)
setattr(
test_cls,
base_test_name,
test_util.run_deprecated_v1(test_template_fn(
use_placeholder, shape_info, dtype)))
class SquareLinearOperatorDerivedClassTest(
LinearOperatorDerivedClassTest, metaclass=abc.ABCMeta):
"""Base test class appropriate for square operators.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((0, 0)),
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# This operator is square, so rhs and x will have same shape.
# The adjoint value makes no difference because a square operator's shape
# does not change under adjoint, but flip it anyway to be pedantic.
return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch)
def make_x(self, operator, adjoint, with_batch=True):
# Value of adjoint makes no difference because the operator is square.
# Return the number of systems to solve, R, equal to 1 or 2.
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
class NonSquareLinearOperatorDerivedClassTest(
LinearOperatorDerivedClassTest, metaclass=abc.ABCMeta):
"""Base test class appropriate for generic rectangular operators.
Square shapes are never tested by this class, so if you want to test your
operator with a square shape, create two test classes, the other subclassing
SquareLinearOperatorFullMatrixTest.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def skip_these_tests():
"""List of test names to skip."""
return [
"cholesky",
"eigvalsh",
"inverse",
"solve",
"solve_with_broadcast",
"det",
"log_abs_det",
]
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((2, 1)),
shapes_info((1, 2)),
shapes_info((1, 3, 2)),
shapes_info((3, 3, 4)),
shapes_info((2, 1, 2, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# TODO(langmore) Add once we're testing solve_ls.
raise NotImplementedError(
"make_rhs not implemented because we don't test solve")
def make_x(self, operator, adjoint, with_batch=True):
# Return the number of systems for the argument 'x' for .matmul(x)
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
if adjoint:
n = operator.range_dimension.value
else:
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
if adjoint:
n = operator.range_dimension_tensor()
else:
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
def random_positive_definite_matrix(shape,
dtype,
oversampling_ratio=4,
force_well_conditioned=False):
"""[batch] positive definite Wisart matrix.
A Wishart(N, S) matrix is the S sample covariance matrix of an N-variate
(standard) Normal random variable.
Args:
shape: `TensorShape` or Python list. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype.
oversampling_ratio: S / N in the above. If S < N, the matrix will be
singular (unless `force_well_conditioned is True`).
force_well_conditioned: Python bool. If `True`, add `1` to the diagonal
of the Wishart matrix, then divide by 2, ensuring most eigenvalues are
close to 1.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
if not tensor_util.is_tf_type(shape):
shape = tensor_shape.TensorShape(shape)
# Matrix must be square.
shape.dims[-1].assert_is_compatible_with(shape.dims[-2])
shape = shape.as_list()
n = shape[-2]
s = oversampling_ratio * shape[-1]
wigner_shape = shape[:-2] + [n, s]
with ops.name_scope("random_positive_definite_matrix"):
wigner = random_normal(
wigner_shape,
dtype=dtype,
stddev=math_ops.cast(1 / np.sqrt(s), dtype.real_dtype))
wishart = math_ops.matmul(wigner, wigner, adjoint_b=True)
if force_well_conditioned:
wishart += linalg_ops.eye(n, dtype=dtype)
wishart /= math_ops.cast(2, dtype)
return wishart
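# Illustrative call (an added sketch, assuming an eager or session context):
#
#   mat = random_positive_definite_matrix(
#       [2, 3, 3], dtypes.float64, force_well_conditioned=True)
#
# yields a batch of two 3x3 positive definite matrices whose eigenvalues
# cluster near 1, since the Wishart sample is shifted by the identity and
# then halved.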
def random_tril_matrix(shape,
dtype,
force_well_conditioned=False,
remove_upper=True):
"""[batch] lower triangular matrix.
Args:
shape: `TensorShape` or Python `list`. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype
force_well_conditioned: Python `bool`. If `True`, returned matrix will have
eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit
normal random variables.
remove_upper: Python `bool`.
If `True`, zero out the strictly upper triangle.
If `False`, the lower triangle of returned matrix will have desired
properties, but will not have the strictly upper triangle zero'd out.
Returns:
`Tensor` with desired shape and dtype.
"""
with ops.name_scope("random_tril_matrix"):
# Totally random matrix. Has no nice properties.
tril = random_normal(shape, dtype=dtype)
if remove_upper:
tril = array_ops.matrix_band_part(tril, -1, 0)
# Create a diagonal with entries having modulus in [1, 2].
if force_well_conditioned:
maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
diag = random_sign_uniform(
shape[:-1], dtype=dtype, minval=1., maxval=maxval)
tril = array_ops.matrix_set_diag(tril, diag)
return tril
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
"""Tensor with (possibly complex) Gaussian entries.
Samples are distributed like
```
N(mean, stddev^2), if dtype is real,
X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_normal"):
samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 1234
more_samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
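# Illustrative call (added sketch): for a complex dtype the real and
# imaginary parts are drawn independently, e.g.
#
#   z = random_normal([2, 2], dtype=dtypes.complex128, seed=42)
#
# draws X, Y ~ N(0, 1) with decorrelated seeds (seed and seed + 1234) and
# returns X + iY.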
def random_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) Uniform entries.
Samples are distributed like
```
Uniform[minval, maxval], if dtype is real,
X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_uniform"):
samples = random_ops.random_uniform(
shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 12345
more_samples = random_ops.random_uniform(
shape,
dtype=dtype.real_dtype,
minval=minval,
maxval=maxval,
seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def random_normal_correlated_columns(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
eps=1e-4,
seed=None):
"""Batch matrix with (possibly complex) Gaussian entries and correlated cols.
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
living close to an embedded hyperplane.
Suppose `shape[-2:] = (M, N)`.
If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
If `M >= N`, then the columns of `A` will be made almost dependent as follows:
```
L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
E = a random normal M x N matrix, mean = 0, stddev = eps
mu = a constant M x N matrix, equal to the argument "mean"
A = G + E + mu
```
Args:
shape: Python list of integers.
Shape of the returned tensor. Must be at least length two.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
eps: Distance each column is perturbed from the low-dimensional subspace.
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
Raises:
ValueError: If `shape` is not at least length 2.
"""
dtype = dtypes.as_dtype(dtype)
if len(shape) < 2:
raise ValueError(
"Argument shape must be at least length 2. Found: %s" % shape)
# Shape is the final shape, e.g. [..., M, N]
shape = list(shape)
batch_shape = shape[:-2]
m, n = shape[-2:]
# If there is only one column, "they" are by definition correlated.
if n < 2 or n < m:
return random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
# Shape of the matrix with only n - 1 columns that we will embed in higher
# dimensional space.
smaller_shape = batch_shape + [m, n - 1]
# Shape of the embedding matrix, mapping batch matrices
# from [..., N-1, M] to [..., N, M]
embedding_mat_shape = batch_shape + [n, n - 1]
# This stddev for the embedding_mat ensures final result has correct stddev.
stddev_mat = 1 / np.sqrt(n - 1)
with ops.name_scope("random_normal_correlated_columns"):
smaller_mat = random_normal(
smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
if seed is not None:
seed += 1287
embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
embedded = array_ops.matrix_transpose(embedded_t)
mean_mat = array_ops.ones_like(embedded) * mean
return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
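# Usage sketch (illustrative addition): a derived test class implements the
# abstract hooks and registers the generated tests at module scope. The
# class below is hypothetical and uses LinearOperatorFullMatrix purely as an
# example operand.
#
#   from tensorflow.python.ops.linalg import linear_operator_full_matrix
#
#   class MyOperatorTest(SquareLinearOperatorDerivedClassTest):
#
#     def operator_and_matrix(self, shapes_info, dtype, use_placeholder,
#                             ensure_self_adjoint_and_pd=False):
#       matrix = random_positive_definite_matrix(
#           shapes_info.shape, dtype, force_well_conditioned=True)
#       operator = linear_operator_full_matrix.LinearOperatorFullMatrix(
#           matrix,
#           is_self_adjoint=ensure_self_adjoint_and_pd or None,
#           is_positive_definite=ensure_self_adjoint_and_pd or None)
#       return operator, matrix
#
#   add_tests(MyOperatorTest)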
|
{
"content_hash": "fc645627594966d198df5b8488052058",
"timestamp": "",
"source": "github",
"line_count": 1367,
"max_line_length": 106,
"avg_line_length": 35.72640819312363,
"alnum_prop": 0.6507637495392932,
"repo_name": "paolodedios/tensorflow",
"id": "bb6fce07f3244fa49f8ce9d546bf49aca6babfe4",
"size": "49527",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linear_operator_test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from django import template
from teme.constants import MAX_RATING_STAR_VALUE
register = template.Library()
@register.simple_tag
def get_stars(stars):
full_star = '<span class="glyphicon glyphicon-star"></span>'
empty_star = '<span class="glyphicon glyphicon-star-empty"></span>'
return stars * full_star + (MAX_RATING_STAR_VALUE - stars) * empty_star
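# Usage sketch (illustrative addition): with MAX_RATING_STAR_VALUE = 5,
#
#   {% load teme_extras %}
#   {% get_stars 3 %}
#
# yields three full-star spans followed by two empty-star spans. The tag
# returns a plain str, so the markup renders literally only where the
# template marks it safe (e.g. with the |safe filter); this is an
# assumption about how the surrounding templates use it.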
|
{
"content_hash": "e8c4cfe44f171e438ed88ae62c49113b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.717391304347826,
"repo_name": "palcu/mds",
"id": "c90f71be2b5506eee71f92576a2f9abed1d3bcd2",
"size": "368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "teme/templatetags/teme_extras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3330"
},
{
"name": "HTML",
"bytes": "11777"
},
{
"name": "Python",
"bytes": "12196"
}
],
"symlink_target": ""
}
|
try:
import uio
print("uio")
except ImportError:
print("no")
|
{
"content_hash": "4656a50cad62744811c397efeab64bef",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 19,
"avg_line_length": 14.6,
"alnum_prop": 0.6027397260273972,
"repo_name": "pozetroninc/micropython",
"id": "1031cba90914c243214043a5ff013bd146c3da48",
"size": "73",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "tests/feature_check/uio_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10582"
},
{
"name": "C",
"bytes": "14105315"
},
{
"name": "C++",
"bytes": "588898"
},
{
"name": "CMake",
"bytes": "876"
},
{
"name": "JavaScript",
"bytes": "5792"
},
{
"name": "Makefile",
"bytes": "147785"
},
{
"name": "Objective-C",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "1247126"
},
{
"name": "Shell",
"bytes": "16221"
}
],
"symlink_target": ""
}
|
import angr
from . import io_file_data_for_arch
######################################
# fwrite
######################################
class fwrite(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, src, size, nmemb, file_ptr):
# TODO handle errors
data = self.state.memory.load(src, size * nmemb, endness="Iend_BE")
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fileno = self.state.mem[file_ptr + fd_offset:].int.resolved
written = self.state.posix.write(fileno, data, size*nmemb)
return written
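# Usage sketch (illustrative addition): angr registers this procedure
# automatically under SIM_PROCEDURES['libc']['fwrite'], but it can also be
# hooked explicitly on a project (binary path below is hypothetical):
#
#   import angr
#   proj = angr.Project('/bin/true', auto_load_libs=False)
#   proj.hook_symbol('fwrite', fwrite())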
|
{
"content_hash": "39fb62d977d504790a6a585cedd87e37",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 30.789473684210527,
"alnum_prop": 0.5692307692307692,
"repo_name": "Ruide/angr-dev",
"id": "db9b14241ad165fb81e546c5bbf40d969e023764",
"size": "585",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "angr/angr/procedures/libc/fwrite.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2962"
},
{
"name": "Batchfile",
"bytes": "4542"
},
{
"name": "C",
"bytes": "18511978"
},
{
"name": "C++",
"bytes": "295194"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "12558"
},
{
"name": "OpenEdge ABL",
"bytes": "2415"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "5611416"
},
{
"name": "Shell",
"bytes": "41791"
}
],
"symlink_target": ""
}
|
from camog._csv import load, loads
|
{
"content_hash": "5e409ff78823cfe3b687c532d886be96",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.7714285714285715,
"repo_name": "walshb/camog",
"id": "22810dbc6cbbf3cf8e7ef6987d3334ad69ef507a",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camog/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "54459"
},
{
"name": "C++",
"bytes": "3771"
},
{
"name": "CMake",
"bytes": "886"
},
{
"name": "Makefile",
"bytes": "4995"
},
{
"name": "Python",
"bytes": "56998"
},
{
"name": "R",
"bytes": "2567"
},
{
"name": "Shell",
"bytes": "2387"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hpilo_facts
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Gather facts through an HP iLO interface
description:
- This module gathers facts for a specific system using its HP iLO interface.
These facts include hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
# Task to gather facts from an HP iLO interface only if the system is an HP server
- hpilo_facts:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- fail:
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ hw_system_serial }}) !'
when: cmdb_serialno != hw_system_serial
'''
RETURN = r'''
# Typical output of hpilo_facts for a physical system
hw_bios_date:
description: BIOS date
returned: always
type: str
sample: 05/05/2011
hw_bios_version:
description: BIOS version
returned: always
type: str
sample: P68
hw_ethX:
description: Interface information (for each interface)
returned: always
type: dict
sample:
- macaddress: 00:11:22:33:44:55
macaddress_dash: 00-11-22-33-44-55
hw_eth_ilo:
description: Interface information (for the iLO network interface)
returned: always
type: dict
sample:
- macaddress: 00:11:22:33:44:BA
- macaddress_dash: 00-11-22-33-44-BA
hw_product_name:
description: Product name
returned: always
type: str
sample: ProLiant DL360 G7
hw_product_uuid:
description: Product UUID
returned: always
type: str
sample: ef50bac8-2845-40ff-81d9-675315501dac
hw_system_serial:
description: System serial number
returned: always
type: str
sample: ABC12345D6
hw_uuid:
description: Hardware UUID
returned: always
type: str
sample: 123456ABC78901D2
'''
import re
import traceback
import warnings
HPILO_IMP_ERR = None
try:
import hpilo
HAS_HPILO = True
except ImportError:
HPILO_IMP_ERR = traceback.format_exc()
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
try:
factname = 'hw_eth' + str(int(entry['Port']) - 1)
except Exception:
factname = non_numeric
facts = {
'macaddress': entry['MAC'].replace('-', ':'),
'macaddress_dash': entry['MAC']
}
return (factname, facts)
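# Illustrative example (values are hypothetical): an entry such as
#   {'Port': '1', 'MAC': '00-11-22-33-44-55'}
# yields ('hw_eth0', {'macaddress': '00:11:22:33:44:55',
#                     'macaddress_dash': '00-11-22-33-44-55'});
# entries without a numeric 'Port' fall back to the non_numeric fact name.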
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', required=True),
login=dict(type='str', default='Administrator'),
password=dict(type='str', default='admin', no_log=True),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
),
supports_check_mode=True,
)
if not HAS_HPILO:
module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
host = module.params['host']
login = module.params['login']
password = module.params['password']
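    # Normalize e.g. 'tlsv1_2' to 'TLSv1_2' and fetch the matching PROTOCOL_*
    # constant from the ssl module that hpilo imports.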
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
facts = {
'module_hw': True,
}
# TODO: Count number of CPUs, DIMMs and total memory
try:
data = ilo.get_host_data()
except hpilo.IloCommunicationError as e:
module.fail_json(msg=to_native(e))
for entry in data:
if 'type' not in entry:
continue
elif entry['type'] == 0: # BIOS Information
facts['hw_bios_version'] = entry['Family']
facts['hw_bios_date'] = entry['Date']
elif entry['type'] == 1: # System Information
facts['hw_uuid'] = entry['UUID']
facts['hw_system_serial'] = entry['Serial Number'].rstrip()
facts['hw_product_name'] = entry['Product Name']
facts['hw_product_uuid'] = entry['cUUID']
elif entry['type'] == 209: # Embedded NIC MAC Assignment
if 'fields' in entry:
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_eth' + str(int(value) - 1)
except Exception:
factname = 'hw_eth_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
else:
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
        elif entry['type'] == 209:  # HPQ NIC iSCSI MAC Info
            # NOTE: unreachable - this condition duplicates the branch above.
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_iscsi' + str(int(value) - 1)
except Exception:
factname = 'hw_iscsi_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
# Collect health (RAM/CPU data)
health = ilo.get_embedded_health()
facts['hw_health'] = health
memory_details_summary = health.get('memory', {}).get('memory_details_summary')
# RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
if memory_details_summary:
facts['hw_memory_details_summary'] = memory_details_summary
facts['hw_memory_total'] = 0
for cpu, details in memory_details_summary.items():
cpu_total_memory_size = details.get('total_memory_size')
if cpu_total_memory_size:
ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
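                # e.g. '32 GB' -> ram.group(1) == '32', ram.group(2) == 'GB'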
if ram:
if ram.group(2) == 'GB':
facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
# reformat into a text friendly format
facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])
module.exit_json(ansible_facts=facts)
if __name__ == '__main__':
main()
|
{
"content_hash": "07d247f1e47582913ec2ba68e9a1eacd",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 118,
"avg_line_length": 31.653225806451612,
"alnum_prop": 0.5937579617834395,
"repo_name": "SergeyCherepanov/ansible",
"id": "cbc520ebb706fe925783a9b75b50c71994c9c1d7",
"size": "8032",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/remote_management/hpilo/hpilo_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
"""
Given a non-empty array of integers, return the k most frequent elements.
For example,
Given [1,1,1,2,2,3] and k = 2, return [1,2].
"""
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
keys = list(set(nums))
NumLists = list([0]*len(keys))
for num in nums:
NumLists[keys.index(num)] += 1
Ind = sorted(range(len(NumLists)), key=lambda x: NumLists[x], reverse=True)[:k]
ans = [keys[ind] for ind in Ind]
return ans
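# Illustrative check (not part of the original solution):
#   Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2)  ->  [1, 2]
# Note: keys.index(num) makes the counting pass O(n * u) for u unique values;
# collections.Counter(nums).most_common(k) yields the same top-k elements
# (as (value, count) pairs) more efficiently.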
|
{
"content_hash": "ad602ffdc2ee96787573c53d18b4777d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 87,
"avg_line_length": 26.391304347826086,
"alnum_prop": 0.5354200988467874,
"repo_name": "yingcuhk/LeetCode",
"id": "540bdf9e4e25b1ef9fb8e1978bb828f18f006ac2",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/#347 TopK Frequent Elements/PythonCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126729"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import json
import matplotlib.pyplot as plt
import numpy as np
with open("../data/results/scores.json", 'r') as f:
scores = json.load(f)
with open("../data/results/distances.json", 'r') as f:
distances = json.load(f)
with open("../data/results/times.json", 'r') as f:
times = json.load(f)
print(scores)
print(distances)
print(times)
def scorelines():
    ''' line plot of classifier accuracy by training-set size (x = ns, y = accuracy) '''
ns = [10, 100, 1000, 10000]
fig, ax = plt.subplots()
ax.plot(ns, scores["basic"], marker='o', linestyle='-', color='r',
label='Basic')
ax.plot(ns, scores["tree"], marker='s', linestyle='-', color='b',
label='Tree')
ax.plot(ns, scores["trained"], marker='^', linestyle='-', color='g',
label='Learned Thresholds')
ax.set_xlabel('Size of Training Set')
ax.set_ylabel('Average Accuracy over Test Set')
title = 'Learning-based Classifier Accuracy by Size of Training Set'
ax.set_title(title)
ax.set_xscale('log')
ax.set_xlim(7, 14000)
ax.set_ylim(0.0, 1.0)
ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.legend(loc=2)
plt.tight_layout()
plt.savefig("../output/linechart_scores.png")
def scorebars():
''' bar chart of classifier's scores by classifier type (y = accuracy) '''
scorelist = [scores["lexical"], scores["basic"][-1], scores["tree"][-1],
scores["trained"][-1]]
N = 4
offset = 0.125
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars
fig, ax = plt.subplots()
ax.bar(ind+offset, scorelist, width, alpha=0.40, color='r')
# add some text for labels, title and axes ticks
ax.set_ylabel('Average Accuracy')
ax.set_title('Classification Accuracy by Classifier Type')
ax.set_xticks(ind+width/2+offset)
ax.set_xticklabels(('Lexical Matcher',
'Basic Classifier',
'Tree Classifier',
'Learned Thresholds'))
ax.set_ylim(0.0, 1.0)
ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.tight_layout()
plt.savefig("../output/barchart_scores.png")
def distancebars():
    ''' bar chart of classifiers' average prediction distances (y = distance) '''
distancelist = [distances["basic"], distances["tree"],
distances["trained"]]
N = 3
offset = 0.125
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars
fig, ax = plt.subplots()
ax.bar(ind+offset, distancelist, width, alpha=0.40, color='b')
# add some text for labels, title and axes ticks
ax.set_ylabel('Average Distance')
ax.set_title('Average Distance of Predictions by Classifier Type')
ax.set_xticks(ind+width/2+offset)
ax.set_xticklabels(('Basic Classifier', 'Tree Classifier',
'Tree w/ Learned Thresholds'))
ax.set_ylim(0.0, 1.0)
# ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.tight_layout()
plt.savefig("../output/barchart_distances.png")
def timelines():
    ''' line plot of classifier execution times by test-set size (x = ns, y = ms) '''
ns = [10, 100, 1000, 10000]
fig, ax = plt.subplots()
ax.errorbar(ns, times["lexical"]["avgs"], yerr=times["lexical"]["stddevs"],
marker='*', linestyle='-', color='y', label='Lexical')
ax.errorbar(ns, times["basic"]["avgs"], yerr=times["basic"]["stddevs"],
marker='o', linestyle='-', color='r', label='Basic')
ax.errorbar(ns, times["tree"]["avgs"], yerr=times["tree"]["stddevs"],
marker='s', linestyle='-', color='b', label='Tree')
ax.errorbar(ns, times["trained"]["avgs"], yerr=times["trained"]["stddevs"],
marker='^', linestyle='-', color='g',
label='Learned Thresholds')
ax.set_xlabel('Size of Test Set')
ax.set_ylabel('Time to Classify Test Set (ms)')
ax.set_title('Classifier Execution Times (ms) by Size of Test Set')
ax.set_xscale('log')
#ax.set_yscale('log')
ax.set_xlim(7, 14000)
# ax.set_ylim(0.0, 1.0)
# ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.legend(loc=2)
plt.tight_layout()
plt.show()
#plt.savefig("../output/linechart_times.png")
if __name__ == "__main__":
#scorelines()
#scorebars()
#distancebars()
timelines()
|
{
"content_hash": "3f9b1023498813e22d9d84f3b6ef9127",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 35.36220472440945,
"alnum_prop": 0.5838343353373413,
"repo_name": "yarbroughw/JMDE",
"id": "8d08503bce8a8b4fccea8eb270d7880af0dae710",
"size": "4491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JMDE/scripts/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "42931"
}
],
"symlink_target": ""
}
|
import unittest
import random
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet import IP, UDP
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
from vpp_sub_interface import VppDot1QSubint, VppDot1ADSubint
class TestL2bd(VppTestCase):
""" L2BD Test Case """
@classmethod
def setUpClass(cls):
"""
Perform standard class setup (defined by class method setUpClass in
class VppTestCase) before running the test case, set test case related
variables and configure VPP.
:var int bd_id: Bridge domain ID.
:var int mac_entries_count: Number of MAC entries for bridge-domain to
learn.
:var int dot1q_tag: VLAN tag for dot1q sub-interface.
:var int dot1ad_sub_id: SubID of dot1ad sub-interface.
:var int dot1ad_outer_tag: VLAN S-tag for dot1ad sub-interface.
:var int dot1ad_inner_tag: VLAN C-tag for dot1ad sub-interface.
:var int sl_pkts_per_burst: Number of packets in burst for single-loop
test.
:var int dl_pkts_per_burst: Number of packets in burst for dual-loop
test.
"""
super(TestL2bd, cls).setUpClass()
# Test variables
cls.bd_id = 1
cls.mac_entries_count = 100
# cls.dot1q_sub_id = 100
cls.dot1q_tag = 100
cls.dot1ad_sub_id = 20
cls.dot1ad_outer_tag = 200
cls.dot1ad_inner_tag = 300
cls.sl_pkts_per_burst = 2
cls.dl_pkts_per_burst = 257
try:
# create 3 pg interfaces
cls.create_pg_interfaces(range(3))
# create 2 sub-interfaces for pg1 and pg2
cls.sub_interfaces = [
VppDot1QSubint(cls, cls.pg1, cls.dot1q_tag),
VppDot1ADSubint(cls, cls.pg2, cls.dot1ad_sub_id,
cls.dot1ad_outer_tag, cls.dot1ad_inner_tag)]
# packet flows mapping pg0 -> pg1, pg2, etc.
cls.flows = dict()
cls.flows[cls.pg0] = [cls.pg1, cls.pg2]
cls.flows[cls.pg1] = [cls.pg0, cls.pg2]
cls.flows[cls.pg2] = [cls.pg0, cls.pg1]
# packet sizes
cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
cls.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
cls.interfaces = list(cls.pg_interfaces)
cls.interfaces.extend(cls.sub_interfaces)
# Create BD with MAC learning enabled and put interfaces and
# sub-interfaces to this BD
for pg_if in cls.pg_interfaces:
sw_if_index = pg_if.sub_if.sw_if_index \
if hasattr(pg_if, 'sub_if') else pg_if.sw_if_index
cls.vapi.sw_interface_set_l2_bridge(sw_if_index,
bd_id=cls.bd_id)
# setup all interfaces
for i in cls.interfaces:
i.admin_up()
# mapping between packet-generator index and lists of test hosts
cls.hosts_by_pg_idx = dict()
# create test host entries and inject packets to learn MAC entries
# in the bridge-domain
cls.create_hosts_and_learn(cls.mac_entries_count)
cls.logger.info(cls.vapi.ppcli("show l2fib"))
except Exception:
super(TestL2bd, cls).tearDownClass()
raise
def setUp(self):
"""
Clear trace and packet infos before running each test.
"""
super(TestL2bd, self).setUp()
self.packet_infos = {}
def tearDown(self):
"""
Show various debug prints after each test.
"""
super(TestL2bd, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.ppcli("show l2fib verbose"))
self.logger.info(self.vapi.ppcli("show bridge-domain %s detail" %
self.bd_id))
@classmethod
def create_hosts_and_learn(cls, count):
"""
Create required number of host MAC addresses and distribute them among
interfaces. Create host IPv4 address for every host MAC address. Create
L2 MAC packet stream with host MAC addresses per interface to let
the bridge domain learn these MAC addresses.
:param count: Integer number of hosts to create MAC/IPv4 addresses for.
"""
n_int = len(cls.pg_interfaces)
        macs_per_if = count // n_int
i = -1
for pg_if in cls.pg_interfaces:
i += 1
start_nr = macs_per_if * i
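            # the last interface also takes the remainder of count % n_int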
end_nr = count if i == (n_int - 1) else macs_per_if * (i + 1)
cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
packets = []
for j in range(start_nr, end_nr):
host = Host(
"00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
"172.17.1%02x.%u" % (pg_if.sw_if_index, j))
packet = (Ether(dst="ff:ff:ff:ff:ff:ff", src=host.mac))
hosts.append(host)
if hasattr(pg_if, 'sub_if'):
packet = pg_if.sub_if.add_dot1_layer(packet)
packets.append(packet)
pg_if.add_stream(packets)
cls.logger.info("Sending broadcast eth frames for MAC learning")
cls.pg_start()
def create_stream(self, src_if, packet_sizes, packets_per_burst):
"""
Create input packet stream for defined interface.
:param object src_if: Interface to create packet stream for.
:param list packet_sizes: List of required packet sizes.
:param int packets_per_burst: Number of packets in burst.
:return: Stream of packets.
"""
pkts = []
for i in range(0, packets_per_burst):
dst_if = self.flows[src_if][i % 2]
dst_host = random.choice(self.hosts_by_pg_idx[dst_if.sw_if_index])
src_host = random.choice(self.hosts_by_pg_idx[src_if.sw_if_index])
pkt_info = self.create_packet_info(
src_if.sw_if_index, dst_if.sw_if_index)
payload = self.info_to_payload(pkt_info)
p = (Ether(dst=dst_host.mac, src=src_host.mac) /
IP(src=src_host.ip4, dst=dst_host.ip4) /
UDP(sport=1234, dport=1234) /
Raw(payload))
pkt_info.data = p.copy()
if hasattr(src_if, 'sub_if'):
p = src_if.sub_if.add_dot1_layer(p)
size = random.choice(packet_sizes)
self.extend_packet(p, size)
pkts.append(p)
return pkts
def verify_capture(self, pg_if, capture):
"""
Verify captured input packet stream for defined interface.
:param object pg_if: Interface to verify captured packet stream for.
:param list capture: Captured packet stream.
"""
last_info = dict()
for i in self.pg_interfaces:
last_info[i.sw_if_index] = None
dst_sw_if_index = pg_if.sw_if_index
for packet in capture:
payload_info = self.payload_to_info(str(packet[Raw]))
src_sw_if_index = payload_info.src
src_if = None
for ifc in self.pg_interfaces:
if ifc != pg_if:
if ifc.sw_if_index == src_sw_if_index:
src_if = ifc
break
if hasattr(src_if, 'sub_if'):
# Check VLAN tags and Ethernet header
packet = src_if.sub_if.remove_dot1_layer(packet)
self.assertTrue(Dot1Q not in packet)
try:
ip = packet[IP]
udp = packet[UDP]
packet_index = payload_info.index
self.assertEqual(payload_info.dst, dst_sw_if_index)
self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
(pg_if.name, payload_info.src, packet_index))
next_info = self.get_next_packet_info_for_interface2(
payload_info.src, dst_sw_if_index,
last_info[payload_info.src])
last_info[payload_info.src] = next_info
self.assertTrue(next_info is not None)
self.assertEqual(packet_index, next_info.index)
saved_packet = next_info.data
# Check standard fields
self.assertEqual(ip.src, saved_packet[IP].src)
self.assertEqual(ip.dst, saved_packet[IP].dst)
self.assertEqual(udp.sport, saved_packet[UDP].sport)
self.assertEqual(udp.dport, saved_packet[UDP].dport)
except:
self.logger.error(ppp("Unexpected or invalid packet:", packet))
raise
for i in self.pg_interfaces:
remaining_packet = self.get_next_packet_info_for_interface2(
i, dst_sw_if_index, last_info[i.sw_if_index])
self.assertTrue(
remaining_packet is None,
"Port %u: Packet expected from source %u didn't arrive" %
(dst_sw_if_index, i.sw_if_index))
def run_l2bd_test(self, pkts_per_burst):
""" L2BD MAC learning test """
# Create incoming packet streams for packet-generator interfaces
for i in self.pg_interfaces:
packet_sizes = self.sub_if_packet_sizes if hasattr(i, 'sub_if') \
else self.pg_if_packet_sizes
pkts = self.create_stream(i, packet_sizes, pkts_per_burst)
i.add_stream(pkts)
# Enable packet capture and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify outgoing packet streams per packet-generator interface
for i in self.pg_interfaces:
capture = i.get_capture()
self.logger.info("Verifying capture on interface %s" % i.name)
self.verify_capture(i, capture)
def test_l2bd_sl(self):
""" L2BD MAC learning single-loop test
Test scenario:
        1. config
            MAC learning enabled
            learn 100 MAC entries
            3 interfaces: untagged, dot1q, dot1ad (dot1q used instead of
                dot1ad in the first version)
        2. send L2 eth packets between the 3 interfaces
            64B, 512B, 1518B, 9018B (ether_size)
            burst of 2 pkts per interface
"""
self.run_l2bd_test(self.sl_pkts_per_burst)
def test_l2bd_dl(self):
""" L2BD MAC learning dual-loop test
Test scenario:
        1. config
            MAC learning enabled
            learn 100 MAC entries
            3 interfaces: untagged, dot1q, dot1ad (dot1q used instead of
                dot1ad in the first version)
        2. send L2 eth packets between the 3 interfaces
            64B, 512B, 1518B, 9018B (ether_size)
            burst of 257 pkts per interface
"""
self.run_l2bd_test(self.dl_pkts_per_burst)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
{
"content_hash": "e7b3a3535d061add5d00a08937280a82",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 39.517605633802816,
"alnum_prop": 0.5553773500846476,
"repo_name": "licko/vpp-1701-licko",
"id": "50720e64cd8dd25e44b9aa86e3e840b8f02547a0",
"size": "11246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_l2bd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "14862"
},
{
"name": "C",
"bytes": "11299176"
},
{
"name": "C++",
"bytes": "606713"
},
{
"name": "CSS",
"bytes": "794"
},
{
"name": "Emacs Lisp",
"bytes": "99891"
},
{
"name": "Java",
"bytes": "120676"
},
{
"name": "Lua",
"bytes": "80051"
},
{
"name": "M4",
"bytes": "56951"
},
{
"name": "Makefile",
"bytes": "205226"
},
{
"name": "Objective-C",
"bytes": "15373"
},
{
"name": "Python",
"bytes": "581371"
},
{
"name": "Ruby",
"bytes": "3461"
},
{
"name": "Shell",
"bytes": "36809"
},
{
"name": "Yacc",
"bytes": "3034"
}
],
"symlink_target": ""
}
|
import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.db.models.lookups import Exact, In
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
# For performance, only add a from_db_value() method if the base field
# implements it.
if hasattr(self.base_field, 'from_db_value'):
self.from_db_value = self._from_db_value
super().__init__(**kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def check(self, **kwargs):
errors = super().check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super().set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
        if isinstance(value, (list, tuple)):
return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field.clone(),
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, str):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def _from_db_value(self, value, expression, connection, context):
if value is None:
return value
return [
self.base_field.from_db_value(item, expression, connection, context)
for item in value
]
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
if val is None:
values.append(None)
else:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super().get_transform(name)
if transform:
return transform
if '_' not in name:
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
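    # Illustrative lookups (a sketch, assuming tags = ArrayField(...)):
    #   .filter(tags__0='a')     -> SQL "tags"[1] = 'a'    (IndexTransform)
    #   .filter(tags__0_1=['a']) -> SQL "tags"[1:1] = ...  (SliceTransform)
    # i.e. Django's 0-based lookups map onto PostgreSQL's 1-based arrays.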
def validate(self, value, model_instance):
super().validate(value, model_instance)
for index, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super().run_validators(value)
for index, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super().formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super().as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super().as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayExact(Exact):
def as_sql(self, qn, connection):
sql, params = super().as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super().as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
# Distinguish NULL and empty arrays
return (
'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE '
'coalesce(array_length(%(lhs)s, 1), 0) END'
) % {'lhs': lhs}, params
@ArrayField.register_lookup
class ArrayInLookup(In):
def get_prep_lookup(self):
values = super().get_prep_lookup()
# In.process_rhs() expects values to be hashable, so convert lists
# to tuples.
prepared_values = []
for value in values:
if hasattr(value, 'resolve_expression'):
prepared_values.append(value)
else:
prepared_values.append(tuple(value))
return prepared_values
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory:
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory:
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
|
{
"content_hash": "6ccb5d294e226f822af9e6b597dbb4bf",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 100,
"avg_line_length": 33.398648648648646,
"alnum_prop": 0.5751567873760874,
"repo_name": "auready/django",
"id": "15cbf5e45e8be050ee6baaee6cc3f0bfa09457db",
"size": "9886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/postgres/fields/array.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53169"
},
{
"name": "HTML",
"bytes": "173634"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12200962"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from celery import shared_task
from django.contrib.contenttypes.models import ContentType
from matches.models import Squad, MatchPilot, MatchUpgrade, Match
from xwing_data.models import Pilot, Upgrade, StatisticSet, Ship, Faction
@shared_task(bind=True, max_retries=3)
def delete_all_data(self):
Squad.objects.all().delete()
MatchPilot.objects.all().delete()
MatchUpgrade.objects.all().delete()
    Match.objects.all().delete()
@shared_task(bind=True, max_retries=3)
def import_squad(self, xws, player_name):
    squad = None  # so the cleanup in the except-clause is safe if Squad() fails
    try:
        squad = Squad(
player_name=player_name,
list_name=xws.get("name")
)
squad.save()
factions = Faction.objects.filter(xws=xws.get("faction"))
for pilot in xws.get("pilots", []):
pilot_data = Pilot.objects.get(
xws=pilot.get("name"),
ship=Ship.objects.get(xws=pilot.get("ship")),
faction__in=factions
)
if pilot_data.ship_override:
target = pilot_data.ship_override
else:
target = pilot_data.ship.stats
target.skill = pilot_data.skill
stats = StatisticSet(
skill=target.skill,
attack=target.attack,
agility=target.agility,
hull=target.hull,
shields=target.shields
)
stats.save()
match_pilot = MatchPilot(
pilot=pilot_data,
points=pilot.get("points"),
stats=stats,
)
match_pilot.save()
if pilot.get('upgrades', []):
for upgrade_type, upgrades in pilot['upgrades'].items():
for upgrade in upgrades:
upgrade_object = Upgrade.objects.filter(xws=upgrade).last()
match_upgrade = MatchUpgrade(upgrade=upgrade_object)
match_upgrade.save()
match_pilot.upgrades.add(match_upgrade)
for grant in upgrade_object.grants.all():
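                            # Grants wrapping a StatisticSet stack their
                            # values field-by-field onto the pilot's stats.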
if grant.content_type == ContentType.objects.get(model="statisticset"):
for field in ["skill", "attack", "agility", "hull", "shields"]:
setattr(
match_pilot.stats,
field,
getattr(grant.content_object, field, 0) + getattr(match_pilot.stats, field, 0)
)
match_pilot.stats.save()
squad.pilots.add(match_pilot)
return squad.id
except Exception as exc:
print(exc)
if squad:
squad.delete() # Tidy Up
return False
|
{
"content_hash": "a20eeac1041b84cb005db738cc085031",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 118,
"avg_line_length": 34.66265060240964,
"alnum_prop": 0.5074730622175878,
"repo_name": "sheepeatingtaz/xwingoverlayer",
"id": "0088a7a07d53a5621da7e34af27643fb9e926027",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matches/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9434"
},
{
"name": "HTML",
"bytes": "21694"
},
{
"name": "JavaScript",
"bytes": "5176"
},
{
"name": "Python",
"bytes": "74520"
},
{
"name": "Shell",
"bytes": "5558"
}
],
"symlink_target": ""
}
|
import time
import numpy as np
from scipy.linalg import eigh
from scipy.sparse.linalg import LinearOperator, eigsh
# correlation kernel/function parameters
sigma = 0.8 # variance or sill
L = 5000 # correlation length
# grid parameters
nrow = 19
ncol = 63
Lx = 1000.
Ly = 900.
num_eig = 10
# initialise swig wrapped cpp class
import correlation
C_matrix = correlation.C_matrix(sigma, L)
C_matrix.set_dims(nrow, ncol, Lx, Ly)
out_vec = np.zeros((nrow*ncol), 'd')
def Av(v):
C_matrix.av_no_C(nrow*ncol, v, out_vec)
return out_vec
A = LinearOperator((nrow*ncol,nrow*ncol), matvec=Av, dtype='d')
# e.g. A.matvec(np.ones(nrow*ncol)) applies the covariance operator to a constant field
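# Note: eigsh() only needs matrix-vector products, so the dense
# (nrow*ncol) x (nrow*ncol) covariance matrix is never assembled; the
# swig-wrapped C_matrix evaluates C*v on the fly.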
t0 = time.time()
eig_vals, eig_vecs = eigsh( A, k=num_eig)
t1 = time.time()
# save
np.savez('kle_eigen.npz', eig_vals=eig_vals, eig_vecs=eig_vecs)
# load
npzfile = np.load('kle_eigen.npz')
eig_vals = npzfile['eig_vals']
eig_vecs = npzfile['eig_vecs']
# how you'd use it
##########################################
# mu = np.zeros(nrow*ncol) # could be the mean of any data you have
# scale = 1.
# # evaluate KLE with given modes
# # other parameters (other than modes) are mu, scale, eig_vecs, eig_vals (or really sigma and L on which they depend)
# def KLE(modes):
# coefs = np.sqrt(eig_vals) * modes # elementwise
# truncated_M = mu + scale * np.dot( eig_vecs, coefs)
# unflattened = truncated_M.reshape(nrow,ncol)
# return unflattened
# modes = np.ones(num_eig)
# print(KLE(modes))
############################################
print "=================================="
print "done in ", t1-t0, "seconds"
print "=================================="
for i in range(num_eig):
assert np.allclose(A.matvec(eig_vecs[:,i]), eig_vals[i]*eig_vecs[:,i])
print "=================================="
print "they are indeed eigenvectors"
print "=================================="
|
{
"content_hash": "66412b4b670f19722ab7b707e99b2bef",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 118,
"avg_line_length": 24.493333333333332,
"alnum_prop": 0.5993467610234078,
"repo_name": "mjasher/computation",
"id": "0436ccd43c5f8ed1f0f68b845c0f59ff83d6bb60",
"size": "1837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "correlation/make_kle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "106424"
},
{
"name": "C++",
"bytes": "6228"
},
{
"name": "FORTRAN",
"bytes": "20965"
},
{
"name": "HTML",
"bytes": "923"
},
{
"name": "JavaScript",
"bytes": "2675"
},
{
"name": "Makefile",
"bytes": "123"
},
{
"name": "Python",
"bytes": "34866"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db import connection
from django.db.models import Sum, Value
from django.db.models.functions import Coalesce
from django.shortcuts import render
from django.urls import reverse
from django.views import View
from geral.functions import (
request_user,
has_permission,
)
import lotes.models
from produto.functions import papg_modelo
import cd.queries as queries
class SolicitacaoDetalhe(LoginRequiredMixin, View):
def __init__(self):
self.template_name = 'cd/solicitacao_detalhe.html'
self.title_name = 'Detalhes de solicitação'
def link_endereco(self, row):
if row['lote__local'] != '-Ausente-':
row['lote__local|LINK'] = reverse(
'cd:estoque_filtro',
args=['E', row['lote__local']])
row['lote__local|TARGET'] = '_BLANK'
def mount_context(self, solicit_id, user):
context = {
'solicit_id': solicit_id,
'user': user,
}
try:
solicitacao = lotes.models.SolicitaLote.objects.get(
id=solicit_id)
except lotes.models.SolicitaLote.DoesNotExist:
context['erro'] = \
'Id de solicitação inválido.'
return context
solicit_ativa_recs = lotes.models.SolicitaLote.objects.filter(
usuario=user, ativa=True)
if len(solicit_ativa_recs) == 1:
solicit_ativa_cod = solicit_ativa_recs[0].codigo
solicit_ativa_id = str(solicit_ativa_recs[0].id)
if solicit_ativa_id != solicit_id:
context['solicit_ativa_cod'] = solicit_ativa_cod
context['solicit_ativa_id'] = solicit_ativa_id
else:
context['solicit_ativa_cod'] = '='
context['solicitacao'] = solicitacao
data_ped = lotes.models.SolicitaLotePedido.objects.filter(
solicitacao=solicitacao).order_by('pedido').values('pedido')
pedidos = ' '.join([
str(r['pedido']) for r in data_ped
])
context['solicitacao_pedidos'] = pedidos
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.values(
'id', 'lote__op', 'lote__lote', 'lote__referencia',
'lote__cor', 'lote__tamanho', 'qtd', 'update_at'
).annotate(
lote__local=Coalesce('lote__local', Value('-Ausente-'))
).filter(
solicitacao=solicitacao
).order_by(
'-update_at'
)
for row in solicit_qtds:
row['delete'] = ''
row['delete|HOVER'] = 'Exclui lote'
row['delete|LINK'] = reverse(
'cd:solicitacao_detalhe__get3',
args=[solicitacao.id, 'd', row['id']])
row['delete|GLYPHICON'] = 'glyphicon-remove'
row['lote__lote|LINK'] = reverse(
'producao:lote__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
self.link_endereco(row)
link = reverse(
'cd:solicitacao_detalhe__get2',
args=[solicitacao.id, 'l'])
limpa = '''
<a title="Limpa solicitação"
href="{link}"
><span class="glyphicon glyphicon-remove-circle" aria-hidden="true"
></span></a>
'''.format(link=link)
context.update({
'headers': ['Endereço', 'OP', 'Lote', 'Referência',
'Cor', 'Tamanho', 'Quant. Solicitada', 'Em', (limpa,)],
'fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho', 'qtd',
'update_at', 'delete'],
'data': solicit_qtds,
})
solicit_qtds_inat = \
lotes.models.SolicitaLoteQtd.objects_inactive.values(
'id', 'lote__op', 'lote__lote', 'lote__referencia',
'lote__cor', 'lote__tamanho', 'qtd', 'when'
).annotate(
lote__local=Coalesce('lote__local', Value('-ausente-'))
).filter(
solicitacao=solicitacao
).order_by(
'-when'
)
for row in solicit_qtds_inat:
row['lote__lote|LINK'] = reverse(
'producao:lote__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
context.update({
'inat_headers': ['Endereço', 'OP', 'Lote',
'Referência', 'Cor', 'Tamanho',
'Quant. Solicitada', 'Removido em'],
'inat_fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho',
'qtd', 'when'],
'inat_data': solicit_qtds_inat,
})
por_endereco = lotes.models.SolicitaLoteQtd.objects.values(
'lote__op', 'lote__lote', 'lote__qtd_produzir', 'lote__qtd',
'lote__referencia', 'lote__cor', 'lote__tamanho'
).annotate(
lote_ordem=Coalesce('lote__local', Value('0000')),
lote__local=Coalesce('lote__local', Value('-Ausente-')),
qtdsum=Sum('qtd')
).filter(
solicitacao=solicitacao
).order_by(
'lote_ordem', 'lote__op', 'lote__referencia', 'lote__cor',
'lote__tamanho', 'lote__lote'
)
for row in por_endereco:
if row['qtdsum'] == row['lote__qtd_produzir']:
row['inteira_parcial'] = 'Lote inteiro'
else:
row['inteira_parcial'] = 'Parcial'
if row['qtdsum'] > row['lote__qtd']:
row['inteira_parcial'] += "*"
row['lote__lote|LINK'] = reverse(
'producao:lote__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
self.link_endereco(row)
context.update({
'e_headers': ['Endereço', 'OP', 'Lote',
'Referência', 'Cor', 'Tamanho',
'Quant. Solicitada', 'Solicitação'],
'e_fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho',
'qtdsum', 'inteira_parcial'],
'e_data': por_endereco,
})
para_cx = lotes.models.SolicitaLoteQtd.objects.values(
'lote__op', 'lote__lote', 'lote__qtd_produzir',
'lote__estagio', 'lote__referencia',
'lote__cor', 'lote__tamanho', 'lote__ordem_tamanho',
'lote__op_obj__deposito'
).annotate(
lote_ordem=Coalesce('lote__local', Value('0000')),
lote__local=Coalesce('lote__local', Value('-Ausente-')),
qtdsum=Sum('qtd')
).filter(
solicitacao=solicitacao
)
for row in para_cx:
row['modelo'] = papg_modelo(row['lote__referencia'])
row['modelo_order'] = int(row['modelo'])
if row['qtdsum'] == row['lote__qtd_produzir']:
row['inteira_parcial'] = 'Lote inteiro'
else:
row['inteira_parcial'] = 'Parcial'
row['lote__lote|LINK'] = reverse(
'producao:lote__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
if row['lote__estagio'] == 999:
row['estagio'] = 'Finalizado'
else:
row['estagio'] = row['lote__estagio']
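            # Transferable to a box: a partial request, or a whole but
            # unaddressed lot; either way the lot must be finished
            # (stage 999) and its OP must have a nonzero deposit.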
can_transf = row['inteira_parcial'] == 'Parcial'
can_transf = can_transf or (
row['lote_ordem'] == '0000'
and row['inteira_parcial'] == 'Lote inteiro')
can_transf = can_transf and row['lote__estagio'] == 999
can_transf = can_transf and row['lote__op_obj__deposito'] != 0
row['transf_order'] = 1
row['transf'] = ''
if can_transf:
row['transf_order'] = 0
row['transf|HOVER'] = 'Transfere lote para caixinha'
row['transf|LINK'] = (
"javascript:transfere("
f" '{row['lote__referencia']}'"
f", '{row['lote__cor']}'"
f", '{row['lote__tamanho']}'"
f", '{row['qtdsum']}'"
f", '{row['lote__op_obj__deposito']}'"
f", '{row['modelo'].zfill(5)}'"
")")
row['transf|GLYPHICON'] = 'glyphicon-log-in'
row['lote__op|LINK'] = reverse(
'producao:op__get', args=[row['lote__op']])
self.link_endereco(row)
para_cx = sorted(
para_cx, key=lambda i: (
i['transf_order'],
i['modelo_order'],
i['lote__cor'],
i['lote__ordem_tamanho'],
i['lote__referencia'],
i['lote__lote'],
))
context.update({
'cx_headers': [
'Modelo',
'Cor',
'Tamanho',
'Quant. Solicitada',
'Referência',
'Depósito',
'Transfere',
'OP',
'Lote',
'Solicitação',
'Endereço',
'Estágio',
],
'cx_fields': [
'modelo',
'lote__cor',
'lote__tamanho',
'qtdsum',
'lote__referencia',
'lote__op_obj__deposito',
'transf',
'lote__op',
'lote__lote',
'inteira_parcial',
'lote__local',
'estagio',
],
'cx_style': {},
'cx_data': para_cx,
})
referencias = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao=solicitacao
).values('lote__referencia').distinct()
cursor_def = connection.cursor()
grades2 = []
for referencia in referencias:
            # size grid (grade) for this reference within the request
context_ref = queries.grade_solicitacao(
cursor_def, referencia['lote__referencia'],
solicit_id=solicit_id)
grades2.append(context_ref)
context.update({
'grades2': grades2,
})
grade_total = queries.grade_solicitacao(
cursor_def, solicit_id=solicit_id)
grade_total.update({
'style': {i: 'text-align: right;'
for i in range(2, len(grade_total['fields'])+1)},
})
context.update({
'gt': grade_total,
})
return context
def get(self, request, *args, **kwargs):
context = {'titulo': self.title_name}
if 'acao' in kwargs:
acao = kwargs['acao']
else:
acao = None
if 'id' in kwargs:
slq_id = kwargs['id']
else:
slq_id = None
solicit_id = kwargs['solicit_id']
user = request_user(request)
if acao is not None:
if not has_permission(request, 'lotes.change_solicitalote'):
context.update({
'erro': 'Usuário não tem direito de alterar solicitações.'
})
return render(request, self.template_name, context)
if acao == 'd' and slq_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.get(
id=slq_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
if acao == 'l' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
            # un-reserve the lot in all requests
if acao == 'dl' and slq_id is not None:
lote = slq_id
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
lote__lote=lote)
solicit_qtds.delete()
context.update({
'acao_mensagem':
                        'Lote {} cancelado em todas as solicitações.'.format(
lote
)
})
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
            # un-reserve addressed lots
if acao == 'de' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id, lote__local__isnull=False)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
            # move addressed lots to the user's active request
if acao == 'move' and solicit_id is not None:
try:
solicit_ativa = lotes.models.SolicitaLote.objects.get(
usuario=user, ativa=True)
try:
for solicit_qtd in \
lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id,
lote__local__isnull=False):
solicit_qtd.solicitacao = solicit_ativa
solicit_qtd.save()
except Exception:
pass
except lotes.models.SolicitaLote.DoesNotExist:
pass
data = self.mount_context(solicit_id, user)
context.update(data)
return render(request, self.template_name, context)
|
{
"content_hash": "74d25b6b12375ac31aed1b4ee6c54a01",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 79,
"avg_line_length": 36.64751958224543,
"alnum_prop": 0.4791963522371046,
"repo_name": "anselmobd/fo2",
"id": "a22354fb8634cdbc22963eedf5a900c7ac9d1051",
"size": "14069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cd/views/solicitacao_detalhe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import json
import re
import shutil
from datetime import datetime
from pathlib import Path
from string import capwords
from typing import (Callable, Iterable, NamedTuple, Optional, TypedDict, Union,
cast)
from libsyntyche import terminal
from libsyntyche.cli import ArgumentRules, Command
from libsyntyche.texteditor import Searcher
from libsyntyche.widgets import (HBoxLayout, Label, Stretch, VBoxLayout,
mk_signal0, mk_signal1)
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QTextCharFormat
from PyQt5.QtWidgets import QShortcut
from .common import BackstoryViewerFormat, Settings, run_external_command
from .taggedlist import ATTR_FILE, ATTR_TITLE, Entry
Color = Union[QtGui.QColor, Qt.GlobalColor]
class Page(NamedTuple):
title: str
file: Path
cursor_pos: int = 0
scroll_pos: int = 0
PageMetadata = TypedDict(
'PageMetadata',
{'title': str, 'created': str, 'revision': int, 'revision created': str}
)
def fix_title(file: Path) -> str:
return capwords(file.stem.replace('-', ' '))
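# e.g. fix_title(Path('character-notes.txt')) -> 'Character Notes'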
def read_metadata(file: Path) -> tuple[str, str]:
lines = file.read_text(encoding='utf-8').split('\n', 1)
return lines[0], lines[1]
def generate_page_metadata(title: str,
created: Optional[str] = None,
revision: Optional[int] = None,
revision_created: Optional[str] = None
) -> PageMetadata:
"""
Return a JSON string with the default metadata for a single backstory page.
"""
now = datetime.now().isoformat()
return {
'title': title,
'created': now if created is None else created,
'revision': 0 if revision is None else revision,
'revision created': now if revision_created is None else revision_created,
}
class Formatter(QtGui.QSyntaxHighlighter):
def __init__(self, parent: QtCore.QObject, settings: Settings) -> None:
super().__init__(parent)
self.formats: list[tuple[str, QTextCharFormat]] = []
self.update_formats(settings.backstory_viewer_formats.value)
def update_formats(self, format_strings: list[BackstoryViewerFormat]) -> None:
self.formats = []
font = QtGui.QFont
for fmt in format_strings:
f = QTextCharFormat()
if fmt.bold:
f.setFontWeight(font.Bold)
if fmt.italic:
f.setFontItalic(True)
if fmt.underline:
f.setFontUnderline(True)
if fmt.strikethrough:
f.setFontStrikeOut(True)
if fmt.size is not None:
f.setFontPointSize(fmt.size)
if fmt.color is not None:
f.setForeground(fmt.color)
self.formats.append((fmt.pattern, f))
self.rehighlight()
def highlightBlock(self, text: str) -> None:
for rx, fmt in self.formats:
for chunk in re.finditer(rx, text):
self.setFormat(chunk.start(), chunk.end() - chunk.start(), fmt)
class TabBar(QtWidgets.QTabBar):
set_tab_index = mk_signal1(int)
def __init__(self, parent: QtWidgets.QWidget, print_: Callable[[str], None]) -> None:
super().__init__(parent)
self.print_ = print_
self.pages: list[Page] = []
def mousePressEvent(self, ev: QtGui.QMouseEvent) -> None:
if ev.button() == Qt.LeftButton:
tab = self.tabAt(ev.pos())
if tab != -1:
self.set_tab_index.emit(tab)
def wheelEvent(self, ev: QtGui.QWheelEvent) -> None:
delta = ev.angleDelta().y() + ev.angleDelta().x()
if delta != 0:
self.change_tab(-delta)
def next_tab(self) -> None:
self.change_tab(1)
def prev_tab(self) -> None:
self.change_tab(-1)
def change_tab(self, direction: int) -> None:
current_tab = self.currentIndex()
if direction > 0 and current_tab == self.count() - 1:
new_tab = 0
elif direction < 0 and current_tab == 0:
new_tab = self.count() - 1
else:
new_tab = current_tab + int(direction / abs(direction))
self.set_tab_index.emit(new_tab)
def clear(self) -> None:
while self.count() > 1:
if self.currentIndex() == 0:
self.removeTab(1)
else:
self.removeTab(0)
self.removeTab(0)
def current_page_file(self) -> Path:
return self.pages[self.currentIndex()].file
def set_page_position(self, i: int, cursor_pos: int, scroll_pos: int) -> None:
self.pages[i] = self.pages[i]._replace(cursor_pos=cursor_pos, scroll_pos=scroll_pos)
def get_page_position(self, i: int) -> tuple[int, int]:
return self.pages[i][2:4]
def load_pages(self, root: Path) -> Iterable[Page]:
"""
Read all pages from the specified directory and build a list of them.
"""
for file in root.iterdir():
if re.search(r'\.rev\d+$', file.name) is not None:
continue
if file.is_dir():
continue
first_line, data = file.read_text().split('\n', 1)
title = fix_title(file)
default_metadata = generate_page_metadata(title)
metadata: PageMetadata
try:
metadata = json.loads(first_line)
except ValueError:
self.print_(f'Bad/no properties found on page {file.name}, fixing...')
new_first_line = json.dumps(generate_page_metadata(title))
file.write_text('\n'.join([new_first_line, first_line, data]))
yield Page(title, file)
else:
original_metadata = metadata.copy()
# Special case if date exists and revision date doesn't
if 'revision created' not in metadata:
if 'created' in metadata:
metadata['revision created'] = metadata['created']
else:
metadata['created'] = default_metadata['created']
# Make sure all required keys exist
if 'title' not in metadata:
metadata['title'] = default_metadata['title']
if 'created' not in metadata:
metadata['created'] = default_metadata['created']
if 'revision' not in metadata:
metadata['revision'] = default_metadata['revision']
# Update the file if needed
if metadata != original_metadata:
file.write_text(json.dumps(metadata) + '\n' + data)
yield Page(metadata['title'], file)
def open_entry(self, root: Path) -> None:
"""
Ready the tab bar for a new entry.
"""
self.clear()
self.pages = sorted(self.load_pages(root))
for page in self.pages:
self.addTab(page.title)
def add_page(self, title: str, file: Path) -> int:
"""
Add a new page to and then sort the tab bar. Return the index of the
new tab.
"""
self.pages.append(Page(title, file))
self.pages.sort()
i = next(pos for pos, page in enumerate(self.pages) if page.file == file)
self.insertTab(i, title)
return i
def remove_page(self) -> Path:
"""
Remove the active page from the tab bar and return the page's file name
Note that the actual file on the disk is not removed by this.
Raise IndexError if there is only one tab left.
"""
if self.count() <= 1:
raise IndexError("Can't remove the only page")
i: int = self.currentIndex()
page = self.pages.pop(i)
self.removeTab(i)
self.print_(f'Page "{page.title}" deleted')
return page.file
def rename_page(self, newtitle: str) -> None:
"""
Rename the active page and update the tab bar.
"""
i = self.currentIndex()
self.pages[i] = self.pages[i]._replace(title=newtitle)
self.pages.sort()
self.setTabText(i, newtitle)
new_i = next(pos for pos, page in enumerate(self.pages) if page.title == newtitle)
self.moveTab(i, new_i)
class BackstoryTextEdit(QtWidgets.QTextEdit):
resized = mk_signal0()
def resizeEvent(self, ev: QtGui.QResizeEvent) -> None:
super().resizeEvent(ev)
self.resized.emit()
class BackstoryWindow(QtWidgets.QFrame):
closed = mk_signal1(Path)
def __init__(self, entry: Entry, settings: Settings, history_path: Path) -> None:
super().__init__()
self.settings = settings
self.textarea = BackstoryTextEdit()
self.default_font = self.textarea.fontFamily()
self.textarea.setTabStopWidth(30)
self.textarea.setAcceptRichText(False)
self.title_label = Label('', name='backstory_title', parent=self)
self.tab_counter = Label('', name='backstory_tab_counter', parent=self)
self.revision_notice = Label('', name='backstory_revision_counter', parent=self)
history_file = history_path / (entry[ATTR_FILE].name + '.history')
self.terminal = BackstoryTerminal(self, history_file)
self.searcher = Searcher(self.textarea, self.terminal.error, self.terminal.print_)
self.tab_bar = TabBar(self, self.terminal.print_)
self.create_layout(self.title_label, self.tab_bar, self.tab_counter,
self.revision_notice, self.textarea, self.terminal)
self.formatter = Formatter(self.textarea, settings)
self.connect_signals()
self.revision_active = False
self.force_quit_flag = False
self.hotkey_next_tab = QShortcut(self.settings.hotkey_next_tab.value,
self, self.tab_bar.next_tab)
self.settings.hotkey_next_tab.changed.connect(self.hotkey_next_tab.setKey)
self.hotkey_prev_tab = QShortcut(self.settings.hotkey_prev_tab.value,
self, self.tab_bar.prev_tab)
self.settings.hotkey_prev_tab.changed.connect(self.hotkey_prev_tab.setKey)
self.hotkey_save_tab = QShortcut(self.settings.hotkey_save.value,
self, cast(Callable[[], None], self.save_tab))
self.settings.hotkey_save.changed.connect(self.hotkey_save_tab.setKey)
self.hotkey_toggle_terminal = QShortcut(self.settings.hotkey_toggle_terminal.value,
self, self.toggle_terminal)
self.settings.hotkey_toggle_terminal.changed.connect(self.hotkey_toggle_terminal.setKey)
self.ignore_wheel_event = False
self.entry = entry
self.entry_file = entry[ATTR_FILE]
self.root = entry[ATTR_FILE].with_name(entry[ATTR_FILE].name + '.metadir')
self.make_sure_metadir_exists(self.root)
self.tab_bar.open_entry(self.root)
self.load_tab(0)
self.title_label.setText(entry[ATTR_TITLE])
self.setWindowTitle(entry[ATTR_TITLE])
self.textarea.setFocus()
# Message tray
self.message_tray = terminal.MessageTray(self)
self.terminal.show_message.connect(self.message_tray.add_message)
self.textarea.resized.connect(self.adjust_tray)
self.show()
def closeEvent(self, ev: QtGui.QCloseEvent) -> None:
success = self.save_tab()
if success or self.force_quit_flag:
self.closed.emit(self.entry_file)
self.settings.hotkey_next_tab.changed.disconnect(self.hotkey_next_tab.setKey)
self.settings.hotkey_prev_tab.changed.disconnect(self.hotkey_prev_tab.setKey)
self.settings.hotkey_save.changed.disconnect(self.hotkey_save_tab.setKey)
self.settings.hotkey_toggle_terminal.changed.disconnect(
self.hotkey_toggle_terminal.setKey)
ev.accept()
else:
ev.ignore()
def wheelEvent(self, ev: QtGui.QWheelEvent) -> None:
# If this isn't here textarea will call this method later
# and we'll get an infinite loop
if self.ignore_wheel_event:
self.ignore_wheel_event = False
return
self.ignore_wheel_event = True
self.textarea.wheelEvent(ev)
ev.ignore()
def adjust_tray(self) -> None:
rect = self.textarea.geometry()
self.message_tray.setGeometry(rect)
def create_layout(self, title_label: QtWidgets.QLabel, tab_bar: TabBar,
tab_counter: QtWidgets.QLabel, revision_notice: QtWidgets.QLabel,
textarea: QtWidgets.QTextEdit, terminal: BackstoryTerminal) -> None:
title_label.setAlignment(Qt.AlignCenter)
tab_bar.setDrawBase(False)
revision_notice.setAlignment(Qt.AlignCenter)
revision_notice.hide()
self.setLayout(
VBoxLayout(
title_label,
HBoxLayout(
Stretch(tab_bar, 1),
tab_counter,
),
revision_notice,
HBoxLayout(
Stretch(),
Stretch(textarea, 1),
Stretch(),
),
self.terminal,
)
)
def cmd_quit(self, arg: str) -> None:
self.force_quit_flag = arg == '!'
self.close()
def connect_signals(self) -> None:
self.tab_bar.set_tab_index.connect(self.set_tab_index)
self.settings.backstory_viewer_formats.changed.connect(
self.formatter.update_formats)
t = self.terminal
t.add_command(Command(
'new-page', 'New page',
self.cmd_new_page,
short_name='n',
args=ArgumentRules.REQUIRED,
arg_help={
'filename.txt': 'Create a new page with the specified filename.',
},
))
t.add_command(Command(
'delete-page', 'Delete page',
self.cmd_delete_current_page,
short_name='d',
arg_help={
'': 'Delete the open page.',
'!': 'Confirm the deletion.',
},
))
t.add_command(Command(
'rename-page', 'Rename page',
self.cmd_rename_current_page,
short_name='r',
args=ArgumentRules.REQUIRED,
arg_help={
'Foobar': 'Rename the page to "Foobar".',
},
))
t.add_command(Command(
'save-page', 'Save page',
self.cmd_save_current_page,
short_name='s',
args=ArgumentRules.NONE,
))
t.add_command(Command(
'print-filename', 'Print info about the open file',
self.cmd_print_filename,
short_name='f',
arg_help={
'': 'Print the name of the active file.',
'c': 'Print the creation date of the active file.',
},
))
t.add_command(Command(
'count-words', "Print the page's wordcount",
self.cmd_count_words,
short_name='c',
args=ArgumentRules.NONE,
))
t.add_command(Command(
'quit', 'Quit',
self.cmd_quit,
short_name='q',
arg_help={
'': 'Close the window.',
'!': 'Force close the window.',
},
))
t.add_command(Command(
'revision-control', 'Revision control',
self.cmd_revision_control,
short_name='#',
arg_help={
'': 'Show latest revision.',
'+': 'Add new revision.',
'2': 'Show revision 2 (works with any number).',
'#': 'Print current revision.',
},
))
t.add_command(Command(
'external-edit', 'Open in external program/editor',
self.cmd_external_edit,
short_name='x',
args=ArgumentRules.OPTIONAL,
))
t.add_command(Command(
'search-and-replace', 'Search/replace',
self.searcher.search_or_replace,
short_name='/',
args=ArgumentRules.REQUIRED,
strip_input=False,
arg_help={
'foo': 'Search for "foo".',
'foo/b': (
'Search backwards for "foo". (Can be combined with the '
'other flags in any order.)'
),
'foo/i': (
'Search case-insensitively for "foo". '
'(Can be combined with the other flags in any order.)'
),
'foo/w': (
'Search for "foo", only matching whole words. '
'(Can be combined with the other flags in any order.)'
),
'foo/bar/': (
'Replace the first instance of "foo" with "bar", '
"starting from the cursor's position."
),
'foo/bar/[biw]': (
'The flags work just like in the search action.'
),
'foo/bar/a': (
'Replace all instances of "foo" with "bar". '
'(Can be combined with the other flags in any order.)'
)
},
))
def toggle_terminal(self) -> None:
if self.textarea.hasFocus():
self.terminal.show()
self.terminal.input_field.setFocus()
else:
self.terminal.hide()
self.textarea.setFocus()
def update_tab_counter(self) -> None:
w = len(str(self.tab_bar.count()))
self.tab_counter.setText(f'{self.tab_bar.currentIndex()+1:>{w}}/{self.tab_bar.count()}')
def save_tab(self) -> bool:
"""
Attempt to save the active tab, both the text and the scrollbar/cursor
position.
Return True if it succeeds, return False if it fails.
"""
if self.revision_active:
return True
current_tab = self.tab_bar.currentIndex()
if self.textarea.document().isModified():
try:
file = self.tab_bar.current_page_file()
first_line = read_metadata(file)[0]
data = self.textarea.toPlainText()
file.write_text(first_line + '\n' + data)
except Exception as e:
print(str(e))
self.terminal.error('Something went wrong when saving! (Use q! to force)')
return False
cursor_pos = self.textarea.textCursor().position()
scroll_pos = self.textarea.verticalScrollBar().sliderPosition()
self.tab_bar.set_page_position(current_tab, cursor_pos, scroll_pos)
self.textarea.document().setModified(False)
return True
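# Note on the on-disk format used by save_tab() and friends: each page file
# stores one JSON metadata line (with keys like title/created/revision,
# produced by generate_page_metadata) followed by the plain-text body.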
def load_tab(self, new_tab: int) -> None:
"""
Load a new tab with the correct data and scrollbar/cursor position.
Note that this does not in any way save existing data.
"""
self.tab_bar.setCurrentIndex(new_tab)
self.update_tab_counter()
data = read_metadata(self.current_page_path())[1]
self.textarea.setPlainText(data)
self.textarea.document().setModified(False)
# Set the scrollbar/cursor positions
cursor_pos, scroll_pos = self.tab_bar.get_page_position(new_tab)
tc = self.textarea.textCursor()
tc.setPosition(min(cursor_pos, self.textarea.document().characterCount() - 1))
self.textarea.setTextCursor(tc)
self.textarea.verticalScrollBar().setSliderPosition(scroll_pos)
def set_tab_index(self, new_tab: int) -> None:
"""
This is called whenever the tab is changed, i.e. when either of these
things happen:
* left mouse press on tab
* mouse wheel scroll event on tab
* ctrl pgup/pgdn
"""
if self.revision_active:
self.revision_active = False
self.revision_notice.hide()
self.load_tab(new_tab)
else:
# Save the old tab if needed
success = self.save_tab()
if success:
# Load the new tab
self.load_tab(new_tab)
def make_sure_metadir_exists(self, root: Path) -> None:
"""
Create the metadata directory with stub pages if it doesn't exist.
"""
if not root.exists():
root.mkdir()
for fname, title in self.settings.backstory_default_pages.value:
json_data = json.dumps(generate_page_metadata(title))
(root / fname).write_text(json_data + '\n', encoding='utf-8')
def current_page_path(self) -> Path:
""" Return the current page's full path, including root dir """
return self.tab_bar.current_page_file()
# ======= COMMANDS ========================================================
def cmd_new_page(self, fname: str) -> None:
file = self.root / fname
if file.exists():
self.terminal.error('File already exists')
return
title = fix_title(file)
try:
new_tab = self.tab_bar.add_page(title, file)
except KeyError as e:
self.terminal.error(e.args[0])
else:
file.write_text(json.dumps(generate_page_metadata(title)) + '\n')
# Do this afterwards to have something to load into textarea
self.set_tab_index(new_tab)
def cmd_delete_current_page(self, arg: Optional[str]) -> None:
if arg != '!':
self.terminal.error('Use d! to confirm deletion')
return
try:
file = self.tab_bar.remove_page()
except IndexError as e:
self.terminal.error(e.args[0])
else:
self.load_tab(self.tab_bar.currentIndex())
file.unlink()
def cmd_rename_current_page(self, title: str) -> None:
if not title.strip():
old_title = self.tab_bar.pages[self.tab_bar.currentIndex()][0]
self.terminal.prompt(f'r {old_title}')
return
try:
self.tab_bar.rename_page(title)
except KeyError as e:
self.terminal.error(e.args[0])
else:
file = self.current_page_path()
first_line, data = read_metadata(file)
json_data = json.loads(first_line)
json_data['title'] = title
file.write_text(json.dumps(json_data) + '\n' + data)
def cmd_save_current_page(self) -> None:
self.save_tab()
def cmd_print_filename(self, arg: Optional[str]) -> None:
file = self.current_page_path()
if arg == 'c':
date = json.loads(read_metadata(file)[0])['created']
self.terminal.print_(f'File created at {date}')
else:
self.terminal.print_(self.tab_bar.current_page_file().name)
def cmd_count_words(self) -> None:
wc = len(re.findall(r'\S+', self.textarea.document().toPlainText()))
self.terminal.print_(f'Words: {wc}')
def cmd_revision_control(self, arg: Optional[str]) -> None:
file = self.current_page_path()
json_data: PageMetadata = json.loads(read_metadata(file)[0])
if not arg:
if not self.revision_active:
self.terminal.error('Already showing latest revision')
else:
current_tab = self.tab_bar.currentIndex()
self.set_tab_index(current_tab)
elif arg == '+':
if self.revision_active:
self.terminal.error("Can't create new revision when viewing an old one")
return
saved = self.save_tab()
if saved:
# Re-read the data in case save_tab() just wrote changes
data = read_metadata(file)[1]
rev = json_data['revision']
shutil.copy2(file, file.with_name(f'{file.name}.rev{rev}'))
json_data['revision'] += 1
json_data['revision created'] = datetime.now().isoformat()
file.write_text(json.dumps(json_data) + '\n' + data)
self.terminal.print_(f'Revision increased to {rev + 1}')
# Show a certain revision
elif arg.isdigit():
revfname = file.with_name(f'{file.name}.rev{arg}')
if not revfname.exists():
self.terminal.error(f'Revision {arg} not found')
return
saved = self.save_tab()
if not saved:
return
try:
data = read_metadata(revfname)[1]
except Exception as e:
print(str(e))
self.terminal.error('Something went wrong when loading the revision')
else:
self.textarea.setPlainText(data)
self.textarea.document().setModified(False)
self.revision_active = True
changed_date = datetime.fromtimestamp(revfname.stat().st_mtime)
self.revision_notice.setText(
f'Showing revision {arg}. Changes will not be saved.\n'
f'Last modified: {changed_date}'
)
self.revision_notice.show()
elif arg == '#':
self.terminal.print_(f'Current revision: {json_data["revision"]}')
else:
self.terminal.error(f'Unknown argument: "{arg}"')
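# Revision snapshots live next to the page file, named e.g. "page.txt.rev0",
# "page.txt.rev1", ... (names illustrative, following the
# f'{file.name}.rev{rev}' pattern used above).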
def cmd_external_edit(self, arg: str) -> None:
abbrev = arg.strip() or 'default'
if abbrev not in self.settings.external_commands.value:
self.terminal.error('No matching external command')
return
self.terminal.print_(run_external_command(
self.settings.external_commands.value[abbrev],
self.entry.as_dict()
))
class BackstoryTerminal(terminal.Terminal):
def __init__(self, parent: QtWidgets.QWidget, history_file: Path) -> None:
super().__init__(parent, history_file=history_file)
self.output_field.hide()
self.hide()
|
{
"content_hash": "5ba89462888d5b33e92fcf3386e0bf32",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 96,
"avg_line_length": 38.69106881405564,
"alnum_prop": 0.5545674714296526,
"repo_name": "nycz/sapfo",
"id": "3ea166909f68bb876f221df8372479bbc0a0a829",
"size": "26426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sapfo/backstorywindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6186"
},
{
"name": "Python",
"bytes": "157249"
}
],
"symlink_target": ""
}
|
"""Definition of grr_export plugin."""
import threading
import time
import logging
from grr.lib import data_store
from grr.lib import export
from grr.lib import output_plugin as output_plugin_lib
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import threadpool
from grr.lib import utils
class ExportPlugin(object):
"""Base class for grr_export.py plugins."""
__metaclass__ = registry.MetaclassRegistry
name = None
def ConfigureArgParser(self, parser):
pass
def Run(self, args):
raise NotImplementedError()
class OutputPluginBatchConverter(threadpool.BatchConverter):
"""BatchConverter that applies OutputPlugin to values.
This class applies a specific OutputPlugin to the given set of values
in either a multi-threaded or a single-threaded fashion. See
BatchConverter implementation for details.
Args:
output_plugin: OutputPlugin that will be applied to the values.
kwargs: Arguments that will be passed to threadpool.BatchConverter()
constructor.
"""
def __init__(self, output_plugin=None, **kwargs):
"""Constructor."""
if not output_plugin:
raise ValueError("output_plugin can't be None")
self.output_plugin = output_plugin
self.batches_count = 0
self.lock = threading.RLock()
super(OutputPluginBatchConverter, self).__init__(**kwargs)
@utils.Synchronized
def UpdateBatchCount(self):
"""Updates batches counter and prints a message."""
self.batches_count += 1
logging.info("Batch %d converted.", self.batches_count)
def ConvertBatch(self, batch):
"""Converts batch of values using passed HuntOutputPlugin."""
try:
self.output_plugin.ProcessResponses(batch)
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
self.UpdateBatchCount()
class OutputPluginBasedExportPlugin(ExportPlugin):
"""Base class for ExportPlugins that use OutputPlugins."""
def _ConfigureArgParserForRdfValue(self, parser, value_class):
"""Configures arguments parser with fields of the rdf value class.
This method scans given rdf value class and adds optional arguments to
the given parser. Argument names are equal to corresponding fields in
the rdf value class. Arguments are only added if corresponding fields
have simple underlying type. Fields that have other protobufs as
underlying types are not added as arguments.
Args:
parser: argparse.ArgumentParser-compatible object.
value_class: Class that inherits from RDFValue.
"""
for type_descriptor in value_class.type_infos:
if (not type_descriptor.hidden and type_descriptor.proto_type_name in
["string", "bool", "uint64", "float"]):
kwargs = dict(
help=type_descriptor.description,
default=type_descriptor.default,
required=type_descriptor.required)
if type_descriptor.proto_type_name == "bool":
kwargs["action"] = "store_true"
else:
kwargs["type"] = type_descriptor.type
parser.add_argument("--" + type_descriptor.name, **kwargs)
def _InitRdfValueFromParsedArgs(self, value_class, parsed_args):
"""Builds RDFValue of the given class from given arguments.
This method is the reverse of the _ConfigureArgParserForRdfValue(). It
constructs an RDFValue instance from the parsed arguments that correspond
to RDFValue attributes with primitive underlying protobuf types.
Args:
value_class: Class of value to build. Should be inherited from RDFValue.
parsed_args: argparser.Namespace-compatible object.
Returns:
RDFValue instance.
"""
args = dict()
for type_descriptor in value_class.type_infos:
if (not type_descriptor.hidden and type_descriptor.name in parsed_args and
type_descriptor.proto_type_name in
["string", "bool", "uint64", "float"]):
args[type_descriptor.name] = getattr(parsed_args, type_descriptor.name)
return value_class(**args)
def _FindOutputPluginByName(self, plugin_name):
"""Finds output plugin with a given name."""
for cls in output_plugin_lib.OutputPlugin.classes.itervalues():
if cls.name == plugin_name:
return cls
raise KeyError(plugin_name)
def _CreateOutputPluginFromArgs(self, collection_urn, args):
"""Creates OutputPlugin using given args as constructor arguments.
If the OutputPlugin args have an "export_options" attribute, it is
filled from the arguments corresponding to export.ExportOptions.
Args:
collection_urn: Urn of the collection with the values to process.
args: argparse.Namespace-compatible object with parsed command
line arguments.
Returns:
OutputPlugin instance.
"""
output_plugin_class = self._FindOutputPluginByName(args.plugin)
if output_plugin_class.args_type:
output_plugin_args = self._InitRdfValueFromParsedArgs(
output_plugin_class.args_type, args)
if hasattr(output_plugin_args, "export_options"):
export_options = self._InitRdfValueFromParsedArgs(export.ExportOptions,
args)
output_plugin_args.export_options = export_options
else:
output_plugin_args = None
return output_plugin_class(
source_urn=collection_urn,
output_base_urn=rdfvalue.RDFURN("aff4:/export/%s" % time.time()),
args=output_plugin_args,
token=data_store.default_token)
def _ProcessValuesWithOutputPlugin(self, values, output_plugin, args):
"""Processes given values with given output plugin."""
checkpoints = utils.Grouper(values, args.checkpoint_every)
for index, checkpoint in enumerate(checkpoints):
logging.info("Starting checkpoint %d.", index)
batch_converter = OutputPluginBatchConverter(
batch_size=args.batch,
threadpool_size=args.threads,
output_plugin=output_plugin)
batch_converter.Convert(checkpoint)
logging.info("Checkpointing (checkpoint %d)...", index)
output_plugin.Flush()
logging.info("Checkpoint %d done.", index)
def GetValuesSourceURN(self, args):
"""Returns URN describing where exported values are coming from."""
_ = args
raise NotImplementedError()
def GetValuesForExport(self, args):
"""Returns values that should be processed with the OutputPlugin."""
_ = args
raise NotImplementedError()
def ConfigureArgParser(self, parser):
"""Configures args parser based on plugin's args RDFValue."""
self._ConfigureArgParserForRdfValue(parser, export.ExportOptions)
subparsers = parser.add_subparsers(title="Output plugins")
for cls in output_plugin_lib.OutputPlugin.classes.itervalues():
if not cls.name:
continue
subparser = subparsers.add_parser(cls.name, help=cls.description)
subparser.set_defaults(plugin=cls.name)
if cls.args_type:
self._ConfigureArgParserForRdfValue(subparser, cls.args_type)
def Run(self, args):
"""Applies output plugin to the given collection."""
output_plugin = self._CreateOutputPluginFromArgs(
self.GetValuesSourceURN(args), args)
logging.info("Initialized plugin '%s' with the state:", output_plugin.name)
logging.info(utils.SmartUnicode(output_plugin.state))
collection = self.GetValuesForExport(args)
self._ProcessValuesWithOutputPlugin(collection, output_plugin, args)
|
{
"content_hash": "86a278d3df8e4285700c15f28be2a4f8",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 80,
"avg_line_length": 34.46788990825688,
"alnum_prop": 0.7017567207878627,
"repo_name": "destijl/grr",
"id": "ba827642ec3e4a1542b3f3c8696813037ec5d77d",
"size": "7536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/tools/export_plugins/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304794"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26524"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "173692"
},
{
"name": "JavaScript",
"bytes": "63181"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "307091"
},
{
"name": "Python",
"bytes": "6407750"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40334"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from benchpress.benchmarks import util
# If Bohrium is installed we would use its convolve, otherwise the one from
# SciPy. The ImportError raised below deliberately forces the SciPy fallback.
try:
raise ImportError
import bohrium as bh
convolveNd = bh.convolve_scipy
except ImportError:
import scipy
from scipy import signal
convolveNd = signal.convolve
bench = util.Benchmark("Convolution Filter (any dimensional)", "<image-size>*<filter-size>*<ndims>*<niters>")
def main():
"""
Convolve filter (any dimensional)
Parameter: `<image-size>*<filter-size>*<ndims>*<niters>`
where image and filter size is the size of each dimension (not their total size).
"""
(image_size, filter_size, ndims, I) = bench.args.size
image = bench.random_array((image_size ** ndims,)).reshape([image_size] * ndims)
image_filter = bench.random_array((filter_size ** ndims,)).reshape([filter_size] * ndims)
bench.start()
for _ in range(I):
R = convolveNd(image, image_filter)
bench.flush()
bench.stop()
bench.save_data({'res': R})
bench.pprint()
if __name__ == "__main__":
main()
|
{
"content_hash": "9cbaa0c024da73e72fe3105498b7c3ec",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 109,
"avg_line_length": 29.94736842105263,
"alnum_prop": 0.655536028119508,
"repo_name": "bh107/benchpress",
"id": "9bb97aae19ca49d3a9d221789585d340e8fd5175",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchpress/benchmarks/convolve/python_numpy/convolve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "94777"
},
{
"name": "C#",
"bytes": "92860"
},
{
"name": "C++",
"bytes": "163618"
},
{
"name": "CMake",
"bytes": "211"
},
{
"name": "Chapel",
"bytes": "2321"
},
{
"name": "Makefile",
"bytes": "2223"
},
{
"name": "Objective-C",
"bytes": "40804"
},
{
"name": "Python",
"bytes": "195922"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
class SamplingOpsTest(tf.test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [tf.zeros([1, 3]), tf.ones([1, 5])]
label = tf.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
initial_p = [.1, .3, .1, .3, .2] # only used for stratified_sample
batch_size = 16
# Curry the rejection sampler so we can easily run the same tests on both
# stratified_sample and stratified_sample_unknown_dist.
def curried_sampler(tensors, labels, probs, batch_size, enqueue_many=True):
return tf.contrib.framework.sampling_ops.stratified_sample(
tensors=tensors,
labels=labels,
target_probs=probs,
batch_size=batch_size,
init_probs=initial_p,
enqueue_many=enqueue_many)
samplers = [
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist,
curried_sampler,
]
for sampler in samplers:
logging.info('Now testing `%s`', sampler.__name__)  # plain functions have no useful __class__.__name__
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampler(val, tf.zeros([]), probs, batch_size, enqueue_many=True)
with self.assertRaises(ValueError):
sampler(val, tf.zeros([1, 1]), probs, batch_size, enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampler(val, tf.constant([0, 1, 0, 0, 0]), probs, batch_size)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampler(tf.zeros([1, 3]), label, probs, batch_size)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampler(val, tf.constant(1), probs, batch_size, enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampler([tf.zeros([2, 1])], label, probs, batch_size, enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampler(val, label, 1, batch_size)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampler(
val, label, tf.placeholder(
tf.float32, shape=[None]), batch_size)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
# Probabilities must be 1D.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val, label, np.array([[.25, .25], [.25, .25]]), batch_size)
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [tf.zeros([3, 1])]
illegal_labels = [
[0, -1, 1], # classes must be nonnegative
[5, 1, 1], # classes must be less than number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = tf.placeholder(tf.int32, shape=[None])
probs_ph = tf.placeholder(tf.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = tf.contrib.framework.sampling_ops._verify_input(
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def batchingBehaviorHelper(self, sampler):
batch_size = 20
input_batch_size = 11
val_input_batch = [tf.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = tf.cond(
tf.greater(.5, tf.random_uniform([])),
lambda: tf.ones([input_batch_size], dtype=tf.int32) * 1,
lambda: tf.ones([input_batch_size], dtype=tf.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
data_batch, labels = sampler(
val_input_batch, lbl_input_batch, probs, batch_size, enqueue_many=True)
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [tf.zeros([2, 3, 4])]
lbl_input_batch = tf.ones([], dtype=tf.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = tf.contrib.framework.sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += tf.contrib.framework.sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val_input_batch, lbl_input_batch, probs, batch_size)
batches += tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val_input_batch, lbl_input_batch, probs, batch_size)
summary_op = tf.merge_summary(tf.get_collection(tf.GraphKeys.SUMMARIES))
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testBatchingBehavior(self):
self.batchingBehaviorHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionBatchingBehavior(self):
initial_p = [0, .3, 0, .7, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=True):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.batchingBehaviorHelper(curried_sampler)
def testProbabilitiesCanBeChanged(self):
# Set up graph.
tf.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = tf.cond(
tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
lambda: tf.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = tf.placeholder(tf.float32, shape=[5])
batch_size = 2
data_batch, labels = tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist( # pylint: disable=line-too-long
val, label, probs, batch_size)
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for _ in range(5):
[data], lbls = sess.run([data_batch, labels],
feed_dict={probs: [1, 0, 0, 0, 0]})
for data_example in data:
self.assertListEqual([0, 0], list(data_example))
self.assertListEqual([0, 0], list(lbls))
# Now change distribution and expect different output.
for _ in range(5):
[data], lbls = sess.run([data_batch, labels],
feed_dict={probs: [0, 0, 0, 1, 0]})
for data_example in data:
self.assertListEqual([3, 12], list(data_example))
self.assertListEqual([3, 3], list(lbls))
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = tf.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = tf.placeholder(tf.float32) # completely undefined shape
labels_ph = tf.placeholder(tf.int32) # completely undefined shape
val_tf, labels_tf, _ = tf.contrib.framework.sampling_ops._verify_input(
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.test_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def dataListHelper(self, sampler):
batch_size = 20
val_input_batch = [tf.zeros([2, 3, 4]), tf.ones([2, 4]), tf.ones(2) * 3]
lbl_input_batch = tf.ones([], dtype=tf.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampler(val_input_batch, lbl_input_batch, probs,
batch_size)
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, tf.Tensor))
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def testDataListInput(self):
self.dataListHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionDataListInput(self):
initial_p = [0, 1, 0, 0, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.dataListHelper(curried_sampler)
def normalBehaviorHelper(self, sampler):
# Set up graph.
tf.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = tf.cond(
tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
lambda: tf.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.test_session() as sess:
# Need to initialize variables that keep running total of classes seen.
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
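# Illustrative numbers: with probs = [.8, 0, 0, .2, 0], lbl1 = 0 and
# lbl2 = 3, expected_label = .8 * 0 + .2 * 3 = 0.6.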
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testNormalBehavior(self):
self.normalBehaviorHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "fd1c4e3cf143f5d78d02646fb651d891",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 123,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.6354831766984719,
"repo_name": "Lab603/PicEncyclopedias",
"id": "35b56bdfa1a0c8871344627df9be12759a0a3fe5",
"size": "14649",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jni-build/jni/include/tensorflow/contrib/framework/python/ops/sampling_ops_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "361482"
},
{
"name": "C++",
"bytes": "22994090"
},
{
"name": "CMake",
"bytes": "72924"
},
{
"name": "CSS",
"bytes": "1548"
},
{
"name": "HTML",
"bytes": "1040352"
},
{
"name": "Java",
"bytes": "252082"
},
{
"name": "JavaScript",
"bytes": "25902"
},
{
"name": "Jupyter Notebook",
"bytes": "3547008"
},
{
"name": "Makefile",
"bytes": "47206"
},
{
"name": "Objective-C",
"bytes": "10664"
},
{
"name": "Objective-C++",
"bytes": "91354"
},
{
"name": "Python",
"bytes": "19063444"
},
{
"name": "Shell",
"bytes": "476334"
},
{
"name": "TypeScript",
"bytes": "1264488"
}
],
"symlink_target": ""
}
|
import uuid
from keystone.common import sql
from keystone.contrib import revoke
from keystone.contrib.revoke import model
class RevocationEvent(sql.ModelBase, sql.ModelDictMixin):
__tablename__ = 'revocation_event'
attributes = model.REVOKE_KEYS
# The id field is not going to be exposed to the outside world.
# It is, however, necessary for SQLAlchemy.
id = sql.Column(sql.String(64), primary_key=True)
domain_id = sql.Column(sql.String(64))
project_id = sql.Column(sql.String(64))
user_id = sql.Column(sql.String(64))
role_id = sql.Column(sql.String(64))
trust_id = sql.Column(sql.String(64))
consumer_id = sql.Column(sql.String(64))
access_token_id = sql.Column(sql.String(64))
issued_before = sql.Column(sql.DateTime(), nullable=False)
expires_at = sql.Column(sql.DateTime())
revoked_at = sql.Column(sql.DateTime(), nullable=False)
audit_id = sql.Column(sql.String(32))
audit_chain_id = sql.Column(sql.String(32))
class Revoke(revoke.Driver):
def _flush_batch_size(self, dialect):
batch_size = 0
if dialect == 'ibm_db_sa':
# This functionality is limited to DB2, because
# it is necessary to prevent the transaction log
# from filling up, whereas at least some of the
# other supported databases do not support update
# queries with LIMIT subqueries nor do they appear
# to require the use of such queries when deleting
# large numbers of records at once.
batch_size = 100
# Limit of 100 is known to not fill a transaction log
# of default maximum size while not significantly
# impacting the performance of large token purges on
# systems where the maximum transaction log size has
# been increased beyond the default.
return batch_size
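# Illustrative pseudo-SQL for one iteration of the batched path below
# (the real statements are generated by SQLAlchemy per dialect):
#   DELETE FROM revocation_event WHERE id IN
#       (SELECT id FROM revocation_event
#        WHERE revoked_at < :oldest LIMIT 100);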
def _prune_expired_events(self):
oldest = revoke.revoked_before_cutoff_time()
session = sql.get_session()
dialect = session.bind.dialect.name
batch_size = self._flush_batch_size(dialect)
if batch_size > 0:
query = session.query(RevocationEvent.id)
query = query.filter(RevocationEvent.revoked_at < oldest)
query = query.limit(batch_size).subquery()
delete_query = (session.query(RevocationEvent).
filter(RevocationEvent.id.in_(query)))
while True:
rowcount = delete_query.delete(synchronize_session=False)
if rowcount == 0:
break
else:
query = session.query(RevocationEvent)
query = query.filter(RevocationEvent.revoked_at < oldest)
query.delete(synchronize_session=False)
session.flush()
def get_events(self, last_fetch=None):
self._prune_expired_events()
session = sql.get_session()
query = session.query(RevocationEvent).order_by(
RevocationEvent.revoked_at)
if last_fetch:
query = query.filter(RevocationEvent.revoked_at > last_fetch)
events = [model.RevokeEvent(**e.to_dict()) for e in query]
return events
def revoke(self, event):
kwargs = dict()
for attr in model.REVOKE_KEYS:
kwargs[attr] = getattr(event, attr)
kwargs['id'] = uuid.uuid4().hex
record = RevocationEvent(**kwargs)
session = sql.get_session()
with session.begin():
session.add(record)
|
{
"content_hash": "1321e808ef82949463c23d565228f3dd",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 73,
"avg_line_length": 38.619565217391305,
"alnum_prop": 0.6234168308471714,
"repo_name": "UTSA-ICS/keystone-kerberos",
"id": "8d50ac6cc0be0dc29695b97b5e04e6e27af9a00a",
"size": "4099",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "keystone/contrib/revoke/backends/sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3521321"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
}
|
""" support for providing temporary directories to test functions. """
import pytest, py
from _pytest.monkeypatch import monkeypatch
class TempdirHandler:
def __init__(self, config):
self.config = config
self.trace = config.trace.get("tmpdir")
def ensuretemp(self, string, dir=1):
""" (deprecated) return temporary directory path with
the given string as the trailing part. It is usually
better to use the 'tmpdir' function argument which
provides a unique-per-test-invocation directory
that is guaranteed to be empty.
"""
#py.log._apiwarn(">1.1", "use tmpdir function argument")
return self.getbasetemp().ensure(string, dir=dir)
def mktemp(self, basename, numbered=True):
basetemp = self.getbasetemp()
if not numbered:
p = basetemp.mkdir(basename)
else:
p = py.path.local.make_numbered_dir(prefix=basename,
keep=0, rootdir=basetemp, lock_timeout=None)
self.trace("mktemp", p)
return p
def getbasetemp(self):
""" return base temporary directory. """
try:
return self._basetemp
except AttributeError:
basetemp = self.config.option.basetemp
if basetemp:
basetemp = py.path.local(basetemp)
if basetemp.check():
basetemp.remove()
basetemp.mkdir()
else:
basetemp = py.path.local.make_numbered_dir(prefix='pytest-')
self._basetemp = t = basetemp.realpath()
self.trace("new basetemp", t)
return t
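# E.g. with no --basetemp given, repeated runs create numbered dirs
# such as /tmp/pytest-0, /tmp/pytest-1, ... (paths illustrative).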
def finish(self):
self.trace("finish")
def pytest_configure(config):
mp = monkeypatch()
t = TempdirHandler(config)
config._cleanup.extend([mp.undo, t.finish])
mp.setattr(config, '_tmpdirhandler', t, raising=False)
mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
@pytest.fixture
def tmpdir(request):
"""return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
"""
name = request.node.name
name = py.std.re.sub("[\W]", "_", name)
x = request.config._tmpdirhandler.mktemp(name, numbered=True)
return x
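# A minimal usage sketch of the fixture (test body is illustrative):
#   def test_write(tmpdir):
#       p = tmpdir.join("hello.txt")
#       p.write("content")
#       assert p.read() == "content"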
|
{
"content_hash": "75384fad77edb1e134f04f995077f1bb",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 35.64705882352941,
"alnum_prop": 0.6117986798679867,
"repo_name": "ktan2020/legacy-automation",
"id": "3907c92b73f489b07e5c0cb811fe4663fc0f311f",
"size": "2424",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/pytest-2.3.4-py2.7.egg/_pytest/tmpdir.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
from myclasses import Logger
import unittest
__author__ = 'kinpa200296'
test_res = '''say_hello() = Hello, world!
say_hello(name=Bob) = Hello, Bob!
calc_sum(10, 20) = 30
calc_sum(3, 4) = 7
say_age(18) = John is 18.
say_age(18, name=Bob) = Bob is 18.'''
class Dummy(Logger):
@staticmethod
def say_hello(name=None):
if name is None:
return 'Hello, world!'
else:
return 'Hello, {name}!'.format(name=name)
@staticmethod
def calc_sum(a, b):
return a + b
@staticmethod
def say_age(age, name='John'):
return '{name} is {age}.'.format(name=name, age=age)
class BadDummy(Logger):
def __init__(self):
pass
class TestLogger(unittest.TestCase):
def test_wrong_inheritance(self):
with self.assertRaises(TypeError):
bad_dummy = BadDummy()
bad_dummy.func()
def test_empty_log(self):
dummy = Dummy()
self.assertEqual(str(dummy), '')
def test_logs(self):
dummy = Dummy()
dummy.say_hello()
dummy.say_hello(name='Bob')
dummy.calc_sum(10, 20)
dummy.calc_sum(3, 4)
dummy.say_age(18)
dummy.say_age(18, name='Bob')
self.assertEqual(str(dummy), test_res)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLogger)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "f9fbea5911a9609f9f88f2d6fbaef2f3",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 63,
"avg_line_length": 23.67241379310345,
"alnum_prop": 0.5957756737072105,
"repo_name": "kinpa200296/python_labs",
"id": "b3f229c2413953409c380445f612a92576fff5e3",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab2/tests/MyClassesTests/LoggerTests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "734"
},
{
"name": "Python",
"bytes": "52669"
}
],
"symlink_target": ""
}
|
""":mod:`robot.api` package exposes the public APIs of Robot Framework.
Unless stated otherwise, the APIs exposed in this package are considered
stable, and thus safe to use when building external tools on top of
Robot Framework. Notice that all parsing APIs were rewritten in Robot
Framework 3.2.
Currently exposed APIs are:
* :mod:`.logger` module for libraries' logging purposes.
* :mod:`.deco` module with decorators libraries can utilize.
* :mod:`.exceptions` module containing exceptions that libraries can utilize for
reporting failures and other events. These exceptions can be imported also directly
via :mod:`robot.api` like ``from robot.api import SkipExecution``.
* :mod:`.parsing` module exposing the parsing APIs. This module is new in Robot
Framework 4.0. Various parsing related functions and classes were exposed
directly via :mod:`robot.api` already in Robot Framework 3.2, but they are
effectively deprecated and will be removed in the future.
* :class:`~robot.running.model.TestSuite` class for creating executable
test suites programmatically and
:class:`~robot.running.builder.builders.TestSuiteBuilder` class
for creating such suites based on existing test data on the file system.
* :class:`~robot.model.visitor.SuiteVisitor` abstract class for processing testdata
before execution. This can be used as a base for implementing a pre-run
modifier that is taken into use with ``--prerunmodifier`` commandline option.
* :func:`~robot.result.resultbuilder.ExecutionResult` factory method
for reading execution results from XML output files and
:class:`~robot.result.visitor.ResultVisitor` abstract class to ease
further processing the results.
:class:`~robot.result.visitor.ResultVisitor` can also be used as a base
for pre-Rebot modifier that is taken into use with ``--prerebotmodifier``
commandline option.
* :class:`~robot.reporting.resultwriter.ResultWriter` class for writing
reports, logs, XML outputs, and XUnit files. Can write results based on
XML outputs on the file system, as well as based on the result objects
returned by the :func:`~robot.result.resultbuilder.ExecutionResult` or
an executed :class:`~robot.running.model.TestSuite`.
All of the above names can be imported like::
from robot.api import ApiName
See documentations of the individual APIs for more details.
.. tip:: APIs related to the command line entry points are exposed directly
via the :mod:`robot` root package.
"""
from robot.model import SuiteVisitor
from robot.parsing import (get_tokens, get_resource_tokens, get_init_tokens,
get_model, get_resource_model, get_init_model,
Token)
from robot.reporting import ResultWriter
from robot.result import ExecutionResult, ResultVisitor
from robot.running import TestSuite, TestSuiteBuilder
from .exceptions import ContinuableFailure, Failure, FatalError, Error, SkipExecution
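# A minimal usage sketch of the result APIs exposed above (file names are
# illustrative):
#   result = ExecutionResult('output.xml')
#   result.visit(ResultVisitor())  # subclass ResultVisitor for real work
#   ResultWriter(result).write_results(report='report.html', log='log.html')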
|
{
"content_hash": "71397acb95ab01e9626b56a7a2e0338f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 85,
"avg_line_length": 46.046875,
"alnum_prop": 0.7675602307431286,
"repo_name": "HelioGuilherme66/robotframework",
"id": "7c2eb459ec39c1bbb09ae52eb07a6bbef89bd2f5",
"size": "3591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44706"
},
{
"name": "HTML",
"bytes": "86409"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2671114"
},
{
"name": "RobotFramework",
"bytes": "1231105"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os
import re
from argparse import ArgumentParser
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
from cStringIO import StringIO
# pygccxml is used below for declaration/type introspection.
import pygccxml
def getType(v):
if hasattr(v, "decl_type"):
return getType(v.decl_type)
if hasattr(v, "declaration"):
return getType(v.declaration)
return v
class IdxGenerator(object):
"""Generates a the .idx file for an ITK wrapping submodule (which usually
corresponds to a class)."""
def __init__(self, moduleName):
self.moduleName = moduleName
# the output file
self.outputFile = StringIO()
def create_idxfile(self, idxFilePath, wrappersNamespace):
# iterate over all the typedefs in the _wrapping_::wrappers namespace
for typedef in wrappersNamespace.typedefs():
n = typedef.name
s = getType(typedef).decl_string
# drop the :: prefix - it makes swig produce invalid code
if s.startswith("::"):
s = s[2:]
self.outputFile.write("{%s} {%s} {%s}\n" % (s, n, self.moduleName))
content = self.outputFile.getvalue()
with open(idxFilePath, "w") as f:
f.write(content)
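# Each idx line has the form "{full C++ type} {alias} {module}", e.g.
# (values illustrative): {itk::Image< float, 2u >} {itkImageF2} {ITKCommon}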
class SwigInputGenerator(object):
"""Generates a swig input .i file for an ITK module."""
notWrapped = [
"itk::AtomicInt<.+>",
"itk::MapContainer< unsigned long, itk::CellInterface<.+>",
"itk::VectorContainer< unsigned long, itk::CellInterface<.+>",
"itk::CellInterface< double, itk::QuadEdgeMeshCellTraitsInfo<.+>",
"itk::QuadEdgeMeshLineCell< itk::CellInterface<.+>",
"itk::SmartPointerForwardReference<.+>",
"itk::LibHandle",
"itk::NeighborhoodAllocator<.+>",
# to avoid wrapping all the region for all the dims
"itk::ImageRegion<.+>",
"itk::ImportImageContainer<.+>",
"itk::DefaultPixelAccessor<.+>",
"itk::NeighborhoodAccessorFunctor<.+>",
"itk::DefaultVectorPixelAccessor<.+>",
"itk::VectorImageNeighborhoodAccessorFunctor<.+>",
"itk::.*Iterator.*", # TODO: remove this one ?
"itk::Neighborhood<.+>", # TODO: remove this one
"itk::ThreadFunctionType",
"itk::Functor::.+",
"itk::SmartPointer< itk::Functor::.+",
"itk::Function::.+",
"itk::.+Function.*", # Level set functions
"itk::watershed::.+", # ignore the internal classes of the watershed
# require to wrap too more type
"itk::SmartPointer< itk::VoronoiDiagram2D<.+> >",
# used internally in ImageToImageMetric
"itk::Image< itk::CovariantVector< double, \d+u >, \d+u >",
"itk::FixedArray< itk::SmartPointer.+ >",
# used internally in itkTransformBase
"itk::SmartPointer< itk::Transform.+ >",
# used internally in itkMattesMutualInformationImageToImageMetric
"itk::SmartPointer< itk::Image.+ >"
]
notWrappedRegExp = re.compile("|".join(["^" + s + "$" for s in notWrapped]))
# stdcomplex code
stdcomplex_headers = {
"D": """ class stdcomplexD {
public:
~stdcomplexD();
stdcomplexD & operator=(stdcomplexD const & arg0);
stdcomplexD(stdcomplexD const & arg0);
stdcomplexD(stdcomplexD __z);
stdcomplexD(double __r = 0.0, double __i = 0.0);
stdcomplexD(stdcomplexF const & __z);
double real();
double const real() const;
double imag();
double const imag() const;
stdcomplexD & operator=(double __d);
stdcomplexD & operator+=(double __d);
stdcomplexD & operator-=(double __d);
stdcomplexD & operator*=(double __d);
stdcomplexD & operator/=(double __d);
// stdcomplexD const & __rep() const;
private:
protected:
};
""",
"F": """class stdcomplexF {
public:
~stdcomplexF();
stdcomplexF & operator=(stdcomplexF const & arg0);
stdcomplexF(stdcomplexF const & arg0);
stdcomplexF(stdcomplexF __z);
stdcomplexF(float r = 0.0f, float i = 0.0f);
stdcomplexF(stdcomplexD const & __z);
float real();
float const real() const;
float imag();
float const imag() const;
stdcomplexF & operator=(float __f);
stdcomplexF & operator+=(float __f);
stdcomplexF & operator-=(float __f);
stdcomplexF & operator*=(float __f);
stdcomplexF & operator/=(float __f);
// stdcomplexF const & __rep() const;
private:
protected:
};
"""}
def __init__(self, moduleName, options):
self.moduleName = moduleName
self.options = options
self.outputFile = StringIO()
self.applyFileNames = []
# a dict to let us use the alias name instead of the full c++ name. Without
# that, in many cases, swig doesn't know that it's the same type
self.aliases = {}
# a set of used types
self.usedTypes = set()
# a dict to store the file where the def comes from
self.typedefSource = {}
self.warnings = set()
self.mdx_loaded = set()
self.verbose = options.verbose
def warn(self, id, msg, doWarn=True):
if not doWarn:
# don't warn for anything
return
if str(id) not in self.options.warnings:
if not self.verbose and (id, msg) in self.warnings:
# just do nothing
return
self.warnings.add((id, msg))
if self.verbose:
if self.options.warningError:
print("error(%s): %s" % (str(id), msg), file=sys.stderr)
else:
print("warning(%s): %s" % (str(id), msg), file=sys.stderr)
else:
if self.options.warningError:
print(
"%s: error(%s): %s" %
(self.moduleName, str(id), msg), file=sys.stderr)
else:
print(
"%s: warning(%s): %s" %
(self.moduleName, str(id), msg), file=sys.stderr)
def info(self, msg):
if self.verbose:
print("info: %s" % msg, file=sys.stderr)
@staticmethod
def getDeclarationString(t):
t = getType(t)
if t.decl_string == "::PyObject *":
# don't go further - we want to keep that one as is
return "::PyObject *"
if isinstance(t, pygccxml.declarations.cpptypes.pointer_t):
return SwigInputGenerator.getDeclarationString(getType(t.base)) + " *"
elif isinstance(t, pygccxml.declarations.cpptypes.const_t):
return SwigInputGenerator.getDeclarationString(getType(t.base)) + " const"
elif isinstance(t, pygccxml.declarations.cpptypes.reference_t):
return SwigInputGenerator.getDeclarationString(getType(t.base)) + " &"
return t.decl_string
def renameTypesInSTL(self, s):
if s.startswith("std::") and \
pygccxml.declarations.templates.is_instantiation(s):
args = []
for arg in pygccxml.declarations.templates.args(s):
t, d = SwigInputGenerator.typeAndDecorators(arg)
args.append(self.renameTypesInSTL(self.get_alias(t)) + d)
return pygccxml.declarations.templates.join(
pygccxml.declarations.templates.name(s),
args) + SwigInputGenerator.typeAndDecorators(s)[1]
return s
@staticmethod
def removeStdAllocator(s):
if pygccxml.declarations.templates.is_instantiation(s):
args = []
for arg in pygccxml.declarations.templates.args(s):
if not arg.startswith("std::allocator"):
t, d = SwigInputGenerator.typeAndDecorators(arg)
args.append(SwigInputGenerator.removeStdAllocator(t) + d)
return pygccxml.declarations.templates.join(
pygccxml.declarations.templates.name(s),
args) + SwigInputGenerator.typeAndDecorators(s)[1]
return s
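# For example, removeStdAllocator("std::vector< int, std::allocator< int > >")
# returns roughly "std::vector< int >" (exact spacing comes from pygccxml's
# template join).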
@staticmethod
def typeAndDecorators(s):
end = ""
s = s.strip()
ends = [" ", "*", "&", "const"]
needToContinue = True
while needToContinue:
needToContinue = False
for e in ends:
if s.endswith(e):
end = e + end
s = s[:-len(e)]
needToContinue = True
return (s, end)
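# For example, typeAndDecorators("int const *") returns ("int", " const *"):
# the decorators are peeled off the right end and reattached by callers.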
def get_alias(self, decl_string, w=True):
s = str(decl_string)
# drop the :: prefix - it makes swig produce invalid code
if s.startswith("::"):
s = s[2:]
# normalize string
s = SwigInputGenerator.normalize(s)
# workaround a bug - or is it a feature ? - somewhere
s = s.replace("complex float", "std::complex<float>")
s = s.replace("complex double", "std::complex<double>")
s = s.replace("complex long double", "std::complex<long double>")
(s, end) = SwigInputGenerator.typeAndDecorators(s)
if s in self.aliases:
self.usedTypes.add(self.aliases[s])
return self.aliases[s] + end
if s.startswith("itk::Templates::"):
# that's an explicitly instantiated type. The name is the same as
# the WrapITK one, so let's use it as a base for WrapITK.
# Ex: itk::Templates::RGBPixelUC
# Don't store the new string in s, because we need it unchanged if
# the type is explicitly instantiated, but not wrapped.
new_s = s.replace("::Templates::", "")
if new_s.split("::")[0] in self.aliases.values():
self.usedTypes.add(new_s)
return new_s + end
if s[:s.rfind("::")] in self.aliases:
# take care of subtypes/enum/...
alias = self.aliases[s[:s.rfind("::")]] + s[s.rfind("::"):]
self.usedTypes.add(alias)
return alias + end
# replace the types defined in this type, to support
# std::vector<itkDataObject> for example
s = self.renameTypesInSTL(s)
# drop the allocator part of the type, because it is not supported by the
# %template directive with some generators (like tcl)
s = SwigInputGenerator.removeStdAllocator(s)
# rename basic_string to std::string to make name shorter
s = s.replace("std::basic_string< char >", "std::string")
s = s.replace(
"std::basic_string< char, std::char_traits< char > >",
"std::string")
s = s.replace(
"std::basic_ostream< char, std::char_traits< char > >",
"std::ostream")
s = s.replace(
"std::basic_istream< char, std::char_traits< char > >",
"std::istream")
s = s.replace(
"std::basic_ofstream< char, std::char_traits< char > >",
"std::ostream")
s = s.replace(
"std::basic_ifstream< char, std::char_traits< char > >",
"std::istream")
# rename some types not renamed by gccxml (why ?)
s = s.replace("itk::SerieUIDContainer", "std::vector< std::string >")
s = s.replace("itk::FilenamesContainer", "std::vector< std::string >")
if s.startswith("itk::") and not self.notWrappedRegExp.match(s):
self.warn(
4,
"ITK type not wrapped, or currently not known: %s" %
s,
w)
self.usedTypes.add(s)
return s + end
def load_idx(self, file_name):
with open(file_name, "r") as f:
for line in f:
(full_name, alias, module) = \
re.findall(r'{(.*)} {(.*)} {(.*)}', line)[0]
# workaround lack of :: prefix in idx files
# TODO: would it be better to remove the :: prefix in the output of
# pygccxml ?
# full_name = "::"+full_name
# normalize some basic type names
full_name = self.normalize(full_name)
if full_name in self.aliases:
# If the full_name key already exists, do not overwrite the
# value. load_idx() is called once before load_mdx(), making
# sure the first aliases loaded are the ones belonging to
# the current submodule (and the next load_idx() calls
# should not overwrite these aliases).
continue
self.aliases[full_name] = alias
# store the source of the def
if alias in self.typedefSource and file_name != self.typedefSource[alias]:
self.warn(
7, "%s in %s is already defined in %s." %
(alias, file_name, self.typedefSource[alias]))
else:
self.typedefSource[alias] = file_name
def load_mdx(self, file_name):
if file_name in self.mdx_loaded:
# already loaded - no need to do it again
return
self.mdx_loaded.add(file_name)
with open(file_name, "r") as f:
lines = f.readlines()
for line in lines:
line_stripped = line.strip()
if line.startswith('%') or line.isspace():
# exclude lines starting with '%' - those are not idx files
pass
elif line_stripped.endswith(".mdx"):
self.load_mdx(os.path.dirname(file_name) + os.sep + line_stripped)
elif line_stripped[:-4] == self.moduleName:
continue
else:
self.load_idx(os.path.dirname(file_name) + os.sep + line_stripped)
@staticmethod
def normalize(name):
name = name.replace("short unsigned int", "unsigned short")
name = name.replace("long unsigned int", "unsigned long")
name = name.replace("long long unsigned int", "unsigned long long")
name = name.replace("short int", "short")
name = name.replace("long int", "long")
name = name.replace("long long int", "long long")
# name = name.replace("unsigned int", "unsigned")
# normalize spaces
name = " ".join(name.replace(',', ', ').split())
return name
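# E.g. normalize("short unsigned int,long int") returns
# "unsigned short, long": spellings are canonicalized and spaces squeezed.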
def generate_class(self, typedef, indent=0):
self.info("Generating interface for %s." % typedef.name)
decls = pygccxml.declarations
if not typedef.name.startswith("stdcomplex"):
super_classes = []
for super_class in getType(typedef).bases:
super_classes.append(
"%s %s" %
(super_class.access,
self.get_alias(
super_class.related_class.decl_string)))
s = ""
if super_classes:
s = " : " + ", ".join(super_classes)
self.outputFile.write(" " * indent)
self.outputFile.write("class %s%s {\n" % (typedef.name, s))
# iterate over access
for access in decls.ACCESS_TYPES.ALL:
# the access type
self.outputFile.write(" " * indent)
self.outputFile.write(" %s:\n" % access)
# warnings or no warning?
w = access not in self.options.access_warnings
# iterate over the members
for member in getType(typedef).get_members(access=access):
if isinstance(member, decls.typedef.typedef_t):
self.warn(
51,
"Member typedef are not supported: %s" %
member.name,
w)
elif isinstance(member, decls.member_function_t):
self.generate_method(typedef, member, indent, w)
elif isinstance(member, decls.constructor_t):
self.generate_constructor(typedef, member, indent, w)
elif isinstance(member, decls.member_operator_t):
self.generate_method(typedef, member, indent, w)
elif isinstance(member, decls.destructor_t):
self.generate_destructor(typedef, member, indent, w)
elif isinstance(member, decls.enumeration_t):
self.generate_nested_enum(typedef, member, indent, w)
elif isinstance(member, decls.variable_t):
self.warn(
52,
"Member variables are not supported: %s" %
member.name,
w)
elif isinstance(member, decls.class_declaration.class_t):
self.warn(
53,
"Member classes are not supported: %s" %
member.name,
w)
elif isinstance(
member, decls.class_declaration.class_declaration_t):
self.warn(
53,
"Member classes are not supported: %s" %
member.name,
w)
elif isinstance(member, decls.casting_operator_t):
self.warn(
54,
"Member casting operators are not supported: %s" %
member.name,
w)
else:
self.warn(
50,
"Unknown member type: %s" %
repr(member),
w)
# finally, close the class
self.outputFile.write(" " * indent)
self.outputFile.write("};\n\n\n")
elif typedef.name == "stdcomplexD":
self.outputFile.write(self.stdcomplex_headers["D"] + '\n')
elif typedef.name == "stdcomplexF":
self.outputFile.write(self.stdcomplex_headers["F"] + '\n')
else:
print('stdcomplex', typedef.name)
# stdcomplex is too difficult to wrap in some cases. Only wrap the
# constructor.
self.outputFile.write(" " * indent)
self.outputFile.write("class %s%s {\n" % (typedef.name, s))
# iterate over access
for access in pygccxml.declarations.ACCESS_TYPES.ALL:
# the access type
self.outputFile.write(" " * indent)
self.outputFile.write(" %s:\n" % access)
# warnings or no warning?
w = access not in self.options.access_warnings
for member in getType(typedef).get_members(access=access):
if isinstance(member, decls.constructor_t):
self.generate_constructor(typedef, member, indent, w)
elif isinstance(member, decls.destructor_t):
self.generate_destructor(typedef, member, indent, w)
# finally, close the class
self.outputFile.write(" " * indent)
self.outputFile.write("};\n\n\n")
def generate_constructor(self, typedef, constructor, indent, w):
# iterate over the arguments
args = []
for arg in constructor.arguments:
s = "%s %s" % (self.get_alias(self.getDeclarationString(arg), w), arg.name)
# append the default value if it exists
if arg.default_value:
s += " = %s" % arg.default_value
# and add the string to the arg list
args.append(s)
self.outputFile.write(" " * indent)
self.outputFile.write(" %s(%s);\n" % (typedef.name, ", ".join(args)))
def generate_destructor(self, typedef, destructor, indent, w):
self.outputFile.write(" " * indent)
self.outputFile.write(" ~%s();\n" % typedef.name)
def generate_enum(self, typedef):
name = typedef.name
enum = getType(typedef)
decl_string = typedef.decl_type.decl_string
        # Extract the namespace to put it in the C++ code. Without it, the
        # code generated by swig is wrong because it doesn't include the
        # namespace.
ns = "::".join(decl_string.split("::")[:-1])
self.outputFile.write("%{\n")
self.outputFile.write("using namespace %s;\n" % ns)
self.outputFile.write("%}\n")
content = [" %s = %i" % (key, value) for key, value in enum.values]
self.outputFile.write("enum %s { %s };\n" % (name, ", ".join(content)))
def generate_nested_enum(self, typedef, enum, indent, w):
content = [" %s = %i" % (key, value) for key, value in enum.values]
self.outputFile.write(" " * indent)
self.outputFile.write(" enum %s { %s };\n" % (enum.name, ", ".join(content)))
def generate_method(self, typedef, method, indent, w):
self.info("Generating interface for method '%s::%s'." %
(typedef.name, method.name))
        # avoid the apply method for the class vnl_c_vector: the signature is
        # quite strange and currently confuses swig :-/
if "(" in getType(method.return_type).decl_string:
self.warn(
1, "ignoring method not supported by swig '%s::%s'." %
(typedef.name, method.name), w)
return
names = [
"rBegin",
"rEnd",
"GetSpacingCallback",
"GetOriginCallback",
"Begin",
"End"]
if ((typedef.name.startswith('vnl_') and method.name in ["as_ref"])
or (typedef.name.startswith('itk') and method.name in names)):
self.warn(
3, "ignoring black listed method '%s::%s'." %
(typedef.name, method.name), w)
return
# iterate over the arguments
args = []
for arg in method.arguments:
s = "%s %s" % (self.get_alias(self.getDeclarationString(arg), w), arg.name)
if "(" in s:
self.warn(
1, "ignoring method not supported by swig '%s::%s'." %
(typedef.name, method.name), w)
return
# append the default value if it exists
if arg.default_value:
s += " = %s" % arg.default_value
# and add the string to the arg list
args.append(s)
# find the method decorators
static = ""
const = ""
if method.has_static:
static = "static "
if method.has_const:
const = " const"
if method.virtuality != "not virtual":
static += "virtual "
if method.virtuality == "pure virtual":
const += " = 0"
self.outputFile.write(" " * indent)
self.outputFile.write(
" %s%s %s(%s)%s;\n" %
(static,
self.get_alias(
self.getDeclarationString(
method.return_type),
w),
method.name,
", ".join(args),
const))
# Check the method arguments for std::string passed by reference.
# In this case, save the name of the argument in the applyFileNames list
# for further usage.
for arg in method.arguments:
dtype = arg.decl_type
if pygccxml.declarations.is_reference(dtype) and \
pygccxml.declarations.is_const(
pygccxml.declarations.remove_reference(dtype)) is False and \
pygccxml.declarations.is_std_string(dtype):
self.applyFileNames.append(arg.name)
def generate_headerfile(self, idxFile, wrappersNamespace):
# and begin to write the output
headerFile = StringIO()
headerFile.write("// This file is automatically generated.\n")
headerFile.write("// Do not modify this file manually.\n\n\n")
langs = [
"CHICKEN",
"CSHARP",
"GUILE",
"JAVA",
"LUA",
"MODULA3",
"MZSCHEME",
"OCAML",
"PERL",
"PERL5",
"PHP",
"PHP4",
"PHP5",
"PIKE",
"PYTHON",
"R",
"RUBY",
"SEXP",
"TCL",
"XML"]
        # first, define the module
for lang in langs:
headerFile.write("#ifdef SWIG%s\n" % lang)
headerFile.write("%%module %s%s\n" % (self.moduleName, lang.title()))
headerFile.write("#endif\n")
headerFile.write('\n')
# add the includes
# use a set to avoid putting many times the same include
s = set()
headerFile.write("%{\n")
# the include files passed in option
include = self.moduleName + 'SwigInterface.h'
i = '#include "%s"' % include
if i not in s:
headerFile.write(i + '\n')
s.add(i)
headerFile.write("%}\n\n\n")
# load the aliases files
headerFile.write("%{\n")
self.load_idx(idxFile)
# and the idx files in the mdx ones
for f in self.options.mdx:
self.load_mdx(f)
# iterate over all the typedefs in the _wrapping_::wrappers namespace
# to fill the alias dict
for typedef in wrappersNamespace.typedefs(): # allow_empty=True):
s = getType(typedef).decl_string
            # drop the :: prefix - it makes swig produce invalid code
if s.startswith("::"):
s = s[2:]
if s not in self.aliases:
self.warn(
2, "%s (%s) should be already defined in the idx files." %
(s, typedef.name))
self.aliases[s] = typedef.name
# declare the typedef
headerFile.write("typedef %s %s;\n" % (s, typedef.name))
headerFile.write("%}\n\n\n")
return headerFile
def generate_importfile(self, usedSources):
# add the imports
importFile = StringIO()
for f in self.options.imports:
importFile.write("%%import %s\n" % f)
importFile.write("\n\n")
for src in usedSources:
importFile.write("%%import %s.i\n" % src)
importFile.write('\n\n')
return importFile
def generate_includefile(self):
# add the swig includes
includeFile = StringIO()
includeFile.write("%include itk.i\n")
for f in options.swig_includes:
includeFile.write("%%include %s\n" % f)
includeFile.write("%%include %s\n" % (self.moduleName + "_ext.i"))
includeFile.write('\n\n')
return includeFile
def generate_applyfile(self):
# When a std::string is passed by reference, we need to add the %apply
# line with the argument name, and the INOUT command.
        # Use a set() to remove duplicates; this will work even if we get
        # multiple functions with the same argument name in the same .i file
# (swig should take care of it).
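        # For a hypothetical argument named "fileName" this emits:
        #     %apply (std::string& INOUT) { std::string & fileName};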
applyFileNames = set(self.applyFileNames)
# Apply file, for passing std::string as reference in methods
applyFile = StringIO()
for name in applyFileNames:
applyFile.write(
"%apply (std::string& INOUT) { std::string & " + name + "};\n")
applyFile.write("\n\n")
return applyFile
def create_typedefheader(self, usedSources):
# create the typedef header
typedefFile = StringIO()
typedefFile.write("#ifndef __%sSwigInterface_h\n" % self.moduleName)
typedefFile.write("#define __%sSwigInterface_h\n" % self.moduleName)
typedefInput = os.path.join(options.library_output_dir,
self.moduleName + 'SwigInterface.h.in')
with open(typedefInput, "r") as f:
typedefFile.write(f.read() + '\n')
for src in usedSources:
typedefFile.write('#include "%sSwigInterface.h"\n' % src)
typedefFile.write("#endif\n")
typedefOutput = os.path.join(options.interface_output_dir,
self.moduleName + 'SwigInterface.h')
with open(typedefOutput, "w") as f:
f.write(typedefFile.getvalue())
def create_interfacefile(self, interfaceFile, idxFile, wrappersNamespace):
headerFile = self.generate_headerfile(idxFile, wrappersNamespace)
# iterate over all the typedefs in the _wrapping_::wrappers namespace
        # to build a list of classes with the dependencies
# classes :: [(name, [dep_name], typedef)]
classes = []
for typedef in wrappersNamespace.typedefs():
# begin a new class
if isinstance(
getType(typedef),
pygccxml.declarations.class_declaration.class_t):
classes.append((
typedef.name,
[self.get_alias(super_class.related_class.decl_string) for
super_class in getType(typedef).bases], typedef))
elif isinstance(
getType(typedef),
pygccxml.declarations.enumeration.enumeration_t):
# warn( 6, "Enum are currently supported only nested in a
# class." )
self.generate_enum(typedef)
else:
self.warn(
5, "Unknown type type: %s" % str(typedef.decl_type.declaration))
# copy the classes in a new ordered list, according to the dependencies
# classes is sorted to be sure to always get the same result everywhere
name_local_classes = [c[0] for c in classes]
classes = sorted(classes)
name_already_in_typedefs = []
typedefs = []
while len(classes) != 0:
nclasses = []
for name, deps, typedef in classes:
ok = True
for d in deps:
if d in name_local_classes and d not in name_already_in_typedefs:
ok = False
if ok:
name_already_in_typedefs.append(name)
typedefs.append(typedef)
else:
nclasses.append((name, deps, typedef))
classes = nclasses
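        # Example (hypothetical): with classes A, B : A and C : B declared in
        # any order, the loop above yields typedefs ordered [A, B, C], so each
        # class is emitted only after its super classes.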
# now really generate the swig interface
for typedef in typedefs:
# begin a new class
self.generate_class(typedef)
if len(self.warnings) > 0 and self.options.warningError:
sys.exit(1)
# search the files to import
usedSources = set()
for alias in self.usedTypes:
if alias in self.typedefSource:
idxName = os.path.basename(self.typedefSource[alias])
iName = idxName[:-len(".idx")]
usedSources.add(iName)
outputFileName = os.path.basename(interfaceFile)
if outputFileName in usedSources:
usedSources.remove(outputFileName)
importFile = self.generate_importfile(usedSources)
includeFile = self.generate_includefile()
applyFile = self.generate_applyfile()
self.create_typedefheader(usedSources)
# finally, really write the output
content = headerFile.getvalue() + importFile.getvalue() + \
includeFile.getvalue() + applyFile.getvalue() + self.outputFile.getvalue()
        if self.options.keep and os.path.exists(interfaceFile):
            with open(interfaceFile, "r") as f:
                filecontent = f.read()
            if filecontent == content:
                self.info("%s unchanged." % interfaceFile)
                return
        self.info("Writing %s." % interfaceFile)
        with open(interfaceFile, "w") as f:
            f.write(content)
if __name__ == '__main__':
argParser = ArgumentParser()
argParser.add_argument(
"--mdx",
action="append",
dest="mdx",
default=[],
metavar="FILE",
help="master idx file to be used.")
argParser.add_argument(
"--import",
action="append",
dest="imports",
default=[],
metavar="FILE",
help="File to be imported in the generated interface file.")
argParser.add_argument(
"--swig-include",
action="append",
dest="swig_includes",
default=[],
metavar="FILE",
help=(
"File to be included by swig (%include) in the generated "
"interface file."))
argParser.add_argument(
"-w",
"--disable-warning",
action="append",
dest="warnings",
default=[],
metavar="WARNING",
help="Warning to be disabled.")
argParser.add_argument(
"-A",
"--disable-access-warning",
action="append",
dest="access_warnings",
default=[],
metavar="LEVEL",
help=(
"Access level where warnings are disabled "
"(public, protected, private)."))
argParser.add_argument(
"-W",
"--warning-error",
action="store_true",
dest="warningError",
help="Treat warnings as errors.")
argParser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Log what is currently done.")
argParser.add_argument(
"-k",
"--keep",
action="store_true",
dest="keep",
help="Don't rewrite the output file if the content is unchanged.")
argParser.add_argument(
"-p",
"--pygccxml-path",
action="store",
dest="pygccxml_path",
help="Path to pygccxml")
argParser.add_argument(
"-g",
"--castxml-path",
action="store",
dest="castxml_path",
help="Path to castxml")
argParser.add_argument(
"-o",
"--interface-output-dir",
action="store",
dest="interface_output_dir",
help="Directory to write the Swig input files")
argParser.add_argument(
"-l",
"--library-output-dir",
action="store",
dest="library_output_dir",
help="Directory to read the xml abstract syntax tree input files")
argParser.add_argument(
"-s",
"--submodule-order",
action="store",
dest="submodule_order",
help="List of submodules that must be wrapped in the given order")
options = argParser.parse_args()
sys.path.insert(1, options.pygccxml_path)
import pygccxml
import logging
# init the pygccxml stuff
pygccxml.utils.loggers.cxx_parser.setLevel(logging.CRITICAL)
pygccxml.declarations.scopedef_t.RECURSIVE_DEFAULT = False
pygccxml.declarations.scopedef_t.ALLOW_EMPTY_MDECL_WRAPPER = True
pygccxml_config = pygccxml.parser.config.xml_generator_configuration_t(
xml_generator_path=options.castxml_path,
xml_generator="castxml")
moduleNames = []
# The first mdx file is the master index file for this module.
with open(options.mdx[0], 'r') as ff:
for line in ff.readlines():
stripped = line.strip()
            if line.startswith('%') or line.isspace():
                # exclude comment lines (starting with %) and blank lines -
                # they are not idx files
                pass
elif stripped.endswith(".mdx"):
pass
else:
moduleName = stripped.rsplit('.')[0]
if moduleName.startswith('(const char*)'):
moduleName = moduleName[len('(const char*)'):]
moduleName = moduleName.strip('"')
moduleNames.append(moduleName)
def generate_wrapping_namespace(moduleName):
xmlFilePath = os.path.join(options.library_output_dir,
moduleName + '.xml')
pygccxml_reader = pygccxml.parser.source_reader.source_reader_t(
pygccxml_config)
abstractSyntaxTree = pygccxml_reader.read_xml_file(xmlFilePath)
globalNamespace = pygccxml.declarations.get_global_namespace(abstractSyntaxTree)
wrappingNamespace = globalNamespace.namespace('_wrapping_')
return wrappingNamespace.namespace('wrappers')
wrappingNamespaces = dict()
# Limit the number of cached, parsed abstract syntax trees to avoid very
# high memory usage
wrappingCacheLength = min(len(moduleNames), 20)
for ii in range(wrappingCacheLength):
moduleName = moduleNames[ii]
wrappingNamespace = generate_wrapping_namespace(moduleName)
wrappingNamespaces[moduleName] = wrappingNamespace
for moduleName in moduleNames:
if moduleName in wrappingNamespaces:
wrappersNamespace = wrappingNamespaces[moduleName]
else:
wrappersNamespace = generate_wrapping_namespace(moduleName)
idxFilePath = os.path.join(options.interface_output_dir,
moduleName + '.idx')
idx_generator = IdxGenerator(moduleName)
idx_generator.create_idxfile(idxFilePath, wrappersNamespace)
def generate_swig_input(moduleName):
if moduleName in wrappingNamespaces:
wrappersNamespace = wrappingNamespaces[moduleName]
else:
wrappersNamespace = generate_wrapping_namespace(moduleName)
idxFilePath = os.path.join(options.interface_output_dir,
moduleName + '.idx')
swigInputFilePath = os.path.join(options.interface_output_dir,
moduleName + '.i')
swig_input_generator = SwigInputGenerator(moduleName, options)
swig_input_generator.create_interfacefile(swigInputFilePath, idxFilePath,
wrappersNamespace)
if options.submodule_order:
for moduleName in options.submodule_order.split(';'):
generate_swig_input(moduleName)
moduleNames.remove(moduleName)
for moduleName in moduleNames:
generate_swig_input(moduleName)
|
{
"content_hash": "b057a4116df1a332858b7cbe69a4a1ac",
"timestamp": "",
"source": "github",
"line_count": 999,
"max_line_length": 90,
"avg_line_length": 38.905905905905904,
"alnum_prop": 0.5414876373272957,
"repo_name": "PlutoniumHeart/ITK",
"id": "f41e99d7129a1d8c92acd81f310309f0fa22440c",
"size": "38986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wrapping/Generators/SwigInterface/igenerator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "306"
},
{
"name": "C",
"bytes": "30635310"
},
{
"name": "C++",
"bytes": "47011599"
},
{
"name": "CMake",
"bytes": "2183114"
},
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "DIGITAL Command Language",
"bytes": "709"
},
{
"name": "Fortran",
"bytes": "2260380"
},
{
"name": "HTML",
"bytes": "208777"
},
{
"name": "Io",
"bytes": "1833"
},
{
"name": "Java",
"bytes": "28598"
},
{
"name": "Lex",
"bytes": "6948"
},
{
"name": "Makefile",
"bytes": "267990"
},
{
"name": "Objective-C",
"bytes": "43946"
},
{
"name": "Objective-C++",
"bytes": "6591"
},
{
"name": "OpenEdge ABL",
"bytes": "85244"
},
{
"name": "Perl",
"bytes": "18085"
},
{
"name": "Python",
"bytes": "939926"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "131549"
},
{
"name": "Tcl",
"bytes": "74786"
},
{
"name": "XSLT",
"bytes": "195448"
},
{
"name": "Yacc",
"bytes": "20591"
}
],
"symlink_target": ""
}
|
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
notification.py: Parser for notification events received from Controller
"""
import os
import re
import xmltodict
from pybvc.common.utils import dbg_print
yang_namespace_to_prefix_map = {
'urn:opendaylight:inventory': 'inv',
    'urn:opendaylight:netconf-node-inventory': 'netinv',
'urn:opendaylight:flow:inventory': 'flownode',
'urn:opendaylight:flow:statistics': 'fstat',
'urn:opendaylight:flow:table:statistics': 'flowstat',
'urn:opendaylight:port:statistics': 'portstat',
'urn:TBD:params:xml:ns:yang:network-topology': 'nt',
'urn:opendaylight:model:topology:inventory': 'nt1',
'urn:opendaylight:host-tracker': 'host-track',
}
def yang_nsname_to_prefix(nsname):
if nsname in yang_namespace_to_prefix_map:
return yang_namespace_to_prefix_map[nsname]
else:
return nsname
def yang_prefix_to_nsname(prefix):
    for k, v in yang_namespace_to_prefix_map.items():
if v == prefix:
return k
return prefix
class NetworkTopologyChangeNotification():
""" Parser for notification messages generated by the Controller
when it detects changes in the network topology data tree.
"""
def __init__(self, event):
self.added_switches = []
self.removed_switches = []
self.added_hosts = []
self.removed_hosts = []
self.added_links = []
self.removed_links = []
d = xmltodict.parse(event)
try:
p1 = 'notification'
notification = d[p1]
p2 = 'eventTime'
self.timestamp = notification[p2]
self.events = []
p3 = 'data-changed-notification'
p4 = 'data-change-event'
events = notification[p3][p4]
if isinstance(events, list):
for item in events:
tc_evt = TopoChangeEvent(item)
self.events.append(tc_evt)
elif isinstance(events, dict):
tc_evt = TopoChangeEvent(events)
self.events.append(tc_evt)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(events, type(events)))
dbg_print(msg)
for event in self.events:
if event.created():
if event.is_switch():
self.added_switches.append(event.get_node_id())
elif event.is_host():
self.added_hosts.append(event.get_node_id())
elif event.is_link():
self.added_links.append(event.get_link_id())
elif event.deleted():
if event.is_switch():
self.removed_switches.append(event.get_node_id())
elif event.is_host():
self.removed_hosts.append(event.get_node_id())
elif event.is_link():
self.removed_links.append(event.get_link_id())
        except Exception:
msg = "DEBUG: failed to process event '%s'" % event
dbg_print(msg)
def get_time(self):
return self.timestamp
def switches_added(self):
return self.added_switches
def switches_removed(self):
return self.removed_switches
def hosts_added(self):
return self.added_hosts
def hosts_removed(self):
return self.removed_hosts
def links_added(self):
return self.added_links
def links_removed(self):
return self.removed_links
    def print_events(self):
        for event in self.events:
            print
            event.do_print()
            print
class TopoChangeEvent():
""" Parser for the data change event located in the network topology
change notification message received from the Controller.
Helper subclass for the 'NetworkTopologyChangeNotification' class.
"""
def __init__(self, event):
p = 'path'
if isinstance(event, dict):
for k, v in event.items():
if k == p:
self.path_info = PathInfo(v)
else:
setattr(self, k, v)
else:
msg = ("DEBUG: event=%s, unexpected data format '%s'" %
(event, type(event)))
dbg_print(msg)
def created(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'created')
return res
def deleted(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'deleted')
return res
def updated(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'updated')
return res
def get_path(self):
path = None
p = 'path_info'
if hasattr(self, p):
path = str(self.path_info.path)
return path
def is_node(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = '.*node-id$'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def is_switch(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('openflow'):
res = True
return res
def is_host(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('host'):
res = True
return res
def get_node_id(self):
node_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
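            # A topology path typically looks like (illustrative):
            #     .../nt:node[nt:node-id='openflow:1']/...
            # so split on ']' and look for the chunk containing 'node-id='.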
chunks = repr(path).split(']')
if chunks:
p = 'node-id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
node_id = s[idx + len(p):].translate(None, "[]'\"")
break
return node_id
def is_link(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = '.*link-id$'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def get_link_id(self):
link_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = repr(path).split(']')
if chunks:
p = 'link-id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
link_id = s[idx + len(p):].translate(None, "[]'\"")
break
return link_id
    def do_print(self):
        w = 65
        print '<' * w
        print " operation: %s" % self.operation
        self.path_info.do_print()
        print '>' * w
class InventoryChangeNotification():
""" Parser for notification messages generated by the Controller
when it detects changes in its internal inventory data store.
"""
def __init__(self, event):
self.added_nodes = []
self.removed_nodes = []
self.added_flows = []
self.removed_flows = []
d = xmltodict.parse(event)
try:
p1 = 'notification'
notification = d[p1]
p2 = 'eventTime'
self.timestamp = notification[p2]
self.events = []
p3 = 'data-changed-notification'
p4 = 'data-change-event'
events = notification[p3][p4]
if isinstance(events, list):
for item in events:
evt = InventoryChangeEvent(item)
self.events.append(evt)
elif isinstance(events, dict):
evt = InventoryChangeEvent(events)
self.events.append(evt)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(events, type(events)))
dbg_print(msg)
for event in self.events:
if event.created():
if event.is_node():
self.added_nodes.append(event.get_node_id())
elif event.is_flow_entry():
flow_info = FlowInfo(event)
self.added_flows.append(flow_info)
elif event.deleted():
if event.is_node():
self.removed_nodes.append(event.get_node_id())
elif event.is_flow_entry():
flow_info = FlowInfo(event)
self.removed_flows.append(flow_info)
        except Exception as e:
print "Error, %s" % e
def get_time(self):
return self.timestamp
def nodes_added(self):
return self.added_nodes
def nodes_removed(self):
return self.removed_nodes
def flows_added(self):
return self.added_flows
def flows_removed(self):
return self.removed_flows
def print_events(self):
for event in self.events:
if event.created():
print "\n".strip()
event.do_print()
print "\n".strip()
class InventoryChangeEvent():
""" Parser for the data change event located in the inventory change
notification message received from the Controller.
Helper subclass for the 'InventoryChangeNotification' class.
"""
def __init__(self, event):
self.path_info = None
p = 'path'
if isinstance(event, dict):
for k, v in event.items():
if k == p:
self.path_info = PathInfo(v)
else:
setattr(self, k, v)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(event, type(event)))
dbg_print(msg)
def created(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'created')
return res
def deleted(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'deleted')
return res
def updated(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'updated')
return res
def get_path(self):
path = None
p = 'path_info'
if hasattr(self, p):
path = str(self.path_info.path)
return path
def is_node(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
                p1 = r'node\[.*:id=.*\]'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def is_switch(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('openflow'):
res = True
return res
def get_node_id(self):
node_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = str(path).split('[')
p = ':id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
node_id = s[idx + len(p):].translate(None, "[]'\"")
break
return node_id
def is_flow_entry(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
                p1 = r'flow\[.*:id=.*\]'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def get_flow_entry_id(self):
flow_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = str(path).split('[')
p = ':id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
flow_id = s[idx + len(p):].translate(None, "[]'\"")
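                    # no break here: keep the last ':id=' match, which is the
                    # flow id (node and table ids appear earlier in the path)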
return flow_id
    def do_print(self):
        w = 65
        print '<' * w
        print " operation: %s" % self.operation
        self.path_info.do_print()
        print '>' * w
class FlowInfo():
def __init__(self, event):
self.node_id = None
self.table_id = None
self.flow_id = None
if isinstance(event, InventoryChangeEvent) and event.is_flow_entry():
path = event.get_path()
try:
chunks = path.split('/')
l = []
p = ':id='
for s in chunks:
idx = s.find(p)
if idx >= 0:
l.append(s[idx + len(p):].translate(None, "'[]"))
self.node_id = l[0]
self.table_id = l[1]
self.flow_id = l[2]
            except Exception:
msg = "DEBUG: unexpected string format: %s" % path
dbg_print(msg)
else:
msg = "wrong class usage"
dbg_print(msg)
def to_string(self):
s = ("{node='%s', table='%s', flowid='%s'}" %
(self.node_id, self.table_id, self.flow_id))
return s
class PathInfo():
""" Represents the path to the node in the Controller's internal
data tree where the change has been detected.
Helper subclass for the 'NetworkTopologyChangeNotification'
and 'InventoryChangeNotification' classes.
"""
def __init__(self, info):
self.namespaces = None
self.path = None
if isinstance(info, dict):
p1 = '#text'
p2 = '@xmlns'
try:
path = info[p1]
namespaces = []
for k, v in info.items():
if k.startswith(p2):
pfx = yang_nsname_to_prefix(v)
d = {'ns': v, 'pfx': pfx}
namespaces.append(d)
nickname = k.split(':')[-1]
path = path.replace(nickname, pfx)
self.namespaces = namespaces
self.path = path
except:
msg = "DEBUG: failed to process info '%s'" % info
dbg_print(msg)
elif isinstance(info, basestring):
self.path = info
else:
msg = "DEBUG: info=%s, " \
"unexpected data format '%s'" % (info, type(info))
dbg_print(msg)
def do_print(self):
for ns in self.namespaces:
print " namespace: %s (prefix: %s)" % (ns['ns'], ns['pfx'])
print " path: %s" % self.path
|
{
"content_hash": "1ccd899083d4d8dd0606004101e7afd4",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 77,
"avg_line_length": 30.09090909090909,
"alnum_prop": 0.47878902316213495,
"repo_name": "jebpublic/pybvc",
"id": "5c25e16b84a72770d11bf8b5345663afd373d484",
"size": "17444",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pybvc/controller/notification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "436853"
}
],
"symlink_target": ""
}
|
import logging
import os.path
import sys
import time
import tempfile
mm_start = time.clock()
logging.raiseExceptions = False
logging.basicConfig(level=logging.INFO)
logging_handler = logging.FileHandler(
    os.path.join(tempfile.gettempdir(), "mm.log"))
#suds log setup
suds_logger = logging.getLogger('suds.client')
suds_logger.setLevel(logging.WARN)
suds_logger.propagate = False
suds_logger.addHandler(logging_handler)
#mm log setup
logger = logging.getLogger('mm')
logger.setLevel(logging.ERROR)
logger.propagate = False
logger.addHandler(logging_handler)
#request log setup
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.ERROR)
def __get_base_path():
if hasattr(sys, 'frozen'):
return sys._MEIPASS
else:
return os.path.dirname(os.path.dirname(__file__))
def __get_is_frozen():
if hasattr(sys, 'frozen'):
return True
else:
return False
frozen = __get_is_frozen()
base_path = __get_base_path()
connection = None
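# Minimal usage sketch (hypothetical caller; assumes this module is importable
# as 'lib.config' per the repository layout):
#     from lib import config
#     config.logger.error("operation failed")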
|
{
"content_hash": "f77e72c71eb16b0664e6ad1d6a7febca",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 70,
"avg_line_length": 22.34090909090909,
"alnum_prop": 0.7232960325534079,
"repo_name": "bfagundez/apex_paperboy",
"id": "96a95d9be47cf9189a3d92ac0666b189cb5347f2",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "492271"
},
{
"name": "JavaScript",
"bytes": "1814624"
},
{
"name": "Python",
"bytes": "263701"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import itertools
import os
import json
import random
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
from django.conf import settings
import pydenticon
import requests
from lib.video.tasks import resize_video
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.base import STATUS_CHOICES_API_LOOKUP
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.developers.models import AddonPaymentAccount, PaymentAccount
from mkt.developers.providers import Reference
from mkt.developers.tasks import resize_preview, save_icon
from mkt.prices.models import AddonPremium, Price
from mkt.ratings.models import Review
from mkt.ratings.tasks import addon_review_aggregates
from mkt.reviewers.models import RereviewQueue
from mkt.site.utils import app_factory, slugify, version_factory
from mkt.users.models import UserProfile
from mkt.users.utils import create_user
from mkt.webapps.models import AddonUser, AppManifest, Preview, Webapp
adjectives = [u'Exquisite', u'Delicious', u'Elegant', u'Swanky', u'Spicy',
u'Food Truck', u'Artisanal', u'Tasty', u'Questionable', u'Drôle']
nouns = [u'Sandwich', u'Pizza', u'Curry', u'Pierogi', u'Sushi', u'Salad',
u'Stew', u'Pasta', u'Barbeque', u'Bacon', u'Pancake', u'Waffle',
u'Chocolate', u'Gyro', u'Cookie', u'Burrito', 'Pie', u'Crème brûlée',
u'пельмень']
fake_app_names = list(itertools.product(adjectives, nouns))
def generate_app_data(num, skip_names=()):
skip_names = set(skip_names)
def _names():
for name in fake_app_names:
ns = u' '.join(name)
if ns not in skip_names:
yield ns
repeat = 1
while True:
for name in fake_app_names:
ns = u' '.join(name + (str(repeat),))
if ns not in skip_names:
yield ns
repeat += 1
cats = itertools.cycle([c[0] for c in CATEGORY_CHOICES])
pairs = itertools.izip(_names(), cats)
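    # e.g. the first two items (with no skip_names) are
    # (u'Exquisite Sandwich', <first category slug>) and
    # (u'Exquisite Pizza', <second category slug>) - illustrative.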
return itertools.islice(pairs, num)
foreground = ["rgb(45,79,255)",
"rgb(254,180,44)",
"rgb(226,121,234)",
"rgb(30,179,253)",
"rgb(232,77,65)",
"rgb(49,203,115)",
"rgb(141,69,170)"]
def generate_icon(app):
gen = pydenticon.Generator(8, 8, foreground=foreground)
img = gen.generate(unicode(app.name), 128, 128,
output_format="png")
save_icon(app, img)
def generate_previews(app, n=1):
gen = pydenticon.Generator(8, 12, foreground=foreground,
digest=hashlib.sha512)
for i in range(n):
img = gen.generate(unicode(app.name) + unichr(i), 320, 480,
output_format="png")
p = Preview.objects.create(addon=app, filetype="image/png",
thumbtype="image/png",
caption="screenshot " + str(i),
position=i)
f = tempfile.NamedTemporaryFile(suffix='.png')
f.write(img)
f.flush()
resize_preview(f.name, p)
lang_prefixes = {
'fr': u'fran\xe7ais',
'es-ES': u'espa\xf1ol',
'ru': u'\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
'ja': u'\u65e5\u672c\u8a9e',
'pt-BR': u'portugu\xeas',
'rtl': u'(RTL)',
'en-US': u''
}
def generate_localized_names(name, langs):
names = {}
for lang in langs:
prefix = lang_prefixes[lang]
if prefix:
name = u'%s %s' % (prefix, name)
names[lang] = name
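    # Note: 'name' is reused across iterations, so later locales accumulate
    # the prefixes of earlier ones, e.g. (computed from the mapping above):
    #     generate_localized_names(u'Pie', ['fr', 'es-ES'])
    #     -> {'fr': u'fran\xe7ais Pie', 'es-ES': u'espa\xf1ol fran\xe7ais Pie'}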
return names
def generate_ratings(app, num):
for n in range(num):
email = 'testuser%s@example.com' % (n,)
user, _ = UserProfile.objects.get_or_create(
email=email, source=mkt.LOGIN_SOURCE_UNKNOWN,
display_name=email)
Review.objects.create(
addon=app, user=user, rating=random.randrange(1, 6),
title="Test Review " + str(n), body="review text")
def generate_hosted_app(name, categories, developer_name,
privacy_policy=None, device_types=(), status=4,
rated=True, uses_flash=False, default_locale='en-US',
**spec):
generated_url = 'http://%s.testmanifest.com/fake-data/manifest.webapp' % (
slugify(name),)
a = app_factory(categories=categories, name=name, complete=False,
privacy_policy=spec.get('privacy_policy'),
file_kw={'status': status, 'uses_flash': uses_flash},
default_locale=default_locale, rated=rated,
manifest_url=spec.get('manifest_url', generated_url))
if device_types:
for dt in device_types:
a.addondevicetype_set.create(device_type=DEVICE_CHOICES_IDS[dt])
else:
a.addondevicetype_set.create(device_type=1)
a.versions.latest().update(reviewed=datetime.datetime.now(),
_developer_name=developer_name)
    if 'manifest_file' in spec:
        with open(spec['manifest_file']) as mf:
            AppManifest.objects.create(
                version=a._latest_version,
                manifest=mf.read())
else:
generate_hosted_manifest(a)
return a
def generate_hosted_manifest(app):
data = {
'name': unicode(app.name),
'description': 'This app has been automatically generated',
'version': '1.0',
'icons': {
'16': 'http://testmanifest.com/icon-16.png',
'48': 'http://testmanifest.com/icon-48.png',
'128': 'http://testmanifest.com/icon-128.png'
},
'installs_allowed_from': ['*'],
'developer': {
'name': 'Marketplace Team',
'url': 'https://marketplace.firefox.com/credits'
}
}
AppManifest.objects.create(
version=app._latest_version, manifest=json.dumps(data))
def generate_app_package(app, out, apptype, permissions, namedict,
                         default_locale='en-US', version=None):
    # 'version' must be a Version object (every caller passes one); its
    # .version string is embedded in the generated manifest.
manifest = {
'version': version.version,
'name': unicode(app.name),
'description': ('This packaged app has been automatically generated'
' (version %s)' % (version.version,)),
'icons': {
'16': '/icons/16.png',
'32': '/icons/32.png',
'256': '/icons/256.png'
},
'developer': {
'name': 'Marketplace Team',
'url': 'https://marketplace.firefox.com/credits'
},
'installs_allowed_from': ['*'],
'launch_path': '/index.html',
'locales': dict((lang, {
'name': lang,
'description': 'This packaged app has been automatically generated'
}) for lang, name in namedict.items()),
'permissions': dict(((k, {"description": k})
for k in permissions)),
'default_locale': default_locale,
'orientation': 'landscape',
'type': 'web' if apptype == 'packaged' else apptype,
'fullscreen': 'true'
}
outz = ZipFile(file=out, mode='w', compression=ZIP_DEFLATED)
try:
for size in ('32', 'med'):
outz.writestr(
'icons/%s.png' % (size,),
open(os.path.join(
settings.MEDIA_ROOT,
'img/app-icons/%s/generic.png' % (size,))).read())
outz.writestr('script.js',
'document.onload=function() {alert("Hello!");};')
outz.writestr(
'index.html',
'<title>Packaged app</title><script src="script.js"></script>'
'<h1>Test packaged app</h1>')
outz.writestr("manifest.webapp", json.dumps(manifest))
finally:
outz.close()
AppManifest.objects.create(
version=version, manifest=json.dumps(manifest))
def generate_packaged_app(namedict, apptype, categories, developer_name,
privacy_policy=None, device_types=(),
permissions=(), versions=(),
default_locale='en-US', package_file=None,
status=4, uses_flash=False, **kw):
now = datetime.datetime.now()
app = app_factory(categories=categories, name=namedict[default_locale],
complete=False, rated=True, is_packaged=True,
privacy_policy=privacy_policy,
version_kw={
'version': '1.0',
'reviewed': now if status >= 4 else None,
'_developer_name': developer_name},
file_kw={'status': status, 'uses_flash': uses_flash})
if device_types:
for dt in device_types:
app.addondevicetype_set.create(device_type=DEVICE_CHOICES_IDS[dt])
else:
app.addondevicetype_set.create(device_type=1)
f = app.latest_version.all_files[0]
f.update(filename=f.generate_filename())
fp = os.path.join(app.latest_version.path_prefix, f.filename)
try:
os.makedirs(os.path.dirname(fp))
except OSError:
pass
if package_file:
return app
with open(fp, 'w') as out:
generate_app_package(app, out, apptype,
permissions, namedict,
version=app.latest_version)
for i, vspec in enumerate(versions, 1):
st = STATUS_CHOICES_API_LOOKUP[vspec.get("status", "public")]
rtime = (now + datetime.timedelta(i))
v = version_factory(version="1." + str(i), addon=app,
reviewed=rtime if st >= 4 else None,
nomination=rtime if st > 0 else None,
created=rtime,
file_kw={'status': st},
_developer_name=developer_name)
f = v.files.all()[0]
f.update(filename=f.generate_filename())
fp = os.path.join(app.latest_version.path_prefix, f.filename)
try:
os.makedirs(os.path.dirname(fp))
except OSError:
pass
with open(fp, 'w') as out:
generate_app_package(app, out, vspec.get("type", apptype),
vspec.get("permissions", permissions),
namedict, version=v)
app.update_version()
return app
def get_or_create_payment_account(email='fakedeveloper@example.com',
name='Fake App Developer'):
user, _ = UserProfile.objects.get_or_create(
email=email,
source=mkt.LOGIN_SOURCE_UNKNOWN,
display_name=name)
try:
acct = PaymentAccount.objects.get(user=user)
except PaymentAccount.DoesNotExist:
acct = Reference().account_create(
user, {'account_name': name, 'name': name, 'email': email})
return acct
def get_or_create_price(tier):
return Price.objects.get_or_create(price=tier, active=True)[0]
def generate_apps(hosted=0, packaged=0, privileged=0, versions=('public',),
**spec_data):
apps_data = generate_app_data(hosted + packaged + privileged)
specs = []
for i, (appname, cat_slug) in enumerate(apps_data):
if i < privileged:
spec = {'name': appname,
'type': 'privileged',
'status': versions[0],
'permissions': ['camera', 'storage'],
'categories': [cat_slug],
'versions': versions,
'num_ratings': 5,
'num_previews': 2}
elif i < (privileged + packaged):
spec = {'name': appname,
'type': 'packaged',
'status': versions[0],
'categories': [cat_slug],
'versions': versions,
'num_ratings': 5,
'num_previews': 2}
else:
spec = {'name': appname,
'type': 'hosted',
'status': versions[0],
'categories': [cat_slug],
'num_ratings': 5,
'num_previews': 2}
spec.update(spec_data)
specs.append(spec)
return generate_apps_from_specs(specs, None)
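# Minimal usage sketch (hypothetical): generate_apps(hosted=2, packaged=1,
# privileged=1) creates four complete fake apps with icons, previews and
# ratings.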
GENERIC_DESCRIPTION = ""
def generate_apps_from_specs(specs, specdir, repeats=1):
global GENERIC_DESCRIPTION
apps = []
specs = specs * repeats
    GENERIC_DESCRIPTION = requests.get('http://baconipsum.com/api/'
                                       '?type=meat-and-filler&paras=2'
                                       '&start-with-lorem=1').json()[0]
existing = [unicode(w.name) for w in Webapp.with_deleted.all()]
data = zip(specs, generate_app_data(len(specs), skip_names=existing))
for spec, (appname, cat_slug) in data:
spec = spec.copy()
if spec.get('preview_files'):
spec['preview_files'] = [os.path.join(specdir, p)
for p in spec['preview_files']]
if spec.get('video_files'):
spec['video_files'] = [os.path.join(specdir, p)
for p in spec['video_files']]
if spec.get('package_file'):
spec['package_file'] = os.path.join(specdir, spec['package_file'])
if spec.get('manifest_file'):
spec['manifest_file'] = os.path.join(specdir,
spec['manifest_file'])
spec['name'] = spec.get('name', appname)
spec['categories'] = spec.get('categories', [cat_slug])
apps.append(generate_app_from_spec(**spec))
return apps
def generate_app_from_spec(name, categories, type, status, num_previews=1,
num_ratings=1, locale_names=('en-US', 'es-ES'),
preview_files=(),
video_files=(),
developer_name='Fake App Developer',
developer_email='fakedeveloper@example.com',
privacy_policy='Fake privacy policy',
premium_type='free', description=None,
default_locale='en-US', rereview=False,
uses_flash=False, special_regions={}, **spec):
status = STATUS_CHOICES_API_LOOKUP[status]
names = generate_localized_names(name, locale_names)
if type == 'hosted':
app = generate_hosted_app(
names[default_locale], categories, developer_name, status=status,
default_locale=default_locale, **spec)
else:
app = generate_packaged_app(
names, type, categories, developer_name,
default_locale=default_locale, status=status, **spec)
generate_icon(app)
if not preview_files:
generate_previews(app, num_previews)
if video_files:
for i, f in enumerate(video_files):
p = Preview.objects.create(addon=app, filetype="video/webm",
thumbtype="image/png",
caption="video " + str(i),
position=i)
resize_video(f, p)
if preview_files:
for i, f in enumerate(preview_files):
p = Preview.objects.create(addon=app, filetype="image/png",
thumbtype="image/png",
caption="screenshot " + str(i),
position=i + len(video_files))
resize_preview(f, p)
generate_ratings(app, num_ratings)
app.name = names
if not description:
description = GENERIC_DESCRIPTION
app.description = description
app.privacy_policy = privacy_policy
app.support_email = developer_email
premium_type = mkt.ADDON_PREMIUM_API_LOOKUP[premium_type]
app.premium_type = premium_type
app.default_locale = default_locale
if premium_type != mkt.ADDON_FREE and status != mkt.STATUS_NULL:
acct = get_or_create_payment_account(developer_email, developer_name)
product_uri = Reference().product_create(acct, app)
AddonPaymentAccount.objects.create(addon=app, payment_account=acct,
account_uri=acct.uri,
product_uri=product_uri)
price = get_or_create_price(spec.get('price', '0.99'))
AddonPremium.objects.create(addon=app, price=price)
for optField in ('support_url', 'homepage', 'is_offline'):
if optField in spec:
setattr(app, optField, spec[optField])
# Status has to be updated at the end because STATUS_DELETED apps can't
# be saved.
app.status = status
app.save()
for (region, region_status) in special_regions.iteritems():
app.geodata.update(**{'region_%s_nominated' % (region,):
datetime.datetime.now(),
'region_%s_status' % (region,):
STATUS_CHOICES_API_LOOKUP[region_status]})
addon_review_aggregates(app.pk)
if rereview:
RereviewQueue.objects.get_or_create(addon=app)
try:
u = UserProfile.objects.get(email=developer_email)
except UserProfile.DoesNotExist:
u = create_user(developer_email)
u.display_name = developer_name
u.save()
AddonUser.objects.create(user=u, addon=app)
return app
|
{
"content_hash": "b183c4cff6fa198e4d8af7713bc95760",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 79,
"avg_line_length": 39.74149659863946,
"alnum_prop": 0.5487276047015862,
"repo_name": "eviljeff/zamboni",
"id": "b3dc8de0cde870f47c344bca72619cfd093cb4fa",
"size": "17562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/webapps/fakedata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356800"
},
{
"name": "HTML",
"bytes": "2208233"
},
{
"name": "JavaScript",
"bytes": "532502"
},
{
"name": "Makefile",
"bytes": "4172"
},
{
"name": "Python",
"bytes": "3935682"
},
{
"name": "Shell",
"bytes": "10972"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
}
|
""" CSW request and response processor """
from __future__ import (absolute_import, division, print_function)
import base64
import inspect
import warnings
import StringIO
import random
from urllib import urlencode
from urllib2 import Request, urlopen
from owscapable.util import OrderedDict
from owscapable.etree import etree
from owscapable import fes
from owscapable import util
from owscapable import ows
from owscapable.iso import MD_Metadata
from owscapable.fgdc import Metadata
from owscapable.dif import DIF
from owscapable.namespaces import Namespaces
from owscapable.util import cleanup_namespaces, bind_url, add_namespaces
# default variables
outputformat = 'application/xml'
def get_namespaces():
n = Namespaces()
return n.get_namespaces()
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'
schema_location = '%s %s' % (namespaces['csw'], schema)
class CatalogueServiceWeb:
""" csw request class """
def __init__(self, url, xml=None, lang='en-US', version='2.0.2', timeout=10, skip_caps=False,
username=None, password=None):
"""
Construct and process a GetCapabilities request
Parameters
----------
        - url: the URL of the CSW
        - xml: a string containing a GetCapabilities response to parse
          instead of requesting it from the server (default is None)
- lang: the language (default is 'en-US')
- version: version (default is '2.0.2')
- timeout: timeout in seconds
- skip_caps: whether to skip GetCapabilities processing on init (default is False)
- username: username for HTTP basic authentication
- password: password for HTTP basic authentication
"""
self.url = url
self.lang = lang
self.version = version
self.timeout = timeout
self.username = username
self.password = password
self.service = 'CSW'
self.exceptionreport = None
self.owscommon = ows.OwsCommon('1.0.0')
if not skip_caps: # process GetCapabilities
if xml:
# load from the response to get _exml
self._parse_response(xml)
else:
# construct request
data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
# ServiceIdentification
val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))
                self.identification = ows.ServiceIdentification(
                    val, self.owscommon.namespace)
                # ServiceProvider
                val = self._exml.find(
                    util.nspath_eval('ows:ServiceProvider', namespaces))
                self.provider = ows.ServiceProvider(
                    val, self.owscommon.namespace)
                # ServiceOperations metadata
                self.operations = []
                for elem in self._exml.findall(
                        util.nspath_eval(
                            'ows:OperationsMetadata/ows:Operation',
                            namespaces)):
                    self.operations.append(
                        ows.OperationsMetadata(elem, self.owscommon.namespace))
                # for harmonization
                self.contents = None
                # FilterCapabilities
                val = self._exml.find(
                    util.nspath_eval('ogc:Filter_Capabilities', namespaces))
                self.filters = fes.FilterCapabilities(val)
def describerecord(self, typename='csw:Record', format=outputformat):
"""
Construct and process DescribeRecord request
Parameters
----------
- typename: the typename to describe (default is 'csw:Record')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
node0 = self._setrootelement('csw:DescribeRecord')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set('outputFormat', format)
node0.set('schemaLanguage', namespaces['xs2'])
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename
self.request = node0
self._invoke()
# parse result
# TODO: process the XML Schema (you're on your own for now with self.response)
def getdomain(self, dname, dtype='parameter'):
"""
Construct and process a GetDomain request
Parameters
----------
- dname: the value of the Parameter or Property to query
- dtype: whether to query a parameter (parameter) or property (property)
"""
# construct request
dtypename = 'ParameterName'
node0 = self._setrootelement('csw:GetDomain')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
if dtype == 'property':
dtypename = 'PropertyName'
etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')
self.results['type'] = util.testXMLValue(val, True)
val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))
self.results[dtype] = util.testXMLValue(val)
# get the list of values associated with the Domain
self.results['values'] = []
for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):
self.results['values'].append(util.testXMLValue(f))
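    # e.g. csw.getdomain('GetRecords.resultType') populates
    # csw.results['values'] with the values the server allows (illustrative).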
    def getrecords(self, qtype=None, keywords=[], typenames='csw:Record',
                   propertyname='csw:AnyText', bbox=None, esn='summary',
                   sortby=None, outputschema=namespaces['csw'],
                   format=outputformat, startposition=0, maxrecords=10,
                   cql=None, xml=None, resulttype='results'):
"""
Construct and process a GetRecords request
Parameters
----------
- qtype: type of resource to query (i.e. service, dataset)
- keywords: list of keywords
- typenames: the typeNames to query against (default is csw:Record)
- propertyname: the PropertyName to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- sortby: property to sort results on
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
- cql: common query language text. Note this overrides bbox, qtype, keywords
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
"""
warnings.warn("""Please use the updated 'getrecords2' method instead of 'getrecords'.
The 'getrecords' method will be upgraded to use the 'getrecords2' parameters
in a future version of OWSLib.""")
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('resultType', resulttype)
node0.set('service', self.service)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)
if sortby is not None:
fes.setsortby(node1, sortby)
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
self.results['nextrecord'] = int(util.testXMLValue(val, True))
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
    def getrecordbyid(self, id=[], esn='full',
                      outputschema=namespaces['csw'], format=outputformat):
"""
Construct and process a GetRecordById request
Parameters
----------
- id: the list of Ids
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
data = {
'service': self.service,
'version': self.version,
'request': 'GetRecordById',
'outputFormat': format,
'outputSchema': outputschema,
'elementsetname': esn,
'id': ','.join(id),
}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
self.results = {}
self.records = OrderedDict()
self._parserecords(outputschema, esn)
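    # e.g. csw.getrecordbyid(id=['some-record-identifier']) fills csw.records,
    # an OrderedDict keyed by record identifier (illustrative).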
    def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record',
                    esn='summary', outputschema=namespaces['csw'],
                    format=outputformat, startposition=0, maxrecords=10,
                    cql=None, xml=None, resulttype='results'):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: the list of constraints (OgcExpression from owslib.fes module)
- sortby: an OGC SortBy object (SortBy from owslib.fes module)
- typenames: the typeNames to query against (default is csw:Record)
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
        - cql: common query language text. Note this overrides the constraints list
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
"""
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('service', self.service)
node0.set('resultType', resulttype)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
if any([len(constraints) > 0, cql is not None]):
node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))
node2.set('version', '1.1.0')
flt = fes.FilterRequest()
if len(constraints) > 0:
node2.append(flt.setConstraintList(constraints))
# Now add a CQL filter if passed in
elif cql is not None:
etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql
if sortby is not None and isinstance(sortby, fes.SortBy):
node1.append(sortby.toXML())
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
if val is not None:
self.results['nextrecord'] = int(util.testXMLValue(val, True))
else:
warnings.warn("""CSW Server did not supply a nextRecord value (it is optional), so the client
should page through the results in another way.""")
# For more info, see:
# https://github.com/geopython/OWSLib/issues/100
self.results['nextrecord'] = None
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
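    # Usage sketch (illustrative only): a filtered GetRecords search using an
    # owslib.fes constraint, then inspecting the paging attributes parsed above.
    # The search term is a placeholder.
    #   from owslib.fes import PropertyIsLike
    #   query = PropertyIsLike('csw:AnyText', '%elevation%')
    #   csw.getrecords2(constraints=[query], maxrecords=10)
    #   print(csw.results)  # {'matches': ..., 'returned': ..., 'nextrecord': ...}
    #   for identifier, rec in csw.records.items():
    #       print(identifier, rec.title)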
def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None, bbox=None, keywords=[], cql=None, identifier=None):
"""
Construct and process a Transaction request
Parameters
----------
- ttype: the type of transaction 'insert, 'update', 'delete'
- typename: the typename to describe (default is 'csw:Record')
- record: the XML record to insert
- propertyname: the RecordProperty/PropertyName to Filter against
- propertyvalue: the RecordProperty Value to Filter against (for updates)
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- keywords: list of keywords
- cql: common query language text. Note this overrides bbox, qtype, keywords
- identifier: record identifier. Note this overrides bbox, qtype, keywords, cql
"""
# construct request
node0 = self._setrootelement('csw:Transaction')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
validtransactions = ['insert', 'update', 'delete']
if ttype not in validtransactions: # invalid transaction
raise RuntimeError('Invalid transaction \'%s\'.' % ttype)
node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))
if ttype != 'update':
node1.set('typeName', typename)
if ttype == 'insert':
if record is None:
raise RuntimeError('Nothing to insert.')
node1.append(etree.fromstring(record))
if ttype == 'update':
if record is not None:
node1.append(etree.fromstring(record))
else:
if propertyname is not None and propertyvalue is not None:
node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))
etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname
etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue
                self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
if ttype == 'delete':
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
self._parsetransactionsummary()
self._parseinsertresult()
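    # Usage sketch (illustrative only): insert a metadata record, then update a
    # single property on it. ``xml_record`` and the identifier are placeholders.
    #   csw.transaction(ttype='insert', typename='csw:Record', record=xml_record)
    #   csw.transaction(ttype='update', propertyname='dc:title',
    #                   propertyvalue='New title', identifier='urn:example:record-1')
    #   print(csw.results)  # {'inserted': ..., 'updated': ..., 'deleted': ...}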
def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):
"""
Construct and process a Harvest request
Parameters
----------
- source: a URI to harvest
- resourcetype: namespace identifying the type of resource
- resourceformat: MIME type of the resource
- harvestinterval: frequency of harvesting, in ISO8601
        - responsehandler: endpoint that the CSW should respond to with its response
"""
# construct request
node0 = self._setrootelement('csw:Harvest')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source
etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype
if resourceformat is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat
if harvestinterval is not None:
etree.SubElement(node0, util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval
if responsehandler is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))
if util.testXMLValue(val) is not None:
ts = val.attrib.get('timeStamp')
self.timestamp = util.testXMLValue(ts, True)
id = val.find(util.nspath_eval('csw:RequestId', namespaces))
self.id = util.testXMLValue(id)
else:
self._parsetransactionsummary()
self._parseinsertresult()
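    # Usage sketch (illustrative only; the URL is a placeholder): ask the CSW to
    # harvest a remote ISO 19139 document once a day (ISO 8601 period P1D).
    #   csw.harvest(source='https://example.com/metadata.xml',
    #               resourcetype='http://www.isotc211.org/2005/gmd',
    #               harvestinterval='P1D')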
def get_operation_by_name(self, name):
"""Return a named operation"""
for item in self.operations:
if item.name.lower() == name.lower():
return item
raise KeyError("No operation named %s" % name)
    def getService_urls(self, service_string=None):
        """
        Return easily identifiable URLs for all service types
        Parameters
        ----------
        - service_string: the dct:references scheme URI to look up
        """
        urls = []
        for rec in self.records.itervalues():
            # create a generator and iterate through it until a match is found;
            # if no reference matches, fall back to the default (None)
            url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
            if url is not None:
                urls.append(url)
        return urls
def _parseinsertresult(self):
self.results['insertresults'] = []
for i in self._exml.findall(util.nspath_eval('csw:InsertResult', namespaces)):
for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):
self.results['insertresults'].append(util.testXMLValue(j))
def _parserecords(self, outputschema, esn):
if outputschema == namespaces['gmd']: # iso 19139
for i in self._exml.findall('.//'+util.nspath_eval('gmd:MD_Metadata', namespaces)) or self._exml.findall('.//'+util.nspath_eval('gmi:MI_Metadata', namespaces)):
val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = MD_Metadata(i)
elif outputschema == namespaces['fgdc']: # fgdc csdgm
for i in self._exml.findall('.//metadata'):
val = i.find('idinfo/datasetid')
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = Metadata(i)
elif outputschema == namespaces['dif']: # nasa dif
for i in self._exml.findall('.//'+util.nspath_eval('dif:DIF', namespaces)):
val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = DIF(i)
else: # process default
for i in self._exml.findall('.//'+util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):
val = i.find(util.nspath_eval('dc:identifier', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = CswRecord(i)
def _parsetransactionsummary(self):
val = self._exml.find(util.nspath_eval('csw:TransactionSummary', namespaces))
if val is not None:
rid = val.attrib.get('requestId')
self.results['requestid'] = util.testXMLValue(rid, True)
ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))
self.results['inserted'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))
self.results['updated'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))
self.results['deleted'] = int(util.testXMLValue(ts))
def _setesnel(self, esn):
""" Set the element name to parse depending on the ElementSetName requested """
el = 'Record'
if esn == 'brief':
el = 'BriefRecord'
if esn == 'summary':
el = 'SummaryRecord'
return el
def _setidentifierkey(self, el):
if el is None:
return 'owslib_random_%i' % random.randint(1,65536)
else:
return el
def _setrootelement(self, el):
if etree.__name__ == 'lxml.etree': # apply nsmap
return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)
else:
return etree.Element(util.nspath_eval(el, namespaces))
def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None, identifier=None):
if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:
node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))
node0.set('version', '1.1.0')
if identifier is not None: # set identifier filter, overrides all other parameters
flt = fes.FilterRequest()
node0.append(flt.set(identifier=identifier))
elif cql is not None: # send raw CQL query
# CQL passed, overrides all other parameters
node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))
node1.text = cql
else: # construct a Filter request
flt = fes.FilterRequest()
node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname,bbox=bbox))
def _invoke(self):
# do HTTP request
if isinstance(self.request, basestring): # GET KVP
req = Request(self.request)
if self.username is not None and self.password is not None:
base64string = base64.encodestring('%s:%s' % (self.username, self.password))[:-1]
req.add_header('Authorization', 'Basic %s' % base64string)
self.response = urlopen(req, timeout=self.timeout).read()
else:
xml_post_url = self.url
# Get correct POST URL based on Operation list.
# If skip_caps=True, then self.operations has not been set, so use
# default URL.
if hasattr(self, 'operations'):
caller = inspect.stack()[1][3]
if caller == 'getrecords2': caller = 'getrecords'
try:
op = self.get_operation_by_name(caller)
post_verbs = filter(lambda x: x.get('type').lower() == 'post', op.methods)
if len(post_verbs) > 1:
# Filter by constraints. We must match a PostEncoding of "XML"
try:
                            xml_post_url = next(
                                pv.get('url') for pv in post_verbs
                                for const in pv.get('constraints')
                                if const.name.lower() == "postencoding"
                                and 'xml' in map(lambda x: x.lower(), const.values))
except StopIteration:
# Well, just use the first one.
xml_post_url = post_verbs[0].get('url')
elif len(post_verbs) == 1:
xml_post_url = post_verbs[0].get('url')
                except Exception:  # no such luck, just go with xml_post_url
pass
self.request = cleanup_namespaces(self.request)
# Add any namespaces used in the "typeNames" attribute of the
# csw:Query element to the query's xml namespaces.
for query in self.request.findall(util.nspath_eval('csw:Query', namespaces)):
ns = query.get("typeNames", None)
if ns is not None:
# Pull out "gmd" from something like "gmd:MD_Metadata" from the list
# of typenames
ns_keys = [x.split(':')[0] for x in ns.split(' ')]
self.request = add_namespaces(self.request, ns_keys)
self.request = util.element_to_string(self.request, encoding='utf-8')
self.response = util.http_post(xml_post_url, self.request, self.lang, self.timeout, self.username, self.password)
self._parse_response(self.response)
def _parse_response(self, response):
'''parse in-memory xml string from a file obj or _invoke
'''
# parse result see if it's XML
self._exml = etree.parse(StringIO.StringIO(response))
        # it's XML. Attempt to decipher whether the XML response is CSW-ish
valid_xpaths = [
util.nspath_eval('ows:ExceptionReport', namespaces),
util.nspath_eval('csw:Capabilities', namespaces),
util.nspath_eval('csw:DescribeRecordResponse', namespaces),
util.nspath_eval('csw:GetDomainResponse', namespaces),
util.nspath_eval('csw:GetRecordsResponse', namespaces),
util.nspath_eval('csw:GetRecordByIdResponse', namespaces),
util.nspath_eval('csw:HarvestResponse', namespaces),
util.nspath_eval('csw:TransactionResponse', namespaces)
]
if self._exml.getroot().tag not in valid_xpaths:
raise RuntimeError('Document is XML, but not CSW-ish')
# check if it's an OGC Exception
val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))
if val is not None:
raise ows.ExceptionReport(self._exml, self.owscommon.namespace)
else:
self.exceptionreport = None
class CswRecord(object):
""" Process csw:Record, csw:BriefRecord, csw:SummaryRecord """
def __init__(self, record):
if hasattr(record, 'getroot'): # standalone document
self.xml = etree.tostring(record.getroot())
else: # part of a larger document
self.xml = etree.tostring(record)
# check to see if Dublin Core record comes from
# rdf:RDF/rdf:Description container
# (child content model is identical)
self.rdf = False
rdf = record.find(util.nspath_eval('rdf:Description', namespaces))
if rdf is not None:
self.rdf = True
record = rdf
# some CSWs return records with multiple identifiers based on
# different schemes. Use the first dc:identifier value to set
# self.identifier, and set self.identifiers as a list of dicts
val = record.find(util.nspath_eval('dc:identifier', namespaces))
self.identifier = util.testXMLValue(val)
self.identifiers = []
for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):
d = {}
d['scheme'] = i.attrib.get('scheme')
d['identifier'] = i.text
self.identifiers.append(d)
val = record.find(util.nspath_eval('dc:type', namespaces))
self.type = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:title', namespaces))
self.title = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:alternative', namespaces))
self.alternative = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:isPartOf', namespaces))
self.ispartof = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:abstract', namespaces))
self.abstract = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:date', namespaces))
self.date = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:created', namespaces))
self.created = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:issued', namespaces))
self.issued = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:relation', namespaces))
self.relation = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:temporal', namespaces))
self.temporal = util.testXMLValue(val)
self.uris = [] # list of dicts
for i in record.findall(util.nspath_eval('dc:URI', namespaces)):
uri = {}
uri['protocol'] = util.testXMLValue(i.attrib.get('protocol'), True)
uri['name'] = util.testXMLValue(i.attrib.get('name'), True)
uri['description'] = util.testXMLValue(i.attrib.get('description'), True)
uri['url'] = util.testXMLValue(i)
self.uris.append(uri)
self.references = [] # list of dicts
for i in record.findall(util.nspath_eval('dct:references', namespaces)):
ref = {}
ref['scheme'] = util.testXMLValue(i.attrib.get('scheme'), True)
ref['url'] = util.testXMLValue(i)
self.references.append(ref)
val = record.find(util.nspath_eval('dct:modified', namespaces))
self.modified = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:creator', namespaces))
self.creator = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:publisher', namespaces))
self.publisher = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:coverage', namespaces))
self.coverage = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:contributor', namespaces))
self.contributor = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:language', namespaces))
self.language = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:source', namespaces))
self.source = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:rightsHolder', namespaces))
self.rightsholder = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:accessRights', namespaces))
self.accessrights = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:license', namespaces))
self.license = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:format', namespaces))
self.format = util.testXMLValue(val)
self.subjects = []
for i in record.findall(util.nspath_eval('dc:subject', namespaces)):
self.subjects.append(util.testXMLValue(i))
self.rights = []
for i in record.findall(util.nspath_eval('dc:rights', namespaces)):
self.rights.append(util.testXMLValue(i))
val = record.find(util.nspath_eval('dct:spatial', namespaces))
self.spatial = util.testXMLValue(val)
val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))
if val is not None:
self.bbox = ows.BoundingBox(val, namespaces['ows'])
else:
self.bbox = None
val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))
if val is not None:
self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows'])
else:
self.bbox_wgs84 = None
|
{
"content_hash": "f14b296e5ad566d12d00ba1fe1403b36",
"timestamp": "",
"source": "github",
"line_count": 804,
"max_line_length": 268,
"avg_line_length": 43.49626865671642,
"alnum_prop": 0.606731291641646,
"repo_name": "b-cube/OwsCapable",
"id": "f72b66e8e4c78293736c0c00b94f7ce5be92fa9b",
"size": "35288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "owscapable/csw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "553283"
}
],
"symlink_target": ""
}
|
"""
Plotting terminal based scatterplots
"""
from __future__ import print_function
import csv
import sys
import optparse
from .utils.helpers import *
from .utils.commandhelp import scatter
def get_scale(series, is_y=False, steps=20):
min_val = min(series)
max_val = max(series)
scaled_series = []
for x in drange(min_val, max_val, (max_val - min_val) / steps):
if x > 0 and scaled_series and max(scaled_series) < 0:
scaled_series.append(0.0)
scaled_series.append(x)
if is_y:
scaled_series.reverse()
return scaled_series
def plot_scatter(f, xs, ys, size, pch, colour, title):
"""
    Plot a terminal scatterplot from the given coordinates.
Arguments:
f -- comma delimited file w/ x,y coordinates
xs -- if f not specified this is a file w/ x coordinates
    ys -- if f not specified this is a file w/ y coordinates
size -- size of the plot
pch -- shape of the points (any character)
colour -- colour of the points
title -- title of the plot
"""
if f:
if isinstance(f, str):
f = open(f)
data = [tuple(map(float, line.strip().split(','))) for line in f]
xs = [i[0] for i in data]
ys = [i[1] for i in data]
else:
xs = [float(str(row).strip()) for row in open(xs)]
ys = [float(str(row).strip()) for row in open(ys)]
plotted = set()
if title:
print(box_text(title, 2 * len(get_scale(xs, False, size)) + 1))
print("-" * (2 * len(get_scale(xs, False, size)) + 2))
for y in get_scale(ys, True, size):
print("|", end=' ')
for x in get_scale(xs, False, size):
point = " "
for (i, (xp, yp)) in enumerate(zip(xs, ys)):
if xp <= x and yp >= y and (xp, yp) not in plotted:
point = pch
#point = str(i)
plotted.add((xp, yp))
if x == 0 and y == 0:
point = "o"
elif x == 0:
point = "|"
elif y == 0:
point = "-"
printcolour(point, True, colour)
print("|")
print("-" * (2 * len(get_scale(xs, False, size)) + 2))
def main():
parser = optparse.OptionParser(usage=scatter['usage'])
parser.add_option('-f', '--file', help='a csv w/ x and y coordinates', default=None, dest='f')
parser.add_option('-t', '--title', help='title for the chart', default="", dest='t')
parser.add_option('-x', help='x coordinates', default=None, dest='x')
parser.add_option('-y', help='y coordinates', default=None, dest='y')
    parser.add_option('-s', '--size', help='size of the plot', default=20, dest='size', type='int')
parser.add_option('-p', '--pch', help='shape of point', default="x", dest='pch')
parser.add_option('-c', '--colour', help='colour of the plot (%s)' %
colour_help, default='default', dest='colour')
opts, args = parser.parse_args()
if opts.f is None and (opts.x is None or opts.y is None):
opts.f = sys.stdin.readlines()
if opts.f or (opts.x and opts.y):
plot_scatter(opts.f, opts.x, opts.y, opts.size, opts.pch, opts.colour, opts.t)
else:
print("nothing to plot!")
if __name__ == "__main__":
main()
|
{
"content_hash": "85853d8a241681d82f5c1d207332d6af",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 98,
"avg_line_length": 31.980582524271846,
"alnum_prop": 0.5440194292653309,
"repo_name": "asbjorn/bashplotlib",
"id": "ddaf3cc323f316425ef8e9fcc4e93fcd2fd04617",
"size": "3341",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bashplotlib/scatterplot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14905"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
}
|
from OpenGL.arrays import vbo
from OpenGL.GL import *
from OpenGL.GL import shaders
import numpy
from numpy import array
import pyglfw.pyglfw as fw
import sys
def main():
### Initialize
fw.init()
### Hints
# ok, that doesn't work... can't get context for that
# fw.Window.hint(stereo=True)
### Monitor Info
# print_all_monitors()
### Rift
monitor = get_rift()
curmode = monitor.video_mode
curmode.refresh_rate # should be 74
# Check for Monitor - really should check for Rift attach/detach...
# this doesn't work like I think it should
fw.Monitor.set_callback(on_monitor)
win = CbWindow(640, 480, 'pyglfw')
win.make_current()
while not win.should_close:
render()
win.swap_buffers()
fw.poll_events()
if win.keys.escape:
win.should_close = True
fw.terminate()
def render():
# blue bg
blue = (0.0, 0.0, 1.0, 0.0)
glClearColor(*blue)
glClear(GL_COLOR_BUFFER_BIT)
# this should go in an init function really
# http://pyopengl.sourceforge.net/context/tutorials/shader_1.html
VERTEX_SHADER = shaders.compileShader("""
#version 120
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
""", GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""
#version 120
void main() {
gl_FragColor = vec4( 0, 1, 0, 1 );
}
""", GL_FRAGMENT_SHADER)
shader = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
myvbo = vbo.VBO(
array( [
[ 0, 1, 0 ],
[ -1,-1, 0 ],
[ 1,-1, 0 ],
[ 2,-1, 0 ],
[ 4,-1, 0 ],
[ 4, 1, 0 ],
[ 2,-1, 0 ],
[ 4, 1, 0 ],
[ 2, 1, 0 ],
],'f')
)
shaders.glUseProgram(shader)
try:
myvbo.bind()
try:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointerf(myvbo)
glDrawArrays(GL_TRIANGLES, 0, 9)
finally:
myvbo.unbind()
glDisableClientState(GL_VERTEX_ARRAY);
finally:
shaders.glUseProgram(0)
def on_monitor(_monitor, _event):
change_markers = {fw.Monitor.CONNECTED: '+', fw.Monitor.DISCONNECTED: '-'}
change = change_markers.get(_event, '~')
print("screen: %s %s" % (change, _monitor.name))
class CbWindow(fw.Window):
def __init__(self, *args, **kwargs):
super(CbWindow, self).__init__(*args, **kwargs)
self.set_key_callback(CbWindow.key_callback)
self.set_char_callback(CbWindow.char_callback)
self.set_scroll_callback(CbWindow.scroll_callback)
self.set_mouse_button_callback(CbWindow.mouse_button_callback)
self.set_cursor_enter_callback(CbWindow.cursor_enter_callback)
self.set_cursor_pos_callback(CbWindow.cursor_pos_callback)
self.set_window_size_callback(CbWindow.window_size_callback)
self.set_window_pos_callback(CbWindow.window_pos_callback)
self.set_window_close_callback(CbWindow.window_close_callback)
self.set_window_refresh_callback(CbWindow.window_refresh_callback)
self.set_window_focus_callback(CbWindow.window_focus_callback)
self.set_window_iconify_callback(CbWindow.window_iconify_callback)
self.set_framebuffer_size_callback(CbWindow.framebuffer_size_callback)
def key_callback(self, key, scancode, action, mods):
print(
"keybrd: key=%s scancode=%s action=%s mods=%s" %
(key, scancode, action, mods))
def char_callback(self, char):
print("unichr: char=%s" % char)
def scroll_callback(self, off_x, off_y):
print("scroll: x=%s y=%s" % (off_x, off_y))
def mouse_button_callback(self, button, action, mods):
print("button: button=%s action=%s mods=%s" % (button, action, mods))
def cursor_enter_callback(self, status):
print("cursor: status=%s" % status)
def cursor_pos_callback(self, pos_x, pos_y):
print("curpos: x=%s y=%s" % (pos_x, pos_y))
def window_size_callback(self, wsz_w, wsz_h):
print("window: w=%s h=%s" % (wsz_w, wsz_h))
def window_pos_callback(self, pos_x, pos_y):
print("window: x=%s y=%s" % (pos_x, pos_y))
def window_close_callback(self):
print("should: %s" % self.should_close)
def window_refresh_callback(self):
print("redraw")
def window_focus_callback(self, status):
print("active: status=%s" % status)
def window_iconify_callback(self, status):
print("hidden: status=%s" % status)
def framebuffer_size_callback(self, fbs_x, fbs_y):
print("buffer: x=%s y=%s" % (fbs_x, fbs_y))
### Swap
# window.swap_interval(0)
# window.make_current()
# window.swap_buffers()
def print_all_monitors():
'''
note you can run xrandr in the command line to get the info as well
rift will look like:
DP-0 connected primary 1920x1080+0+0 left (normal left inverted right x axis y axis) 71mm x 126mm
1080x1920 75.0*+ 72.0 60.0
1080x948 120.0
'''
    print("")
    for monitor in fw.get_monitors():
        print(monitor.name)
        print(monitor.physical_size)
        print(monitor.pos)
        for mode in monitor.video_modes:
            print(' mode:')
            print('   %s' % mode.width)
            print('   %s' % mode.height)
            print('   %s' % mode.refresh_rate)
    print("")
def get_rift():
'''
http://unix.stackexchange.com/questions/67983/get-monitor-make-and-model-and-other-info-in-human-readable-form
http://superuser.com/questions/800572/interpret-edid-information-to-get-manufacturer-and-type-number-of-my-laptop-scre
http://ubuntuforums.org/showthread.php?t=1946208
https://github.com/glfw/glfw/issues/212
If we could easily read the EDID that'd be much easier
Vendor is OVR
read-edid barfs w/ Nvida (maybe can use NV-CONTROL)
xrandr --verbose works...
'''
# For now, lets do the easiest thing and get it by physical size of DK2
for monitor in fw.get_monitors():
if monitor.physical_size == (71, 126):
return monitor
### TODO: DK1 physical size
# If we're still here (no rift?), lets just return the primary display
return fw.get_primary_monitor()
'''
# If a new monitor is connected...
def on_monitor_event(monitor, event):
if event == glfw.Monitor.CONNECTED:
print(monitor.name)
glfw.Monitor.set_callback(on_monitor_event)
# Hints
glfw.Window.hint()
glfw.Window.hint(client_api=glfw.Window.OPENGL_API)
w, h = curmode.width, curmode.height
window = glfw.Window(w, h, 'pyglfw')
window.close()
GLFW_STEREO GL_FALSE GL_TRUE or GL_FALSE
The GLFW_STEREO hint specifies whether to use stereoscopic rendering.
GLFW_REFRESH_RATE 0 0 to INT_MAX
The GLFW_REFRESH_RATE hint specifies the desired refresh rate for full screen windows. If set to zero, the highest available refresh rate will be used. This hint is ignored for windowed mode windows.
# Swap
# makes context current
# and restores previous
window.swap_interval(0)
window.make_current()
window.swap_buffers()
# Windows
if not window.should_close:
window.set_title('pyglfw')
size = window.size
window.show()
is_visible = window.visible
client_api = window.client_api
window.has_focus = True
def on_window_size(window, w, h):
window.size = size
window.set_window_size_callback(on_window_size)
# Inputs
mode = window.sticky_keys
window.sticky_mice = mode
is_escape = window.keys.escape
is_middle = window.mice.middle
cursor_at = window.cursor_pos
def on_key(window, key, scancode, action, mods):
if key == glfw.Keys.ESCAPE:
window.should_close = True
window.set_key_callback(on_key)
js = glfw.Joystick(0)
if js:
joy_name = js.name
joy_axes = js.axes
'''
if __name__ == '__main__':
main()
|
{
"content_hash": "b2cf92331d8c4e3311f0b163c1483362",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 199,
"avg_line_length": 25.0625,
"alnum_prop": 0.652710329439559,
"repo_name": "lhl/vrdev",
"id": "223603f59d3555cca5555a47567c782db9f422ab",
"size": "7642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "002-pyopengl/06-pyglfw-render.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "195562"
},
{
"name": "JavaScript",
"bytes": "1624"
},
{
"name": "Jupyter Notebook",
"bytes": "1091162"
},
{
"name": "Python",
"bytes": "856749"
},
{
"name": "Shell",
"bytes": "832"
}
],
"symlink_target": ""
}
|
from flask_restplus import Namespace, Resource
from willstores.service import ProductService
from willstores.util import Marshalling, Parsing
from willstores.controller import ErrorHandler
kindAPI = Namespace("Kind", description="Product kind related operations.", url_prefix="/kind")
marshalling = Marshalling(kindAPI)
INSEARCHMODEL = marshalling.in_fullsearchmodel("SearchIN")
OUTSEARCHMODEL = marshalling.out_fullsearchmodel("SearchOUT")
@kindAPI.route("/<string:kind>/", strict_slashes=False)
@kindAPI.param("kind", "The search kind.")
class KindController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__productservice = ProductService()
@kindAPI.expect(INSEARCHMODEL)
    @kindAPI.marshal_with(OUTSEARCHMODEL, description="Search information for a kind.", mask=False)
@kindAPI.param("payload", description="Optional.", _in="body", required=False)
def get(self, kind):
"""Product kind information."""
try:
in_pricerange = Parsing.parse_search()
total = self.__productservice.get_total(kind=kind, pricerange=in_pricerange)
brands = self.__productservice.get_brands(kind=kind, pricerange=in_pricerange)
kinds = self.__productservice.get_kinds(kind=kind, pricerange=in_pricerange)
if in_pricerange is None:
pricerange = self.__productservice.get_pricerange(kind=kind)
else:
pricerange = in_pricerange
return {"total": total, "pricerange": pricerange, "brands": brands, "kinds": kinds}
except Exception as error:
return ErrorHandler(error).handle_error()
INPAGEMODEL = marshalling.in_pagemodel("PageIN")
OUTPAGEMODEL = marshalling.out_pagemodel("PageOUT")
@kindAPI.route("/<string:kind>/<int:page>", strict_slashes=False)
@kindAPI.param("kind", "The search kind.")
@kindAPI.param("page", "The search page.")
class KindPageController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__productservice = ProductService()
@kindAPI.expect(INPAGEMODEL)
@kindAPI.marshal_with(OUTPAGEMODEL, description="Search products from a kind on a specific page.", mask=False)
@kindAPI.param("payload", description="Optional.", _in="body", required=False)
def get(self, kind, page):
"""Products of a kind on page."""
try:
in_pagesize, in_pricerange = Parsing.parse_search_page()
if in_pagesize is None:
products = self.__productservice.get_products(kind=kind, page=page, pricerange=in_pricerange)
else:
products = self.__productservice.get_products(kind=kind, page=page, pagesize=in_pagesize,
pricerange=in_pricerange)
return {"products": [p.to_dict_min() for p in products]}
except Exception as error:
return ErrorHandler(error).handle_error()
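# Usage sketch (illustrative only; the route prefix and payload fields are
# assumed from the decorators above):
#   GET /kind/<kind>/        -> {"total": ..., "pricerange": ..., "brands": [...], "kinds": [...]}
#   GET /kind/<kind>/<page>  -> {"products": [...]}
# An optional JSON body, parsed by Parsing.parse_search / parse_search_page,
# can constrain the price range or set the page size.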
|
{
"content_hash": "504772ab8ee422b2a0cc6a9f4e2ee0d2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 114,
"avg_line_length": 41.90277777777778,
"alnum_prop": 0.6589327146171694,
"repo_name": "willrogerpereira/willbuyer",
"id": "b640202bace864c8fdbb6ab34110a5ec944a0787",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "willstores/willstores/controller/searchapis/kind.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4847"
},
{
"name": "HTML",
"bytes": "15697"
},
{
"name": "JavaScript",
"bytes": "38943"
},
{
"name": "Python",
"bytes": "83645"
}
],
"symlink_target": ""
}
|
"""
SVDSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
e.g. useful to superimpose crystal structures. SVD stands for singular
value decomposition, which is used in the algorithm.
"""
from __future__ import print_function
from numpy import dot, transpose, sqrt, array
from numpy.linalg import svd, det
class SVDSuperimposer(object):
"""
SVDSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
    e.g. useful to superimpose crystal structures.
SVD stands for Singular Value Decomposition, which is used to calculate
the superposition.
Reference:
Matrix computations, 2nd ed. Golub, G. & Van Loan, CF., The Johns
Hopkins University Press, Baltimore, 1989
"""
def __init__(self):
self._clear()
# Private methods
def _clear(self):
self.reference_coords = None
self.coords = None
self.transformed_coords = None
self.rot = None
self.tran = None
self.rms = None
self.init_rms = None
def _rms(self, coords1, coords2):
"Return rms deviations between coords1 and coords2."
diff = coords1 - coords2
        n = coords1.shape[0]
        return sqrt(sum(sum(diff * diff)) / n)
# Public methods
def set(self, reference_coords, coords):
"""
Set the coordinates to be superimposed.
coords will be put on top of reference_coords.
o reference_coords: an NxDIM array
o coords: an NxDIM array
DIM is the dimension of the points, N is the number
of points to be superimposed.
"""
# clear everything from previous runs
self._clear()
        # store coordinates
self.reference_coords = reference_coords
self.coords = coords
n = reference_coords.shape
m = coords.shape
if n != m or not(n[1] == m[1] == 3):
raise Exception("Coordinate number/dimension mismatch.")
self.n = n[0]
def run(self):
"Superimpose the coordinate sets."
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
coords = self.coords
reference_coords = self.reference_coords
# center on centroid
av1 = sum(coords) / self.n
av2 = sum(reference_coords) / self.n
coords = coords - av1
reference_coords = reference_coords - av2
# correlation matrix
a = dot(transpose(coords), reference_coords)
u, d, vt = svd(a)
self.rot = transpose(dot(transpose(vt), transpose(u)))
# check if we have found a reflection
if det(self.rot) < 0:
vt[2] = -vt[2]
self.rot = transpose(dot(transpose(vt), transpose(u)))
self.tran = av2 - dot(av1, self.rot)
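    # Note on the math in run() above (the standard SVD/Kabsch construction):
    # with both point sets centred, a = coords^T . reference_coords and
    # svd(a) = u . diag(d) . vt, so rot = transpose(dot(transpose(vt),
    # transpose(u))) = dot(u, vt). Flipping the sign of the last row of vt
    # when det(rot) < 0 forces a proper rotation (no reflection), and tran
    # then maps the centroid of coords onto the centroid of reference_coords.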
def get_transformed(self):
"Get the transformed coordinate set."
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
if self.rot is None:
raise Exception("Nothing superimposed yet.")
if self.transformed_coords is None:
self.transformed_coords = dot(self.coords, self.rot) + self.tran
return self.transformed_coords
def get_rotran(self):
"Right multiplying rotation matrix and translation."
if self.rot is None:
raise Exception("Nothing superimposed yet.")
return self.rot, self.tran
def get_init_rms(self):
"Root mean square deviation of untransformed coordinates."
if self.coords is None:
raise Exception("No coordinates set yet.")
if self.init_rms is None:
self.init_rms = self._rms(self.coords, self.reference_coords)
return self.init_rms
def get_rms(self):
"Root mean square deviation of superimposed coordinates."
if self.rms is None:
transformed_coords = self.get_transformed()
self.rms = self._rms(transformed_coords, self.reference_coords)
return self.rms
if __name__ == "__main__":
# start with two coordinate sets (Nx3 arrays - float)
x = array([[51.65, -1.90, 50.07],
[50.40, -1.23, 50.65],
[50.68, -0.04, 51.54],
[50.22, -0.02, 52.85]], 'f')
y = array([[51.30, -2.99, 46.54],
[51.09, -1.88, 47.58],
[52.36, -1.20, 48.03],
[52.71, -1.18, 49.38]], 'f')
# start!
sup = SVDSuperimposer()
# set the coords
# y will be rotated and translated on x
sup.set(x, y)
# do the lsq fit
sup.run()
# get the rmsd
rms = sup.get_rms()
# get rotation (right multiplying!) and the translation
rot, tran = sup.get_rotran()
# rotate y on x
y_on_x1 = dot(y, rot) + tran
# same thing
y_on_x2 = sup.get_transformed()
print(y_on_x1)
print("")
print(y_on_x2)
print("")
print("%.2f" % rms)
|
{
"content_hash": "c909190809c058954de5473382ca4078",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 76,
"avg_line_length": 30.55757575757576,
"alnum_prop": 0.6025386751289171,
"repo_name": "poojavade/Genomics_Docker",
"id": "f14c112cd0a972d8fe68c3efe9cc08af2cf72188",
"size": "5270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SVDSuperimposer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
}
|
"""
XBlock Courseware Components
"""
# For backwards compatability, provide the XBlockMixin in xblock.fields
# without causing a circular import
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
import os
import warnings
import xblock.core
import xblock.fields
class XBlockMixin(xblock.core.XBlockMixin):
"""
A wrapper around xblock.core.XBlockMixin that provides backwards compatibility for the old location.
Deprecated.
"""
def __init__(self, *args, **kwargs):
warnings.warn("Please use xblock.core.XBlockMixin", DeprecationWarning, stacklevel=2)
super(XBlockMixin, self).__init__(*args, **kwargs)
# For backwards compatability, provide the XBlockMixin in xblock.fields
# without causing a circular import
xblock.fields.XBlockMixin = XBlockMixin
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION.txt')
__version__ = codecs.open(VERSION_FILE, encoding='ascii').read().strip()
|
{
"content_hash": "48e2f7628bcd35f0975901a468a75dd5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 104,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.7401015228426396,
"repo_name": "mitodl/XBlock",
"id": "fe100d755859caf5bc42e08edbc73f2716e46b59",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xblock/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "168"
},
{
"name": "Python",
"bytes": "374921"
},
{
"name": "Shell",
"bytes": "1458"
}
],
"symlink_target": ""
}
|
''' Perforce related commands. '''
import abc
import shutil
import nimp.command
import nimp.utils.p4
def _is_p4_available():
if shutil.which('p4') is None:
        return False, ('p4 executable was not found on your system, check your'
                       ' installation.')
return True, ''
class P4Command(nimp.command.Command):
''' Perforce command base class '''
def __init__(self):
super(P4Command, self).__init__()
def configure_arguments(self, env, parser):
pass
def is_available(self, env):
return _is_p4_available()
@abc.abstractmethod
def run(self, env):
''' Executes the command '''
pass
class P4(nimp.command.CommandGroup):
''' Run Perforce commands '''
def __init__(self):
super(P4, self).__init__([_RevertWorkspace(),
_Submit(),
_Fileset()])
def configure_arguments(self, env, parser):
super(P4, self).configure_arguments(env, parser)
nimp.utils.p4.add_arguments(parser)
def is_available(self, env):
return _is_p4_available()
class _RevertWorkspace(P4Command):
''' Reverts and deletes all pending changelists '''
def __init__(self):
super(_RevertWorkspace, self).__init__()
def is_available(self, env):
return _is_p4_available()
def run(self, env):
if not nimp.utils.p4.check_for_p4(env):
return False
p4 = nimp.utils.p4.get_client(env)
return p4.clean_workspace()
class _Fileset(P4Command):
''' Runs perforce commands operation on a fileset. '''
def __init__(self):
super(_Fileset, self).__init__()
def configure_arguments(self, env, parser):
# These are not marked required=True because sometimes we don’t
# really need them.
super(_Fileset, self).configure_arguments(env, parser)
parser.add_argument('p4_operation',
help = 'Operation to perform on the fileset.',
choices = ['checkout', 'revert', 'reconcile', 'sync'])
parser.add_argument('fileset',
metavar = '<fileset>',
help = 'Fileset to load.')
parser.add_argument('changelist_description',
metavar = '<description>',
help = 'Changelist description format, will be interpolated with environment value.')
nimp.command.add_common_arguments(parser, 'platform', 'configuration',
'target', 'revision', 'free_parameters')
return True
def is_available(self, env):
return _is_p4_available()
def run(self, env):
if not nimp.utils.p4.check_for_p4(env):
return False
p4 = nimp.utils.p4.get_client(env)
# Dictionary below has the following structure:
# key: p4_operation
# value: [method, uses_a_changelist]
operations = { 'checkout' : [p4.edit, True],
'reconcile' : [p4.reconcile, True],
'revert' : [p4.revert, False],
'sync' : [p4.sync, False], }
files = nimp.system.map_files(env)
if files.load_set(env.fileset) is None:
return False
files = [file[0] for file in files()]
if operations[env.p4_operation][1]:
if env.changelist_description == 'default':
changelist = 'default'
else:
description = env.format(env.changelist_description)
changelist = p4.get_or_create_changelist(description)
return operations[env.p4_operation][0](changelist, *files)
else:
return operations[env.p4_operation][0](*files)
class _Submit(P4Command):
''' Submits a changelist identified by its description '''
def __init__(self):
super(_Submit, self).__init__()
def configure_arguments(self, env, parser):
# These are not marked required=True because sometimes we don’t
# really need them.
super(_Submit, self).configure_arguments(env, parser)
parser.add_argument('changelist_description',
metavar = '<description>',
help = 'Changelist description format, will be interpolated with environment value.')
return True
def is_available(self, env):
return _is_p4_available()
def run(self, env):
if not nimp.utils.p4.check_for_p4(env):
return False
p4 = nimp.utils.p4.get_client(env)
description = env.format(env.changelist_description)
changelist = p4.get_or_create_changelist(description)
return p4.submit(changelist)
|
{
"content_hash": "4945ba19161b46e55a9de0cbc783c253",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 113,
"avg_line_length": 32.54054054054054,
"alnum_prop": 0.5674833887043189,
"repo_name": "dontnod/nimp",
"id": "9285b8f28db080373c057db524d7f1b21ab16c3d",
"size": "5949",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "nimp/base_commands/p4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "434001"
}
],
"symlink_target": ""
}
|
"""
FILE: chat_thread_client_sample.py
DESCRIPTION:
    These samples demonstrate how to create a chat thread client, update a
    chat thread, get a chat message, list chat messages, update a chat message,
    send a read receipt, list read receipts, delete a chat message, add
    participants, remove participants, list participants, and send a typing
    notification.
    You need to use the azure.communication.configuration module to get a user
    access token and user identity before running this sample.
USAGE:
python chat_thread_client_sample.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
2) TOKEN - the user access token, from token_response.token
3) USER_ID - the user id, from token_response.identity
"""
import os
class ChatThreadClientSamples(object):
from azure.communication.identity import CommunicationIdentityClient
from azure.communication.chat import (
ChatClient,
CommunicationTokenCredential
)
connection_string = os.environ.get("COMMUNICATION_SAMPLES_CONNECTION_STRING", None)
if not connection_string:
raise ValueError("Set COMMUNICATION_SAMPLES_CONNECTION_STRING env before run this sample.")
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
user = identity_client.create_user()
tokenresponse = identity_client.get_token(user, scopes=["chat"])
token = tokenresponse.token
endpoint = os.environ.get("AZURE_COMMUNICATION_SERVICE_ENDPOINT", None)
if not endpoint:
raise ValueError("Set AZURE_COMMUNICATION_SERVICE_ENDPOINT env before run this sample.")
_thread_id = None
_message_id = None
new_user = identity_client.create_user()
_chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
def create_chat_thread_client(self):
token = self.token
endpoint = self.endpoint
user = self.user
# [START create_chat_thread_client]
from datetime import datetime
from azure.communication.chat import (
ChatClient,
ChatParticipant,
CommunicationUserIdentifier,
CommunicationTokenCredential
)
# retrieve `token` using CommunicationIdentityClient.get_token method
# set `endpoint` to ACS service endpoint
# create `user` using CommunicationIdentityClient.create_user method for new users;
# else for existing users set `user` = CommunicationUserIdentifier(some_user_id)
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
topic = "test topic"
participants = [ChatParticipant(
identifier=user,
display_name='name',
share_history_time=datetime.utcnow()
)]
create_chat_thread_result = chat_client.create_chat_thread(topic, thread_participants=participants)
chat_thread_client = chat_client.get_chat_thread_client(create_chat_thread_result.chat_thread.id)
# [END create_chat_thread_client]
self._thread_id = create_chat_thread_result.chat_thread.id
print("chat_thread_client created")
def get_chat_thread_properties(self):
thread_id = self._thread_id
token = self.token
endpoint = self.endpoint
# [START get_thread]
from azure.communication.chat import ChatClient, CommunicationTokenCredential
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
chat_thread_client = chat_client.get_chat_thread_client(thread_id)
chat_thread_properties = chat_thread_client.get_properties()
print('Expected Thread Id: ', thread_id, ' Actual Value: ', chat_thread_properties.id)
# [END get_thread]
print("get_chat_thread_properties succeeded, thread id: " + chat_thread.id + ", thread topic: " + chat_thread.topic)
def update_topic(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START update_topic]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
chat_thread_properties = chat_thread_client.get_properties()
previous_topic = chat_thread_properties.topic
topic = "updated thread topic"
chat_thread_client.update_topic(topic=topic)
chat_thread_properties = chat_thread_client.get_properties()
updated_topic = chat_thread_properties.topic
print("Chat Thread Topic Update: Previous value: ", previous_topic, ", Current value: ", updated_topic)
# [END update_topic]
print("update_chat_thread succeeded")
def send_message(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START send_message]
from azure.communication.chat import ChatMessageType
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# Scenario 1: Send message without specifying chat_message_type
send_message_result = chat_thread_client.send_message(
"Hello! My name is Fred Flinstone",
sender_display_name="Fred Flinstone")
send_message_result_id = send_message_result.id
# Scenario 2: Send message specifying chat_message_type
send_message_result_w_type = chat_thread_client.send_message(
"Hello! My name is Wilma Flinstone",
sender_display_name="Wilma Flinstone",
chat_message_type=ChatMessageType.TEXT) # equivalent to setting chat_message_type='text'
send_message_result_w_type_id = send_message_result_w_type.id
# Verify message content
print("First Message:", chat_thread_client.get_message(send_message_result_id).content.message)
print("Second Message:", chat_thread_client.get_message(send_message_result_w_type_id).content.message)
# [END send_message]
self._message_id = send_message_result_id
print("send_message succeeded, message_id=", send_message_result_id)
print("send_message succeeded with type specified, message_id:", send_message_result_w_type_id)
def get_message(self):
thread_id = self._thread_id
chat_client = self._chat_client
message_id = self._message_id
# [START get_message]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# set `message_id` to an existing message id
chat_message = chat_thread_client.get_message(message_id)
print("Message received: ChatMessage: content=", chat_message.content.message, ", id=", chat_message.id)
# [END get_message]
print("get_message succeeded, message id:", chat_message.id, \
"content: ", chat_message.content.message)
def list_messages(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START list_messages]
from datetime import datetime, timedelta
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
start_time = datetime.utcnow() - timedelta(days=1)
chat_messages = chat_thread_client.list_messages(results_per_page=1, start_time=start_time)
print("list_messages succeeded with results_per_page is 1, and start time is yesterday UTC")
for chat_message_page in chat_messages.by_page():
for chat_message in chat_message_page:
print("ChatMessage: message=", chat_message.content.message)
# [END list_messages]
print("list_messages succeeded")
def update_message(self):
thread_id = self._thread_id
chat_client = self._chat_client
message_id = self._message_id
# [START update_message]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# set `message_id` to an existing message id
previous_content = chat_thread_client.get_message(message_id).content.message
content = "updated content"
chat_thread_client.update_message(message_id, content=content)
current_content = chat_thread_client.get_message(message_id).content.message
print("Chat Message Updated: Previous value: ", previous_content, ", Current value: ", current_content)
# [END update_message]
print("update_message succeeded")
def send_read_receipt(self):
thread_id = self._thread_id
chat_client = self._chat_client
message_id = self._message_id
# [START send_read_receipt]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# set `message_id` to an existing message id
chat_thread_client.send_read_receipt(message_id)
# [END send_read_receipt]
print("send_read_receipt succeeded")
def list_read_receipts(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START list_read_receipts]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
read_receipts = chat_thread_client.list_read_receipts()
for read_receipt_page in read_receipts.by_page():
for read_receipt in read_receipt_page:
print(read_receipt)
# [END list_read_receipts]
print("list_read_receipts succeeded")
def delete_message(self):
thread_id = self._thread_id
chat_client = self._chat_client
message_id = self._message_id
# [START delete_message]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# set `message_id` to an existing message id
chat_thread_client.delete_message(message_id)
# [END delete_message]
print("delete_message succeeded")
def list_participants(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START list_participants]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
chat_thread_participants = chat_thread_client.list_participants()
for chat_thread_participant_page in chat_thread_participants.by_page():
for chat_thread_participant in chat_thread_participant_page:
print("ChatParticipant: ", chat_thread_participant)
# [END list_participants]
print("list_participants succeeded")
def add_participants_w_check(self):
# initially remove already added user
thread_id = self._thread_id
chat_client = self._chat_client
user = self.new_user
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
chat_thread_client.remove_participant(user)
# [START add_participants]
from azure.communication.chat import ChatParticipant
from datetime import datetime
def decide_to_retry(error):
"""
Custom logic to decide whether to retry to add or not
"""
return True
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# create `user` using CommunicationIdentityClient.create_user method for new users;
# else for existing users set `user` = CommunicationUserIdentifier(some_user_id)
new_participant = ChatParticipant(
identifier=user,
display_name='name',
share_history_time=datetime.utcnow())
# create list containing one or more participants
thread_participants = [new_participant]
result = chat_thread_client.add_participants(thread_participants)
# list of participants which were unsuccessful to be added to chat thread
retry = [p for p, e in result if decide_to_retry(e)]
if retry:
chat_thread_client.add_participants(retry)
# [END add_participants]
print("add_participants_w_check succeeded")
def remove_participant(self):
thread_id = self._thread_id
chat_client = self._chat_client
identity_client = self.identity_client
from azure.communication.chat import ChatParticipant, CommunicationUserIdentifier
from datetime import datetime
# create 2 new users using CommunicationIdentityClient.create_user method
user1 = identity_client.create_user()
user2 = identity_client.create_user()
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
# add user1 and user2 to chat thread
participant1 = ChatParticipant(
identifier=user1,
display_name='Fred Flinstone',
share_history_time=datetime.utcnow())
participant2 = ChatParticipant(
identifier=user2,
display_name='Wilma Flinstone',
share_history_time=datetime.utcnow())
thread_participants = [participant1, participant2]
chat_thread_client.add_participants(thread_participants)
# [START remove_participant]
# Option 1 : Iterate through all participants, find and delete Fred Flinstone
chat_thread_participants = chat_thread_client.list_participants()
for chat_thread_participant_page in chat_thread_participants.by_page():
for chat_thread_participant in chat_thread_participant_page:
print("ChatParticipant: ", chat_thread_participant)
if chat_thread_participant.identifier.properties['id'] == user1.properties['id']:
print("Found Fred!")
chat_thread_client.remove_participant(chat_thread_participant.identifier)
print("Fred has been removed from the thread...")
break
# Option 2: Directly remove Wilma Flinstone
unique_identifier = user2.properties['id'] # in real scenario the identifier would need to be retrieved from elsewhere
chat_thread_client.remove_participant(CommunicationUserIdentifier(unique_identifier))
print("Wilma has been removed from the thread...")
# [END remove_participant]
# clean up temporary users
self.identity_client.delete_user(user1)
self.identity_client.delete_user(user2)
print("remove_chat_participant succeeded")
def send_typing_notification(self):
thread_id = self._thread_id
chat_client = self._chat_client
# [START send_typing_notification]
# set `thread_id` to an existing thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id=thread_id)
chat_thread_client.send_typing_notification()
# [END send_typing_notification]
print("send_typing_notification succeeded")
def clean_up(self):
print("cleaning up: deleting created users.")
self.identity_client.delete_user(self.user)
self.identity_client.delete_user(self.new_user)
if __name__ == '__main__':
sample = ChatThreadClientSamples()
sample.create_chat_thread_client()
sample.update_topic()
sample.send_message()
sample.get_message()
sample.list_messages()
sample.update_message()
sample.send_read_receipt()
sample.list_read_receipts()
sample.delete_message()
sample.add_participants_w_check()
sample.list_participants()
sample.remove_participant()
sample.send_typing_notification()
sample.clean_up()
|
{
"content_hash": "a61f4f85689ab24ad84a6fb61116f766",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 126,
"avg_line_length": 41.30848329048843,
"alnum_prop": 0.6631402078536313,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1d082a08426e0d1d4f93c62f4baf8ae7fed5ee04",
"size": "16380",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-communication-chat/samples/chat_thread_client_sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.5.1-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import functools # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class FreeStyleBuild(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_class = StrSchema
number = IntSchema
url = StrSchema
class actions(
ListSchema
):
@classmethod
@property
def _items(cls) -> typing.Type['CauseAction']:
return CauseAction
building = BoolSchema
description = StrSchema
displayName = StrSchema
duration = IntSchema
estimatedDuration = IntSchema
executor = StrSchema
fullDisplayName = StrSchema
id = StrSchema
keepLog = BoolSchema
queueId = IntSchema
result = StrSchema
timestamp = IntSchema
builtOn = StrSchema
@classmethod
@property
def changeSet(cls) -> typing.Type['EmptyChangeLogSet']:
return EmptyChangeLogSet
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
_class: typing.Union[_class, Unset] = unset,
number: typing.Union[number, Unset] = unset,
url: typing.Union[url, Unset] = unset,
actions: typing.Union[actions, Unset] = unset,
building: typing.Union[building, Unset] = unset,
description: typing.Union[description, Unset] = unset,
displayName: typing.Union[displayName, Unset] = unset,
duration: typing.Union[duration, Unset] = unset,
estimatedDuration: typing.Union[estimatedDuration, Unset] = unset,
executor: typing.Union[executor, Unset] = unset,
fullDisplayName: typing.Union[fullDisplayName, Unset] = unset,
id: typing.Union[id, Unset] = unset,
keepLog: typing.Union[keepLog, Unset] = unset,
queueId: typing.Union[queueId, Unset] = unset,
result: typing.Union[result, Unset] = unset,
timestamp: typing.Union[timestamp, Unset] = unset,
builtOn: typing.Union[builtOn, Unset] = unset,
changeSet: typing.Union['EmptyChangeLogSet', Unset] = unset,
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'FreeStyleBuild':
return super().__new__(
cls,
*args,
_class=_class,
number=number,
url=url,
actions=actions,
building=building,
description=description,
displayName=displayName,
duration=duration,
estimatedDuration=estimatedDuration,
executor=executor,
fullDisplayName=fullDisplayName,
id=id,
keepLog=keepLog,
queueId=queueId,
result=result,
timestamp=timestamp,
builtOn=builtOn,
changeSet=changeSet,
_configuration=_configuration,
**kwargs,
)
from openapi_client.model.cause_action import CauseAction
from openapi_client.model.empty_change_log_set import EmptyChangeLogSet
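# Usage sketch (illustrative, not part of the generated file; assumes the
# generated package is importable as `openapi_client` and the values below
# are made up):
#
#     build = FreeStyleBuild(number=42, building=False, result='SUCCESS')
#     print(build['number'])  # generated schemas behave like immutable dicts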
|
{
"content_hash": "b90d799cafa024142d80f854caceba48",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 26.87421383647799,
"alnum_prop": 0.6288322021998596,
"repo_name": "cliffano/swaggy-jenkins",
"id": "76c3681a9e61fd19bbf0607aabb049d4b6e71b9f",
"size": "4290",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-experimental/generated/openapi_client/model/free_style_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
import numpy as np
class TikzGraph(object):
def __init__(self, i_font_scaler, i_global_scale, i_filename):
self.font_scaler = i_font_scaler
self.global_scale = i_global_scale
self.filename = i_filename
###################################### File/Picture operations
def initPicture(self):
        self.fout.write("\\begin{tabular}{c} \n")
        self.addPicture()
    def addPicture(self):
        if self.global_scale > 0.75:
            thickness = "thin"
        else:
            thickness = "very thin"
        arrow_style = ">=latex , " + thickness
        self.fout.write("\t\\begin{tikzpicture}[scale=%.2f,%s] \n" % (self.global_scale, arrow_style))
    def endPicture(self):
        self.fout.write("\t\\end{tikzpicture} \n")
        self.fout.write("\t\\\\ \n")
    def initGraphFile(self):
        self.fout = open(self.filename, "w")
        self.initPicture()
    def endGraphFile(self):
        self.endPicture()
        self.fout.write("\\end{tabular} \n")
self.fout.close()
def writeToFile(self, i_str):
self.fout.write( i_str + "\n")
##################################### Include
def includePictureAsNodeText(self, picture_file, x, y, i_width):
"""Include a picture as the text inside a node at the given x,y coordinates """
        include_str = "{\\includegraphics[width=" + str(i_width) + "cm]{" + picture_file + "}};"
o_str = self.getNode( x, y, node_description="above") + include_str
self.writeToFile(o_str)
##################################### Get
    def getNodeLabel(self, i_text, x, y, node_description=None, label=None):
        """Write i_text as a node at x,y."""
node_str = self.getNode( x, y,label, node_description) + "{" + i_text + "};"
self.writeToFile(node_str)
    def getFloatLabel(self, i_display_text, val, node_description=None, x=None, y=None):
        """Return the label associated with a transition link for the given
        probability (a float). The result is a node inserted into a specific
        path. node_description gives the contents of the square brackets []
        that follow the node."""
o_str = "node"
if node_description is not None:
o_str += ( "["+node_description+"]")
if (x is not None) and (y is not None):
o_str += (" at " + self.getXY(x,y))
if not i_display_text:
return o_str + "{}"
return o_str + ("{%.1f}" % val )
def getLabel(self, label):
"""The label of coordinate/node to connect later on"""
return "(%s)" % label
def getRadiusAngle(self, angle,radius):
"""Get coordinate in polar coordinates"""
return "(%.9f: %.9fcm)" % (angle,radius)
def getXY(self, x, y):
"""Return the xy coordinate in the correct string format"""
return "(%.9fcm, %.9fcm)" % (x,y)
def getArc(self, angle1, angle2, radius):
"""Return the arc parameterisation in the correct string format"""
return "(%.9f:%.9f:%.9fcm)" % (angle1, angle2, radius)
def getNode(self, x, y, label=None, node_description=None):
"""Return a node at x,y with/without a label in the correct format"""
x_str = str(x) + "cm"
y_str = str(y) + "cm"
o_str = "\t\t\\node"
if node_description is not None:
o_str += ( "["+node_description+"]")
if label is not None:
o_str += self.getLabel(label)
o_str += ( " at" + self.getXY(x,y) )
return o_str
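# Usage sketch (illustrative, not part of the original module; the file name
# "demo.tex" is an assumption):
#
#     g = TikzGraph(i_font_scaler=1.0, i_global_scale=1.0, i_filename="demo.tex")
#     g.initGraphFile()
#     g.getNodeLabel("hello", x=0.0, y=0.0)  # writes a single labelled node
#     g.endGraphFile()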
class StateGraphParams(object):
def __init__(self, i_filename, i_scale=1.0 ):
self.filename = i_filename
        # Typical variables to adjust
        self.global_scale = i_scale  # Scaling factor of the whole picture
        # State variables
        self.ns = 2.0  # The distance between nodes in cm
        self.rect_width = 0.4  # The rectangle width of a state
        self.rect_height = 2.0
        # Transition variables
        self.height_offset = 1.0  # The height of the transition link top above the top of a state
        self.show_probs = True  # Draw transition probs
        # Text properties
        self.font_scaler = 1.0  # Scale all the font with this number
        # Picture variables
        self.pict_width = 1.0  # Picture width associated with the state at the top
        self.pdf_files = None  # List of pictures associated with the pdf of each state
        # x offset of origin in cm
        self.x_offset = 0
class StateGraph(TikzGraph):
##################################### Init
""" Generate a tikz graph for a generic first-order state, where the origin is at (0,0)"""
def __init__(self, i_graph_params):
self.p = i_graph_params
TikzGraph.__init__(self, self.p.font_scaler, self.p.global_scale, self.p.filename)
##################################### Main
def compute(self, i_input_string, i_states, i_transitions, i_transition_probs, flip_click_labels=False):
colour_table = {'Click': 'red', 'Miss':'black','Err': 'magenta', 'Failure':'blue','Correct':'green'}
self.initGraphFile()
#Do the bounding box
        o_str = "\t\t\\draw[-] (-1.7cm,4.6cm) rectangle ( 18.8cm, -4.8cm);"
self.writeToFile(o_str)
#Show how the states are labelled in the bottom right corner
self.getStateLabelText()
state_idx = {}
for i in range(0,len(i_states)):
state_idx[i_states[i]] = i+1
#Iterate through the states and generate the nodes
for i in range(0, len(i_states)):
            print("---------------------------------------------------------")
            print("---------------------------------------------------------")
state_id = i_states[i]
state_num = state_idx[state_id]
state_text = list(state_id)
if i < (len(i_states)-3):
input_idx = int(state_text[2])+1
state_text[2] = i_input_string[int(state_text[2])]
                if state_text[2] == "_":
                    state_text[2] = "\\_"
                if state_text[0] == "_":
                    state_text[0] = "\\_"
else:
input_idx = "-"
label="s"+str(state_num)
            print("i = ", i, " state_text = ", state_text)
self.drawState(state_num, state_text, label, input_idx)
all_dest = np.array([state_idx[t] for t in i_transitions[state_id]])
transition_probs = np.array([t for t in i_transition_probs[i]])
            #Draw the transitions
for j in range(0, len(all_dest)):
dest = all_dest[j]
dest_id = i_transitions[i_states[i]][j]
dest_dest = np.array([state_idx[t] for t in i_transitions[dest_id]])
forwards = True
line_thickness = "thin"
if (not flip_click_labels):
is_dest_click = (j == 0)
else:
is_dest_click = ( j > 0 ) and (len(all_dest) > 1)
if (state_id[0] == "D") and (j < 1):
"""Links far apart associated with Delete key"""
forwards = False
if (dest_id == "Correct") or (dest_id == "Failure") or (dest_id == "Err"):
colour = colour_table[dest_id]
if (dest_id == "Failure") or (dest_id == "Err"):
forwards = False
else:
line_thickness = "thick"
elif is_dest_click:
colour = colour_table["Click"]
else:
colour = colour_table["Miss"]
                # Note: this final assignment supersedes any earlier changes to
                # `forwards` in this loop iteration; the drawing direction
                # depends only on the relative state order.
                forwards = state_num <= dest
self.drawTransition( state_num, dest, dest_dest, transition_probs[j], colour, forwards, line_thickness)
self.endGraphFile()
##################################### Get
    def getStateCenterCoordinate(self, state):
        """Get the xy coordinate of the centroid associated with a specific
        state, for state in [1, ..., N], where N is the number of states.
        States are laid out horizontally, self.p.ns cm apart, starting at
        self.p.x_offset."""
return ((state-1)*self.p.ns + self.p.x_offset, 0)
def getStateRectCorners(self, state):
(x, y) = self.getStateCenterCoordinate(state)
top_x = x - 0.5*self.p.rect_width
top_y = y + 0.5*self.p.rect_height
bottom_x = x + 0.5*self.p.rect_width
bottom_y = y - 0.5*self.p.rect_height
return (top_x, top_y, bottom_x, bottom_y)
def getStateText(self, i_state_num, i_state_label, input_idx ):
"""The text inside the state circle"""
str_label = ''.join(i_state_label)
if (str_label == "Correct") or (str_label == "Failure") or (str_label == "Err"):
if str_label == "Failure":
state_text = "Failure (" + str(i_state_num) + ")"
elif str_label == "Correct":
state_text = "Correct (" + str(i_state_num) + ")"
else:
state_text = "Error (" + str(i_state_num) + ")"
return "{\\begin{sideways}" + state_text + " \\end{sideways}};"
i_letter = "%s" % i_state_label[2]
i_click = "$%s$" % i_state_label[4]
if i_state_label[0] == "D":
o_letter = "$\\leftarrow$"
else:
o_letter = i_state_label[0]
o_letter = "%s" % o_letter
if i_state_label[1] == "*":
i_undo = ""
scan = "R"
else:
i_undo = "$%s$" % i_state_label[6]
scan = "C"
state_text = "{\\begin{tabular}{c}"
state_text += ( "{\\scriptsize "+ str(i_state_num) + " }" + "\\\\")
state_text += (scan + "\\\\")
state_text += (str(input_idx) + "\\\\")
state_text += (o_letter + "\\\\")
state_text += (i_letter + "\\\\")
state_text += (i_click + "\\\\")
state_text += (i_undo + "\\\\")
        state_text += "\\end{tabular}};"
return state_text
def getStateLabelText(self):
"""Return the text describing how each cell is labelled"""
state_text = "{\\begin{tabular}{c}"
state_text += ("$n$ \\\\")
state_text += ("R/C \\\\")
state_text += ("$m$ \\\\")
        state_text += ("$\\ell_{v'}$ \\\\")
        state_text += ("$w_{\\mathrm{x}}^{m}$ \\\\")
        state_text += ("$e$ \\\\")
        state_text += ("$u$ \\\\")
        state_text += "\\end{tabular}};"
(x,y) = self.getStateCenterCoordinate(1)
o_str = self.getNode(x-self.p.x_offset, y, "s100" ) + state_text
self.writeToFile(o_str)
##################################### Main draw
    def drawState(self, state_num, state_id, label, input_idx):
        """Draw a state: a node with a rectangle around it. The text inside
        indicates the state number (top) and pdf number (bottom), both from
        1..N (states) and 1..K (pdfs)."""
(x, y) = self.getStateCenterCoordinate(state_num)
o_str = self.getNode(x, y, label ) + self.getStateText( state_num, state_id, input_idx )
self.writeToFile(o_str)
(top_x, top_y, bottom_x, bottom_y) = self.getStateRectCorners(state_num)
rect_label = "rect" + list(label)[-1]
        o_str = "\t\t\\draw (%.4fcm,%.4fcm) rectangle(%.4fcm,%.4fcm);" % (top_x,top_y,bottom_x,bottom_y)
self.writeToFile(o_str)
    def drawTransition(self, state, dest, dest_dest, prob, i_colour="black", forwards=True, i_line_thickness="thin"):
        """* Transitions between neighbouring states are treated specially because
        they occur so frequently and have to be drawn neatly.
        * If the links between two neighbouring states are bidirectional, i.e., state
        can go to dest and dest can go to state with nonzero probability, the links
        are drawn the same as other transition links, but the lines will be straight
        (not bent).
        * If the link between two neighbouring states is unidirectional, the
        transition link will be a straight line originating at zero angle."""
properties = i_line_thickness + " , " + i_colour
if (dest == state):
(x_src, y_src) = self.getStateCenterCoordinate(state)
(top_x, top_y, bottom_x, bottom_y) = self.getStateRectCorners(state)
xy_src = self.getXY(x_src, bottom_y)
above = True
            o_str = "\t\t\\path[-]" + xy_src + " edge["
in_angle = 270 + 30.0
out_angle = 270 - 30.0
dist = self.p.height_offset + np.cos(np.pi*in_angle / (180.0))
loop = "out=%.4f, in=%.4f, distance=%.4fcm" % (in_angle, out_angle, dist)
properties = i_line_thickness + "," + i_colour + "," + loop + ", ->]"
            if above:
                node_description = "above "
            else:
                node_description = "below "
            node = self.getFloatLabel(self.p.show_probs, prob, node_description)
o_str += ( properties + node + xy_src + ";")
self.writeToFile(o_str)
else:
self.__drawNonSelfloopsTransitions( state, dest, prob, i_colour, forwards, i_line_thickness )
##Bidirectional links
#self.__drawNonSelfloopsTransitions( state, dest, transition_probs[state-1, dest-1], self.p.dangle_skiplinks )
#################################### Private draw
    def __drawNonSelfloopsTransitions(self, state, dest, prob, i_colour, i_forwards, i_line_thickness):
        """Draw a transition from the node associated with "state" to the node associated with "dest"."""
#Compute arrow starting and end points (angle from x axis in state)
#All skiplinks start from the same point and all neighbouring links start from the same point.
(x_src, y_src) = self.getStateCenterCoordinate(state)
(x_dest, y_dest) = self.getStateCenterCoordinate(dest)
(top_x, top_y, bottom_x, bottom_y) = self.getStateRectCorners(state)
x_middle = 0.5*(x_dest - x_src) + x_src
properties = i_line_thickness + " , " + i_colour + ", overlay"
if not i_forwards:
xy_middle = self.getXY(x_middle, bottom_y - self.p.height_offset)
xy_src = self.getXY(x_src, bottom_y)
xy_dest = self.getXY(x_dest, bottom_y)
above = False
angle_str = "to[out=200,in=270]"
else:
xy_middle = self.getXY(x_middle, top_y + self.p.height_offset)
xy_src = self.getXY(x_src, top_y)
xy_dest = self.getXY(x_dest, top_y)
above = True
angle_str = "to[out=30,in=60]"
#o_str = "\t\t\path[-]" + xy_src + " edge["
#if above:
# o_str += "above, "
#else:
# o_str += "below, "
#o_str += ( properties + "] " + self.getFloatLabel( self.p.show_probs, prob) + xy_middle + ";")
#self.writeToFile(o_str)
#o_str = "\t\t\draw[edge, ->, " + properties + "] " + xy_middle + "--" + xy_dest + ";"
        o_str = "\t\t\\draw[edge, ->, " + properties + "] " + xy_src + angle_str + xy_dest + ";"
self.writeToFile(o_str)
def __drawSelfloop(self, xc, yc, prob):
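        # Note: this helper is never called in this module, and it relies on
        # `getSelfLoopXY` and `selfloop_rad`, which StateGraphParams does not
        # define here.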
(selfloop_x, selfloop_y) = self.p.getSelfLoopXY()
(x,y) = (selfloop_x+xc, selfloop_y+yc)
angle1 = -45.0
angle2 = -45.0 + 270.0
        o_str = "\t\t\\draw[edge,<-]" + self.getXY(x[0],y[0]) + " arc " + self.getArc(angle1,angle2, self.p.selfloop_rad) + ";"
self.writeToFile(o_str)
label_x = xc
label_y = y[0] + np.sqrt(self.p.selfloop_rad**2 - (x[0] - label_x)**2) + self.p.selfloop_rad
if self.p.show_probs:
o_str = "\t\t\\" + self.getFloatLabel( self.p.show_probs, prob, node_description="above",x=label_x , y=label_y ) + ";"
self.writeToFile(o_str)
|
{
"content_hash": "3687d888c828babc4f9716e43307d49b",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 151,
"avg_line_length": 46.78089887640449,
"alnum_prop": 0.5082862975861655,
"repo_name": "singleswitch/ticker",
"id": "8607367a9529c5aef0288a50929a9beb41b89a1d",
"size": "16655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/simulations/tikz_graphs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5053"
},
{
"name": "C++",
"bytes": "23830"
},
{
"name": "Makefile",
"bytes": "1049"
},
{
"name": "Python",
"bytes": "1004504"
},
{
"name": "Shell",
"bytes": "7102"
},
{
"name": "TeX",
"bytes": "12004"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from abc import ABCMeta, abstractmethod
from sqlalchemy import create_engine, exc, event, MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import Pool
import gevent.local
class SessionManager(object):
__metaclass__ = ABCMeta
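    # NOTE: `__metaclass__` is only honoured on Python 2; on Python 3 this
    # class would need to be declared as
    # `class SessionManager(metaclass=ABCMeta)` for the abstract checks to apply.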
@abstractmethod
def get_session(self, create=True, **Session_kwargs):
pass
class GEventLocalSessionManager(SessionManager):
def __init__(self, Session):
self._Session = Session
self._local = gevent.local.local()
def get_session(self, create=True, **Session_kwargs):
try:
return self._local.session
except AttributeError:
if create:
self._local.session = self._Session(**Session_kwargs)
return self._local.session
else:
return None
class SingletonSessionManager(SessionManager):
def __init__(self, Session):
self._Session = Session
self._session = None
def get_session(self, create=True, **Session_kwargs):
if self._session is not None:
return self._session
else:
if create:
self._session = self._Session(**Session_kwargs)
return self._session
else:
return None
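# Design note: GEventLocalSessionManager keeps one session per greenlet via
# gevent.local, so concurrent greenlets never share a session, while
# SingletonSessionManager shares a single session process-wide.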
_config = {}
metadata = MetaData()
def get_session(name='main', create=True, **Session_kwargs):
if name in _config and 'session_manager' in _config[name]:
return _config[name]['session_manager'].get_session(create, **Session_kwargs)
else:
raise UnconfiguredSessionException('The database %s is not configured!' % name)
def get_session_manager(name='main'):
return _config[name]['session_manager']
def get_engine(name='main'):
return _config[name]['engine']
def configure(name, db_url, db_pool_size=5, session_manager_class=GEventLocalSessionManager, echo=False):
from angular_momentum.models.base import BaseModel
global _config
_config[name] = {}
_config[name]['engine'] = create_engine(db_url, pool_size=int(db_pool_size), echo=echo)
BaseModel.metadata.bind = _config[name]['engine']
_config[name]['Session'] = sessionmaker(bind=_config[name]['engine'])
_config[name]['session_manager'] = session_manager_class(_config[name]['Session'])
# Avoid the use of connections that were invalidated by a database restart; see
# http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html#disconnect-handling-pessimistic
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
        except Exception:
connection_proxy._pool.dispose()
raise exc.DisconnectionError()
cursor.close()
class UnconfiguredSessionException(Exception):
pass
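# Usage sketch (illustrative, not part of the original module; the URL and
# database name are assumptions):
#
#     configure('main', 'postgresql://user:password@localhost/app', db_pool_size=10)
#     session = get_session('main')
#     # ... use the SQLAlchemy session bound to this greenlet/process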
|
{
"content_hash": "1c4dea251fb665d3a3c4ec4fbd82d18d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 105,
"avg_line_length": 32.34782608695652,
"alnum_prop": 0.667002688172043,
"repo_name": "holycattle/angular-momentum",
"id": "fed2a59c0f682d0fe924c508c538527f8dbfdee4",
"size": "2976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/angular_momentum/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7569"
},
{
"name": "HTML",
"bytes": "3020"
},
{
"name": "JavaScript",
"bytes": "3615"
},
{
"name": "Python",
"bytes": "6103"
},
{
"name": "Shell",
"bytes": "178"
},
{
"name": "TypeScript",
"bytes": "62"
}
],
"symlink_target": ""
}
|
"""JSON implementations of repository objects."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
import base64
import datetime
import gridfs
import importlib
from . import default_mdata
from .. import utilities
from ..id.objects import IdList
from ..osid import markers as osid_markers
from ..osid import objects as osid_objects
from ..osid.markers import Extensible
from ..osid.metadata import Metadata
from ..osid.osid_errors import *
from ..primitives import *
from ..primitives import DataInputStream
from ..primitives import DisplayText
from ..primitives import Id
from ..utilities import JSONClientValidated
from ..utilities import get_provider_manager
from ..utilities import get_registry
from ..utilities import update_display_text_defaults
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.repository import objects as abc_repository_objects
from dlkit.primordium.id.primitives import Id
class Asset(abc_repository_objects.Asset, osid_objects.OsidObject, osid_markers.Aggregateable, osid_markers.Sourceable):
"""An ``Asset`` represents some digital content.
Example assets might be a text document, an image, or a movie. The
content data, and metadata related directly to the content format
    and quality, is accessed through ``AssetContent``. ``Assets``, like all
    ``OsidObjects``, include a type and a record to qualify the ``Asset``
and include additional data. The division between the ``Asset``
``Type`` and ``AssetContent`` is to separate data describing the
asset from data describing the format of the contents, allowing a
consumer to select among multiple formats, sizes or levels of
fidelity.
An example is a photograph of the Bay Bridge. The content may
deliver a JPEG in multiple resolutions where the ``AssetContent``
    may also describe size or compression factor for each one. The content
content may also include an uncompressed TIFF version. The ``Asset``
``Type`` may be "photograph" indicating that the photo itself is the
asset managed in this repository.
Since an Asset may have multiple ``AssetContent`` structures, the
decision of how many things to stuff inside a single asset comes
down to if the content is actually a different format, or size, or
quality, falling under the same creator, copyright, publisher and
distribution rights as the original. This may, in some cases,
    provide a means to implement some accessibility, but it doesn't handle
the case where, to meet an accessibility requirement, one asset
needs to be substituted for another. The Repository OSID manages
this aspect outside the scope of the core ``Asset`` definition.
``Assets`` map to ``AssetSubjects``. ``AssetSubjects`` are
``OsidObjects`` that capture a subject matter. In the above example,
an ``AssetSubject`` may be defined for the Bay Bridge and include
data describing the bridge. The single subject can map to multiple
assets depicting the bridge providing a single entry for a search
and a single place to describe a bridge. Bridges, as physical items,
may also be described using the Resource OSID in which case the use
of the ``AssetSubject`` acts as a cover for the underlying
``Resource`` to assist repository-only consumers.
The ``Asset`` definition includes some basic copyright and related
licensing information to assist in finding free-to-use content, or
to convey the distribution restrictions that may be placed on the
asset. Generally, if no data is available it is to be assumed that
all rights are reserved.
A publisher is applicable if the content of this ``Asset`` has been
published. Not all ``Assets`` in this ``Repository`` may have a
    published status, and such a status may affect the applicability of
copyright law. To trace the source of an ``Asset,`` both a provider
and source are defined. The provider indicates where this repository
acquired the asset and the source indicates the original provider or
copyright owner. In the case of a published asset, the source is the
publisher.
``Assets`` also define methods to facilitate searches over time and
space as it relates to the subject matter. This may at times be
redundant with the ``AssetSubject``. In the case of the Bay Bridge
photograph, the temporal coverage may include 1936, when it opened,
and/or indicate when the photo was taken to capture a current event
    of the bridge. The decision largely depends on the desired effect of
    a search. The spatial coverage may describe the GPS
coordinates of the bridge or describe the spatial area encompassed
in the view. In either case, a "photograph" type may unambiguously
    define methods to describe the exact time the photograph was taken
and the location of the photographer.
The core Asset defines methods to perform general searches and
construct bibliographic entries without knowledge of a particular
``Asset`` or ``AssetContent`` record ``Type``.
"""
_namespace = 'repository.Asset'
def __init__(self, **kwargs):
osid_objects.OsidObject.__init__(self, object_name='ASSET', **kwargs)
self._catalog_name = 'Repository'
if self.is_composition():
self._composition = self.get_composition()
    def __getattr__(self, name):
        if self.is_composition():
            try:
                return self._composition[name]
            except AttributeError:
                raise AttributeError(name)
        # HOW TO PASS TO EXTENSIBLE!!!!
        # Note: falling through returns None, so lookups of undefined
        # attributes on non-composition assets silently yield None.
        return None
def get_title(self):
"""Gets the proper title of this asset.
This may be the same as the display name or the display name may
be used for a less formal label.
return: (osid.locale.DisplayText) - the title of this asset
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_title_template
return DisplayText(self._my_map['title'])
title = property(fget=get_title)
def is_copyright_status_known(self):
"""Tests if the copyright status is known.
        return: (boolean) - ``true`` if the copyright status of this
                asset is known, ``false`` otherwise. If ``false``,
                ``is_public_domain()``, ``can_distribute_verbatim()``,
                ``can_distribute_alterations()`` and
                ``can_distribute_compositions()`` may also be ``false``.
*compliance: mandatory -- This method must be implemented.*
"""
return bool(self._my_map['copyright']['text'])
def is_public_domain(self):
"""Tests if this asset is in the public domain.
An asset is in the public domain if copyright is not applicable,
the copyright has expired, or the copyright owner has expressly
relinquished the copyright.
return: (boolean) - ``true`` if this asset is in the public
domain, ``false`` otherwise. If ``true,``
``can_distribute_verbatim(),
can_distribute_alterations() and
can_distribute_compositions()`` must also be ``true``.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return bool(self._my_map['publicDomain'])
    def get_copyright(self):
        """Gets the copyright statement of this asset, which identifies the current copyright holder.
For an asset in the public domain, this method may return the
original copyright statement although it may be no longer valid.
return: (osid.locale.DisplayText) - the copyright statement or
an empty string if none available. An empty string does
not imply the asset is not protected by copyright.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_title_template
return DisplayText(self._my_map['copyright'])
copyright_ = property(fget=get_copyright)
def get_copyright_registration(self):
"""Gets the copyright registration information for this asset.
return: (string) - the copyright registration. An empty string
means the registration status isn't known.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.get_url_template
if not bool(self._my_map['copyrightRegistration']):
raise errors.IllegalState()
return self._my_map['copyrightRegistration']
copyright_registration = property(fget=get_copyright_registration)
def can_distribute_verbatim(self):
"""Tests if there are any license restrictions on this asset that restrict the distribution, re-publication or public display of this asset, commercial or otherwise, without modification, alteration, or inclusion in other works.
This method is intended to offer consumers a means of filtering
out search results that restrict distribution for any purpose.
The scope of this method does not include licensing that
describes warranty disclaimers or attribution requirements. This
method is intended for informational purposes only and does not
replace or override the terms specified in a license agreement
which may specify exceptions or additional restrictions.
return: (boolean) - ``true`` if the asset can be distributed
verbatim, ``false`` otherwise.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.can_distribute_verbatim
if self._my_map['distributeVerbatim'] is None:
raise errors.IllegalState()
else:
return self._my_map['distributeVerbatim']
def can_distribute_alterations(self):
"""Tests if there are any license restrictions on this asset that restrict the distribution, re-publication or public display of any alterations or modifications to this asset, commercial or otherwise, for any purpose.
This method is intended to offer consumers a means of filtering
out search results that restrict the distribution or public
display of any modification or alteration of the content or its
metadata of any kind, including editing, translation,
resampling, resizing and cropping. The scope of this method does
not include licensing that describes warranty disclaimers or
attribution requirements. This method is intended for
informational purposes only and does not replace or override the
terms specified in a license agreement which may specify
exceptions or additional restrictions.
return: (boolean) - ``true`` if the asset can be modified,
``false`` otherwise. If ``true,``
``can_distribute_verbatim()`` must also be ``true``.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.can_distribute_verbatim
if self._my_map['distributeAlterations'] is None:
raise errors.IllegalState()
else:
return self._my_map['distributeAlterations']
def can_distribute_compositions(self):
"""Tests if there are any license restrictions on this asset that restrict the distribution, re-publication or public display of this asset as an inclusion within other content or composition, commercial or otherwise, for any purpose, including restrictions upon the distribution or license of the resulting composition.
This method is intended to offer consumers a means of filtering
out search results that restrict the use of this asset within
compositions. The scope of this method does not include
licensing that describes warranty disclaimers or attribution
requirements. This method is intended for informational purposes
only and does not replace or override the terms specified in a
license agreement which may specify exceptions or additional
restrictions.
return: (boolean) - ``true`` if the asset can be part of a
larger composition ``false`` otherwise. If ``true,``
``can_distribute_verbatim()`` must also be ``true``.
raise: IllegalState - ``is_copyright_status_known()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.can_distribute_verbatim
if self._my_map['distributeCompositions'] is None:
raise errors.IllegalState()
else:
return self._my_map['distributeCompositions']
def get_source_id(self):
"""Gets the ``Resource Id`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
An ``Asset`` is ``Sourceable`` and also contains a provider
identity. The provider is the entity that makes this digital
asset available in this repository but may or may not be the
publisher of the contents depicted in the asset. For example, a
map published by Ticknor and Fields in 1848 may have a provider
of Library of Congress and a source of Ticknor and Fields. If
copied from a repository at Middlebury College, the provider
would be Middlebury College and a source of Ticknor and Fields.
return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['sourceId']):
raise errors.IllegalState('this Asset has no source')
else:
return Id(self._my_map['sourceId'])
source_id = property(fget=get_source_id)
def get_source(self):
"""Gets the ``Resource`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
return: (osid.resource.Resource) - the source
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['sourceId']):
raise errors.IllegalState('this Asset has no source')
mgr = self._get_provider_manager('RESOURCE')
if not mgr.supports_resource_lookup():
raise errors.OperationFailed('Resource does not support Resource lookup')
lookup_session = mgr.get_resource_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bin_view()
osid_object = lookup_session.get_resource(self.get_source_id())
return osid_object
source = property(fget=get_source)
def get_provider_link_ids(self):
"""Gets the resource ``Ids`` representing the source of this asset in order from the most recent provider to the originating source.
return: (osid.id.IdList) - the provider ``Ids``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_asset_ids_template
return IdList(self._my_map['providerLinkIds'])
provider_link_ids = property(fget=get_provider_link_ids)
def get_provider_links(self):
"""Gets the ``Resources`` representing the source of this asset in order from the most recent provider to the originating source.
return: (osid.resource.ResourceList) - the provider chain
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_assets_template
if not bool(self._my_map['providerLinkIds']):
raise errors.IllegalState('no providerLinkIds')
mgr = self._get_provider_manager('RESOURCE')
if not mgr.supports_resource_lookup():
raise errors.OperationFailed('Resource does not support Resource lookup')
# What about the Proxy?
lookup_session = mgr.get_resource_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bin_view()
return lookup_session.get_resources_by_ids(self.get_provider_link_ids())
provider_links = property(fget=get_provider_links)
def get_created_date(self):
"""Gets the created date of this asset, which is generally not related to when the object representing the asset was created.
The date returned may indicate that not much is known.
return: (osid.calendaring.DateTime) - the created date
*compliance: mandatory -- This method must be implemented.*
"""
if self._my_map['createdDate'] is None:
raise errors.IllegalState('createdDate is None')
return self._my_map['createdDate']
created_date = property(fget=get_created_date)
def is_published(self):
"""Tests if this asset has been published.
Not all assets viewable in this repository may have been
published. The source of a published asset indicates the
publisher.
return: (boolean) - true if this asset has been published,
``false`` if unpublished or its published status is not
known
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return bool(self._my_map['published'])
def get_published_date(self):
"""Gets the published date of this asset.
Unpublished assets have no published date. A published asset has
a date available, however the date returned may indicate that
not much is known.
return: (osid.calendaring.DateTime) - the published date
raise: IllegalState - ``is_published()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
published_date = property(fget=get_published_date)
def get_principal_credit_string(self):
"""Gets the credits of the principal people involved in the production of this asset as a display string.
return: (osid.locale.DisplayText) - the principal credits
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_title_template
return DisplayText(self._my_map['principalCreditString'])
principal_credit_string = property(fget=get_principal_credit_string)
def get_asset_content_ids(self):
"""Gets the content ``Ids`` of this asset.
return: (osid.id.IdList) - the asset content ``Ids``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_asset_content_ids_template
id_list = []
for asset_content in self.get_asset_contents():
id_list.append(asset_content.get_id())
return IdList(id_list)
asset_content_ids = property(fget=get_asset_content_ids)
def get_asset_contents(self):
"""Gets the content of this asset.
return: (osid.repository.AssetContentList) - the asset contents
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_asset_contents_template
return AssetContentList(
self._my_map['assetContents'],
runtime=self._runtime,
proxy=self._proxy)
def _delete(self):
for asset_content in self.get_asset_contents():
asset_content._delete()
osid_objects.OsidObject._delete(self)
asset_contents = property(fget=get_asset_contents)
    def is_composition(self):
        """Tests if this asset is a representation of a composition of assets.
return: (boolean) - true if this asset is a composition,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return bool(self._my_map['compositionId'])
def get_composition_id(self):
"""Gets the ``Composition`` ``Id`` corresponding to this asset.
        return: (osid.id.Id) - the composition ``Id``
raise: IllegalState - ``is_composition()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
if not bool(self._my_map['compositionId']):
raise errors.IllegalState('composition empty')
return Id(self._my_map['compositionId'])
composition_id = property(fget=get_composition_id)
def get_composition(self):
"""Gets the Composition corresponding to this asset.
        return: (osid.repository.Composition) - the composition
raise: IllegalState - ``is_composition()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['compositionId']):
raise errors.IllegalState('composition empty')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_composition_lookup():
raise errors.OperationFailed('Repository does not support Composition lookup')
lookup_session = mgr.get_composition_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
return lookup_session.get_composition(self.get_composition_id())
composition = property(fget=get_composition)
@utilities.arguments_not_none
def get_asset_record(self, asset_record_type):
"""Gets the asset record corresponding to the given ``Asset`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``asset_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(asset_record_type)``
is ``true`` .
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.records.AssetRecord) - the asset record
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(asset_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(asset_record_type)
def get_object_map(self):
obj_map = dict(self._my_map)
obj_map['assetContent'] = obj_map['assetContents'] = [ac.object_map
for ac in self.get_asset_contents()]
# note: assetContent is deprecated
if obj_map['createdDate'] is not None:
created_date = obj_map['createdDate']
obj_map['createdDate'] = {
'year': created_date.year,
'month': created_date.month,
'day': created_date.day,
'hour': created_date.hour,
'minute': created_date.minute,
'second': created_date.second,
'microsecond': created_date.microsecond
}
return osid_objects.OsidObject.get_object_map(self, obj_map)
object_map = property(fget=get_object_map)
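# Usage sketch (illustrative, not part of the original module; assumes an
# `asset` instance obtained from an asset lookup session):
#
#     print(asset.get_title().text)
#     if asset.is_public_domain():
#         for content in asset.get_asset_contents():
#             print(content.get_id())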
class AssetForm(abc_repository_objects.AssetForm, osid_objects.OsidObjectForm, osid_objects.OsidAggregateableForm, osid_objects.OsidSourceableForm):
"""This is the form for creating and updating ``Assets``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``AssetAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_namespace = 'repository.Asset'
def __init__(self, **kwargs):
osid_objects.OsidObjectForm.__init__(self, object_name='ASSET', **kwargs)
self._mdata = default_mdata.get_asset_mdata()
self._init_metadata(**kwargs)
if not self.is_for_update():
self._init_map(**kwargs)
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidSourceableForm._init_metadata(self)
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._copyright_registration_default = self._mdata['copyright_registration']['default_string_values'][0]
update_display_text_defaults(self._mdata['copyright'], self._locale_map)
self._copyright_default = dict(self._mdata['copyright']['default_string_values'][0])
update_display_text_defaults(self._mdata['title'], self._locale_map)
self._title_default = dict(self._mdata['title']['default_string_values'][0])
self._distribute_verbatim_default = self._mdata['distribute_verbatim']['default_boolean_values'][0]
self._created_date_default = self._mdata['created_date']['default_date_time_values'][0]
self._distribute_alterations_default = self._mdata['distribute_alterations']['default_boolean_values'][0]
update_display_text_defaults(self._mdata['principal_credit_string'], self._locale_map)
self._principal_credit_string_default = dict(self._mdata['principal_credit_string']['default_string_values'][0])
self._published_date_default = self._mdata['published_date']['default_date_time_values'][0]
self._source_default = self._mdata['source']['default_id_values'][0]
self._provider_links_default = self._mdata['provider_links']['default_id_values']
self._public_domain_default = self._mdata['public_domain']['default_boolean_values'][0]
self._distribute_compositions_default = self._mdata['distribute_compositions']['default_boolean_values'][0]
self._composition_default = self._mdata['composition']['default_id_values'][0]
self._published_default = self._mdata['published']['default_boolean_values'][0]
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidSourceableForm._init_map(self)
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['copyrightRegistration'] = self._copyright_registration_default
self._my_map['assignedRepositoryIds'] = [str(kwargs['repository_id'])]
self._my_map['copyright'] = self._copyright_default
self._my_map['title'] = self._title_default
self._my_map['distributeVerbatim'] = self._distribute_verbatim_default
self._my_map['createdDate'] = self._created_date_default
self._my_map['distributeAlterations'] = self._distribute_alterations_default
self._my_map['principalCreditString'] = self._principal_credit_string_default
self._my_map['publishedDate'] = self._published_date_default
self._my_map['sourceId'] = self._source_default
self._my_map['providerLinkIds'] = self._provider_links_default
self._my_map['publicDomain'] = self._public_domain_default
self._my_map['distributeCompositions'] = self._distribute_compositions_default
self._my_map['compositionId'] = self._composition_default
self._my_map['published'] = self._published_default
self._my_map['assetContents'] = []
def get_title_metadata(self):
"""Gets the metadata for an asset title.
return: (osid.Metadata) - metadata for the title
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['title'])
metadata.update({'existing_string_values': self._my_map['title']})
return Metadata(**metadata)
title_metadata = property(fget=get_title_metadata)
@utilities.arguments_not_none
def set_title(self, title):
"""Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['title'] = self._get_display_text(title, self.get_title_metadata())
def clear_title(self):
"""Removes the title.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.clear_title_template
if (self.get_title_metadata().is_read_only() or
self.get_title_metadata().is_required()):
raise errors.NoAccess()
self._my_map['title'] = dict(self._title_default)
title = property(fset=set_title, fdel=clear_title)
def get_public_domain_metadata(self):
"""Gets the metadata for the public domain flag.
return: (osid.Metadata) - metadata for the public domain
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['public_domain'])
metadata.update({'existing_boolean_values': self._my_map['publicDomain']})
return Metadata(**metadata)
public_domain_metadata = property(fget=get_public_domain_metadata)
@utilities.arguments_not_none
def set_public_domain(self, public_domain):
"""Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_public_domain_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(public_domain):
raise errors.InvalidArgument()
self._my_map['publicDomain'] = public_domain
def clear_public_domain(self):
"""Removes the public domain status.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_public_domain_metadata().is_read_only() or
self.get_public_domain_metadata().is_required()):
raise errors.NoAccess()
self._my_map['publicDomain'] = self._public_domain_default
public_domain = property(fset=set_public_domain, fdel=clear_public_domain)
def get_copyright_metadata(self):
"""Gets the metadata for the copyright.
return: (osid.Metadata) - metadata for the copyright
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['copyright'])
metadata.update({'existing_string_values': self._my_map['copyright']})
return Metadata(**metadata)
copyright_metadata = property(fget=get_copyright_metadata)
@utilities.arguments_not_none
def set_copyright(self, copyright_):
"""Sets the copyright.
arg: copyright (string): the new copyright
raise: InvalidArgument - ``copyright`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``copyright`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['copyright'] = self._get_display_text(copyright_, self.get_copyright_metadata())
def clear_copyright(self):
"""Removes the copyright.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.clear_title_template
if (self.get_copyright_metadata().is_read_only() or
self.get_copyright_metadata().is_required()):
raise errors.NoAccess()
self._my_map['copyright'] = dict(self._copyright_default)
copyright_ = property(fset=set_copyright, fdel=clear_copyright)
def get_copyright_registration_metadata(self):
"""Gets the metadata for the copyright registration.
return: (osid.Metadata) - metadata for the copyright
registration
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['copyright_registration'])
metadata.update({'existing_string_values': self._my_map['copyrightRegistration']})
return Metadata(**metadata)
copyright_registration_metadata = property(fget=get_copyright_registration_metadata)
@utilities.arguments_not_none
def set_copyright_registration(self, registration):
"""Sets the copyright registration.
arg: registration (string): the new copyright registration
raise: InvalidArgument - ``copyright`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``copyright`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.set_url_template
if self.get_copyright_registration_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_string(
registration,
self.get_copyright_registration_metadata()):
raise errors.InvalidArgument()
self._my_map['copyrightRegistration'] = registration
def clear_copyright_registration(self):
"""Removes the copyright registration.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.clear_url_template
if (self.get_copyright_registration_metadata().is_read_only() or
self.get_copyright_registration_metadata().is_required()):
raise errors.NoAccess()
self._my_map['copyrightRegistration'] = self._copyright_registration_default
copyright_registration = property(fset=set_copyright_registration, fdel=clear_copyright_registration)
def get_distribute_verbatim_metadata(self):
"""Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_verbatim'])
metadata.update({'existing_boolean_values': self._my_map['distributeVerbatim']})
return Metadata(**metadata)
distribute_verbatim_metadata = property(fget=get_distribute_verbatim_metadata)
@utilities.arguments_not_none
def set_distribute_verbatim(self, distribute_verbatim):
"""Sets the distribution rights.
arg: distribute_verbatim (boolean): right to distribute
verbatim copies
raise: InvalidArgument - ``distribute_verbatim`` is invalid
raise: NoAccess - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_distribute_verbatim_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(distribute_verbatim):
raise errors.InvalidArgument()
self._my_map['distributeVerbatim'] = distribute_verbatim
def clear_distribute_verbatim(self):
"""Removes the distribution rights.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_distribute_verbatim_metadata().is_read_only() or
self.get_distribute_verbatim_metadata().is_required()):
raise errors.NoAccess()
self._my_map['distributeVerbatim'] = self._distribute_verbatim_default
distribute_verbatim = property(fset=set_distribute_verbatim, fdel=clear_distribute_verbatim)
def get_distribute_alterations_metadata(self):
"""Gets the metadata for the distribute alterations rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_alterations'])
metadata.update({'existing_boolean_values': self._my_map['distributeAlterations']})
return Metadata(**metadata)
distribute_alterations_metadata = property(fget=get_distribute_alterations_metadata)
@utilities.arguments_not_none
def set_distribute_alterations(self, distribute_mods):
"""Sets the distribute alterations flag.
This also sets distribute verbatim to ``true``.
arg: distribute_mods (boolean): right to distribute
modifications
raise: InvalidArgument - ``distribute_mods`` is invalid
raise: NoAccess - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_distribute_alterations_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(distribute_mods):
raise errors.InvalidArgument()
self._my_map['distributeAlterations'] = distribute_mods
def clear_distribute_alterations(self):
"""Removes the distribution rights.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_distribute_alterations_metadata().is_read_only() or
self.get_distribute_alterations_metadata().is_required()):
raise errors.NoAccess()
self._my_map['distributeAlterations'] = self._distribute_alterations_default
distribute_alterations = property(fset=set_distribute_alterations, fdel=clear_distribute_alterations)
def get_distribute_compositions_metadata(self):
"""Gets the metadata for the distribute compositions rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_compositions'])
metadata.update({'existing_boolean_values': self._my_map['distributeCompositions']})
return Metadata(**metadata)
distribute_compositions_metadata = property(fget=get_distribute_compositions_metadata)
@utilities.arguments_not_none
def set_distribute_compositions(self, distribute_comps):
"""Sets the distribution rights.
This sets distribute verbatim to ``true``.
arg: distribute_comps (boolean): right to distribute
modifications
raise: InvalidArgument - ``distribute_comps`` is invalid
raise: NoAccess - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_distribute_compositions_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(distribute_comps):
raise errors.InvalidArgument()
self._my_map['distributeCompositions'] = distribute_comps
def clear_distribute_compositions(self):
"""Removes the distribution rights.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_distribute_compositions_metadata().is_read_only() or
self.get_distribute_compositions_metadata().is_required()):
raise errors.NoAccess()
self._my_map['distributeCompositions'] = self._distribute_compositions_default
distribute_compositions = property(fset=set_distribute_compositions, fdel=clear_distribute_compositions)
def get_source_metadata(self):
"""Gets the metadata for the source.
return: (osid.Metadata) - metadata for the source
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['source'])
metadata.update({'existing_id_values': self._my_map['sourceId']})
return Metadata(**metadata)
source_metadata = property(fget=get_source_metadata)
@utilities.arguments_not_none
def set_source(self, source_id):
"""Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_source_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(source_id):
raise errors.InvalidArgument()
self._my_map['sourceId'] = str(source_id)
def clear_source(self):
"""Removes the source.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_source_metadata().is_read_only() or
self.get_source_metadata().is_required()):
raise errors.NoAccess()
self._my_map['sourceId'] = self._source_default
source = property(fset=set_source, fdel=clear_source)
def get_provider_links_metadata(self):
"""Gets the metadata for the provider chain.
return: (osid.Metadata) - metadata for the provider chain
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['provider_links'])
metadata.update({'existing_provider_links_values': self._my_map['providerLinkIds']})
return Metadata(**metadata)
provider_links_metadata = property(fget=get_provider_links_metadata)
@utilities.arguments_not_none
def set_provider_links(self, resource_ids):
"""Sets a provider chain in order from the most recent source to the originating source.
arg: resource_ids (osid.id.Id[]): the new source
raise: InvalidArgument - ``resource_ids`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``resource_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.set_assets_template
if not isinstance(resource_ids, list):
raise errors.InvalidArgument()
if self.get_provider_links_metadata().is_read_only():
raise errors.NoAccess()
idstr_list = []
for object_id in resource_ids:
if not self._is_valid_id(object_id):
raise errors.InvalidArgument()
idstr_list.append(str(object_id))
self._my_map['providerLinkIds'] = idstr_list
def clear_provider_links(self):
"""Removes the provider chain.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.clear_assets_template
if (self.get_provider_links_metadata().is_read_only() or
self.get_provider_links_metadata().is_required()):
raise errors.NoAccess()
self._my_map['providerLinkIds'] = self._provider_links_default
provider_links = property(fset=set_provider_links, fdel=clear_provider_links)
def get_created_date_metadata(self):
"""Gets the metadata for the asset creation date.
return: (osid.Metadata) - metadata for the created date
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['created_date'])
metadata.update({'existing_date_time_values': self._my_map['createdDate']})
return Metadata(**metadata)
created_date_metadata = property(fget=get_created_date_metadata)
@utilities.arguments_not_none
def set_created_date(self, created_date):
"""Sets the created date.
arg: created_date (osid.calendaring.DateTime): the new
created date
raise: InvalidArgument - ``created_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``created_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_created_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(
created_date,
self.get_created_date_metadata()):
raise errors.InvalidArgument()
self._my_map['createdDate'] = created_date
def clear_created_date(self):
"""Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template
if (self.get_created_date_metadata().is_read_only() or
self.get_created_date_metadata().is_required()):
raise errors.NoAccess()
self._my_map['createdDate'] = self._created_date_default
created_date = property(fset=set_created_date, fdel=clear_created_date)
def get_published_metadata(self):
"""Gets the metadata for the published status.
return: (osid.Metadata) - metadata for the published field
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['published'])
metadata.update({'existing_boolean_values': self._my_map['published']})
return Metadata(**metadata)
published_metadata = property(fget=get_published_metadata)
@utilities.arguments_not_none
def set_published(self, published):
"""Sets the published status.
arg: published (boolean): the published status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_published_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(published):
raise errors.InvalidArgument()
self._my_map['published'] = published
def clear_published(self):
"""Removes the published status.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_published_metadata().is_read_only() or
self.get_published_metadata().is_required()):
raise errors.NoAccess()
self._my_map['published'] = self._published_default
published = property(fset=set_published, fdel=clear_published)
def get_published_date_metadata(self):
"""Gets the metadata for the published date.
return: (osid.Metadata) - metadata for the published date
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['published_date'])
metadata.update({'existing_date_time_values': self._my_map['publishedDate']})
return Metadata(**metadata)
published_date_metadata = property(fget=get_published_date_metadata)
@utilities.arguments_not_none
def set_published_date(self, published_date):
"""Sets the published date.
arg: published_date (osid.calendaring.DateTime): the new
published date
raise: InvalidArgument - ``published_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_published_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(
published_date,
self.get_published_date_metadata()):
raise errors.InvalidArgument()
self._my_map['publishedDate'] = published_date
def clear_published_date(self):
"""Removes the puiblished date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template
if (self.get_published_date_metadata().is_read_only() or
self.get_published_date_metadata().is_required()):
raise errors.NoAccess()
self._my_map['publishedDate'] = self._published_date_default
published_date = property(fset=set_published_date, fdel=clear_published_date)
def get_principal_credit_string_metadata(self):
"""Gets the metadata for the principal credit string.
return: (osid.Metadata) - metadata for the credit string
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['principal_credit_string'])
metadata.update({'existing_string_values': self._my_map['principalCreditString']})
return Metadata(**metadata)
principal_credit_string_metadata = property(fget=get_principal_credit_string_metadata)
@utilities.arguments_not_none
def set_principal_credit_string(self, credit_string):
"""Sets the principal credit string.
arg: credit_string (string): the new credit string
raise: InvalidArgument - ``credit_string`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``credit_string`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['principalCreditString'] = self._get_display_text(credit_string, self.get_principal_credit_string_metadata())
def clear_principal_credit_string(self):
"""Removes the principal credit string.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.clear_title_template
if (self.get_principal_credit_string_metadata().is_read_only() or
self.get_principal_credit_string_metadata().is_required()):
raise errors.NoAccess()
self._my_map['principalCreditString'] = dict(self._principal_credit_string_default)
principal_credit_string = property(fset=set_principal_credit_string, fdel=clear_principal_credit_string)
def get_composition_metadata(self):
"""Gets the metadata for linking this asset to a composition.
return: (osid.Metadata) - metadata for the composition
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['composition'])
metadata.update({'existing_id_values': self._my_map['compositionId']})
return Metadata(**metadata)
composition_metadata = property(fget=get_composition_metadata)
@utilities.arguments_not_none
def set_composition(self, composition_id):
"""Sets the composition.
arg: composition_id (osid.id.Id): a composition
raise: InvalidArgument - ``composition_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``composition_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_composition_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(composition_id):
raise errors.InvalidArgument()
self._my_map['compositionId'] = str(composition_id)
def clear_composition(self):
"""Removes the composition link.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_composition_metadata().is_read_only() or
self.get_composition_metadata().is_required()):
raise errors.NoAccess()
self._my_map['compositionId'] = self._composition_default
composition = property(fset=set_composition, fdel=clear_composition)
@utilities.arguments_not_none
def get_asset_form_record(self, asset_record_type):
"""Gets the ``AssetFormRecord`` corresponding to the given ``Asset`` record ``Type``.
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.records.AssetFormRecord) - the asset
form record
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(asset_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(asset_record_type)
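# The setter methods above all follow the same metadata-guarded pattern:
# consult the field's Metadata before writing, so read-only or required
# constraints surface as errors.NoAccess rather than silent failures. A
# minimal usage sketch, assuming ``asset_form`` is an AssetForm obtained
# from an AssetAdminSession; the registration value is hypothetical.
def _example_guarded_update(asset_form):
    metadata = asset_form.get_copyright_registration_metadata()
    if not metadata.is_read_only():
        # Raises errors.InvalidArgument if the string fails validation.
        asset_form.set_copyright_registration('TX-0000000')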
class AssetList(abc_repository_objects.AssetList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssetList`` provides a means for accessing ``Asset`` elements sequentially either one at a time or many at a time.
Examples: while (al.hasNext()) { Asset asset = al.getNextAsset(); }
or
while (al.hasNext()) {
Asset[] assets = al.getNextAssets(al.available());
}
"""
def get_next_asset(self):
"""Gets the next ``Asset`` in this list.
return: (osid.repository.Asset) - the next ``Asset`` in this
list. The ``has_next()`` method should be used to test
that a next ``Asset`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return next(self)
def next(self):
return self._get_next_object(Asset)
__next__ = next
next_asset = property(fget=get_next_asset)
@utilities.arguments_not_none
def get_next_assets(self, n):
"""Gets the next set of ``Assets`` in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``Asset`` elements requested
which must be less than or equal to ``available()``
return: (osid.repository.Asset) - an array of ``Asset``
elements. The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(AssetList, number=n)
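# A minimal sketch of the two iteration idioms described in the
# ``AssetList`` docstring above, translated to Python. ``asset_list`` is
# assumed to be an AssetList returned by some lookup session.
def _example_iterate_assets(asset_list):
    # One at a time: the Python mirror of
    # while (al.hasNext()) { Asset asset = al.getNextAsset(); }
    while asset_list.has_next():
        asset = asset_list.get_next_asset()
        print(asset.get_display_name().text)


def _example_bulk_assets(asset_list):
    # Many at a time: the returned AssetList is itself iterable.
    if asset_list.has_next():
        for asset in asset_list.get_next_assets(asset_list.available()):
            print(asset.get_display_name().text)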
class AssetContent(abc_repository_objects.AssetContent, osid_objects.OsidObject, osid_markers.Subjugateable):
"""``AssetContent`` represents a version of content represented by an ``Asset``.
Although ``AssetContent`` is a separate ``OsidObject`` with its own
``Id`` to distinguish it from other content inside an ``Asset``,
``AssetContent`` can only be accessed through an ``Asset``.
Once an ``Asset`` is selected, multiple contents should be
negotiated using the size, fidelity, accessibility requirements or
application environment.
"""
_namespace = 'repository.AssetContent'
def __init__(self, **kwargs):
osid_objects.OsidObject.__init__(self, object_name='ASSET_CONTENT', **kwargs)
self._catalog_name = 'Repository'
def get_asset_id(self):
"""Gets the ``Asset Id`` corresponding to this content.
return: (osid.id.Id) - the asset ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
if not bool(self._my_map['assetId']):
raise errors.IllegalState('asset empty')
return Id(self._my_map['assetId'])
asset_id = property(fget=get_asset_id)
def get_asset(self):
"""Gets the ``Asset`` corresponding to this content.
return: (osid.repository.Asset) - the asset
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['assetId']):
raise errors.IllegalState('asset empty')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_asset_lookup():
raise errors.OperationFailed('Repository does not support Asset lookup')
lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
return lookup_session.get_asset(self.get_asset_id())
asset = property(fget=get_asset)
def get_accessibility_types(self):
"""Gets the accessibility types associated with this content.
return: (osid.type.TypeList) - list of content accessibility
types
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
accessibility_types = property(fget=get_accessibility_types)
def has_data_length(self):
"""Tests if a data length is available.
return: (boolean) - ``true`` if a length is available for this
content, ``false`` otherwise.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_data_length(self):
"""Gets the length of the data represented by this content in bytes.
return: (cardinal) - the length of the data stream
raise: IllegalState - ``has_data_length()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
data_length = property(fget=get_data_length)
def get_data(self):
"""Gets the asset content data.
return: (osid.transport.DataInputStream) - the asset content
data
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if not bool(self._my_map['data']):
raise errors.IllegalState('no data')
dbase = JSONClientValidated('repository',
runtime=self._runtime).raw()
filesys = gridfs.GridFS(dbase)
return DataInputStream(filesys.get(self._my_map['data']))
data = property(fget=get_data)
def has_url(self):
"""Tests if a URL is associated with this content.
return: (boolean) - ``true`` if a URL is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.has_url_template
try:
return bool(self._my_map['url'])
except KeyError:
return False
def get_url(self):
"""Gets the URL associated with this content for web-based retrieval.
return: (string) - the url for this data
raise: IllegalState - ``has_url()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.get_url_template
if not bool(self._my_map['url']):
raise errors.IllegalState()
return self._my_map['url']
url = property(fget=get_url)
@utilities.arguments_not_none
def get_asset_content_record(self, asset_content_content_record_type):
"""Gets the asset content record corresponding to the given ``AssetContent`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``asset_content_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(asset_content_record_type)``
is ``true`` .
arg: asset_content_content_record_type (osid.type.Type): the
type of the record to retrieve
return: (osid.repository.records.AssetContentRecord) - the asset
content record
raise: NullArgument - ``asset_content_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(asset_content_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(asset_content_content_record_type)
def _delete(self):
dbase = JSONClientValidated('repository',
runtime=self._runtime).raw()
try:
filesys = gridfs.GridFS(dbase)
except TypeError:
# Not MongoDB, perhaps filesystem. Assume this is then taken care of in
# an adapter.
pass
else:
if self._my_map['data'] and filesys.exists(self._my_map['data']):
filesys.delete(self._my_map['data'])
osid_objects.OsidObject._delete(self)
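# A minimal sketch of consuming an ``AssetContent`` via the accessors
# above: prefer the URL when one is set, otherwise fall back to the
# GridFS-backed stream returned by get_data().
def _example_fetch_content(content):
    if content.has_url():
        return content.get_url()
    # get_data() wraps the stored GridFS file in a DataInputStream.
    return content.get_data()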
class AssetContentForm(abc_repository_objects.AssetContentForm, osid_objects.OsidObjectForm, osid_objects.OsidSubjugateableForm):
"""This is the form for creating and updating content for ``AssetContent``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``AssetAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_namespace = 'repository.AssetContent'
def __init__(self, **kwargs):
osid_objects.OsidObjectForm.__init__(self, object_name='ASSET_CONTENT', **kwargs)
self._mdata = default_mdata.get_asset_content_mdata()
self._init_metadata(**kwargs)
if not self.is_for_update():
self._init_map(**kwargs)
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._url_default = self._mdata['url']['default_string_values'][0]
self._data_default = self._mdata['data']['default_object_values'][0]
self._accessibility_type_default = self._mdata['accessibility_type']['default_type_values'][0]
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['url'] = self._url_default
self._my_map['data'] = self._data_default
self._my_map['accessibilityTypeId'] = self._accessibility_type_default
self._my_map['assignedRepositoryIds'] = [str(kwargs['repository_id'])]
self._my_map['assetId'] = str(kwargs['asset_id'])
def get_accessibility_type_metadata(self):
"""Gets the metadata for an accessibility type.
return: (osid.Metadata) - metadata for the accessibility types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.logging.LogEntryForm.get_priority_metadata
metadata = dict(self._mdata['accessibility_type'])
metadata.update({'existing_type_values': self._my_map['accessibilityTypeId']})
return Metadata(**metadata)
accessibility_type_metadata = property(fget=get_accessibility_type_metadata)
@utilities.arguments_not_none
def add_accessibility_type(self, accessibility_type):
"""Adds an accessibility type.
Multiple types can be added.
arg: accessibility_type (osid.type.Type): a new accessibility
type
raise: InvalidArgument - ``accessibility_type`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``accessibility_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def remove_accessibility_type(self, accessibility_type):
"""Removes an accessibility type.
arg: accessibility_type (osid.type.Type): accessibility type
to remove
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NotFound - accessibility type not found
raise: NullArgument - ``accessibility_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_accessibility_types(self):
"""Removes all accessibility types.
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
accessibility_types = property(fdel=clear_accessibility_types)
def get_data_metadata(self):
"""Gets the metadata for the content data.
return: (osid.Metadata) - metadata for the content data
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['data'])
metadata.update({'existing_object_values': self._my_map['data']})
return Metadata(**metadata)
data_metadata = property(fget=get_data_metadata)
@utilities.arguments_not_none
def set_data(self, data):
"""Sets the content data.
arg: data (osid.transport.DataInputStream): the content data
raise: InvalidArgument - ``data`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``data`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if data is None:
raise errors.NullArgument('data cannot be None')
if not isinstance(data, DataInputStream):
raise errors.InvalidArgument('data must be instance of DataInputStream')
dbase = JSONClientValidated('repository',
runtime=self._runtime).raw()
filesys = gridfs.GridFS(dbase)
self._my_map['data'] = filesys.put(data._my_data)
data._my_data.seek(0)
self._my_map['base64'] = base64.b64encode(data._my_data.read())
def clear_data(self):
"""Removes the content data.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_data_metadata().is_read_only() or
self.get_data_metadata().is_required()):
raise errors.NoAccess()
if self._my_map['data'] == self._data_default:
return
dbase = JSONClientValidated('repository',
runtime=self._runtime).raw()
filesys = gridfs.GridFS(dbase)
filesys.delete(self._my_map['data'])
self._my_map['data'] = self._data_default
del self._my_map['base64']
data = property(fset=set_data, fdel=clear_data)
def get_url_metadata(self):
"""Gets the metadata for the url.
return: (osid.Metadata) - metadata for the url
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['url'])
metadata.update({'existing_string_values': self._my_map['url']})
return Metadata(**metadata)
url_metadata = property(fget=get_url_metadata)
@utilities.arguments_not_none
def set_url(self, url):
"""Sets the url.
arg: url (string): the new url
raise: InvalidArgument - ``url`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.set_url_template
if self.get_url_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_string(
url,
self.get_url_metadata()):
raise errors.InvalidArgument()
self._my_map['url'] = url
def clear_url(self):
"""Removes the url.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.clear_url_template
if (self.get_url_metadata().is_read_only() or
self.get_url_metadata().is_required()):
raise errors.NoAccess()
self._my_map['url'] = self._url_default
url = property(fset=set_url, fdel=clear_url)
@utilities.arguments_not_none
def get_asset_content_form_record(self, asset_content_record_type):
"""Gets the ``AssetContentFormRecord`` corresponding to the given asset content record ``Type``.
arg: asset_content_record_type (osid.type.Type): an asset
content record type
return: (osid.repository.records.AssetContentFormRecord) - the
asset content form record
raise: NullArgument - ``asset_content_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(asset_content_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(asset_content_record_type)
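# A minimal sketch of attaching binary data through the form above,
# assuming DataInputStream wraps a binary file-like object (as the
# ``data._my_data`` access in set_data() suggests); the path is
# hypothetical.
def _example_attach_data(asset_content_form, path='/tmp/image.png'):
    with open(path, 'rb') as fileobj:
        # Stores the bytes in GridFS and keeps a base64 copy in the map.
        asset_content_form.set_data(DataInputStream(fileobj))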
class AssetContentList(abc_repository_objects.AssetContentList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssetContentList`` provides a means for accessing ``AssetContent`` elements sequentially either one at a time or many at a time.
Examples: while (acl.hasNext()) { AssetContent content =
acl.getNextAssetContent(); }
or
while (acl.hasNext()) {
AssetContent[] contents = acl.getNextAssetContents(acl.available());
}
"""
def get_next_asset_content(self):
"""Gets the next ``AssetContent`` in this list.
return: (osid.repository.AssetContent) - the next
``AssetContent`` in this list. The ``has_next()`` method
should be used to test that a next ``AssetContent`` is
available before calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return next(self)
def next(self):
return self._get_next_object(AssetContent)
__next__ = next
next_asset_content = property(fget=get_next_asset_content)
@utilities.arguments_not_none
def get_next_asset_contents(self, n):
"""Gets the next set of ``AssetContents`` in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``AssetContent`` elements
requested which must be less than or equal to
``available()``
return: (osid.repository.AssetContent) - an array of
``AssetContent`` elements. The length of the array is
less than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(AssetContentList, number=n)
class Composition(abc_repository_objects.Composition, osid_objects.OsidObject, osid_markers.Containable, osid_markers.Operable, osid_markers.Sourceable):
"""A ``Composition`` represents an authenticatable identity.
Like all OSID objects, a ``Composition`` is identified by its Id and
any persisted references should use the Id.
"""
_namespace = 'repository.Composition'
def __init__(self, **kwargs):
osid_objects.OsidObject.__init__(self, object_name='COMPOSITION', **kwargs)
self._catalog_name = 'Repository'
def get_children_ids(self):
"""Gets the child ``Ids`` of this composition.
return: (osid.id.IdList) - the composition child ``Ids``
*compliance: mandatory -- This method must be implemented.*
"""
return IdList(self._my_map['childIds'])
def get_child_ids(self):
return self.get_children_ids()
children_ids = property(fget=get_children_ids)
def get_children(self):
"""Gets the children of this composition.
return: (osid.repository.CompositionList) - the composition
children
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_assets_template
if not bool(self._my_map['childIds']):
raise errors.IllegalState('no childIds')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_composition_lookup():
raise errors.OperationFailed('Repository does not support Composition lookup')
# What about the Proxy?
lookup_session = mgr.get_composition_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
return lookup_session.get_compositions_by_ids(self.get_child_ids())
children = property(fget=get_children)
@utilities.arguments_not_none
def get_composition_record(self, composition_record_type):
"""Gets the composition record corresponding to the given ``Composition`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``composition_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(composition_record_type)`` is ``true`` .
arg: composition_record_type (osid.type.Type): a composition
record type
return: (osid.repository.records.CompositionRecord) - the
composition record
raise: NullArgument - ``composition_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(composition_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(composition_record_type)
def get_object_map(self):
obj_map = dict(self._my_map)
if 'assetIds' in obj_map:
del obj_map['assetIds']
return osid_objects.OsidObject.get_object_map(self, obj_map)
object_map = property(fget=get_object_map)
class CompositionForm(abc_repository_objects.CompositionForm, osid_objects.OsidObjectForm, osid_objects.OsidContainableForm, osid_objects.OsidOperableForm, osid_objects.OsidSourceableForm):
"""This is the form for creating and updating ``Compositions``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``CompositionAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_namespace = 'repository.Composition'
def __init__(self, **kwargs):
osid_objects.OsidObjectForm.__init__(self, object_name='COMPOSITION', **kwargs)
self._mdata = default_mdata.get_composition_mdata()
self._init_metadata(**kwargs)
if not self.is_for_update():
self._init_map(**kwargs)
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidContainableForm._init_metadata(self)
osid_objects.OsidSourceableForm._init_metadata(self)
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._children_default = self._mdata['children']['default_id_values']
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidContainableForm._init_map(self)
osid_objects.OsidSourceableForm._init_map(self)
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['childIds'] = self._children_default
self._my_map['assignedRepositoryIds'] = [str(kwargs['repository_id'])]
@utilities.arguments_not_none
def get_composition_form_record(self, composition_record_type):
"""Gets the ``CompositionFormRecord`` corresponding to the given repository record ``Type``.
arg: composition_record_type (osid.type.Type): a composition
record type
return: (osid.repository.records.CompositionFormRecord) - the
composition form record
raise: NullArgument - ``composition_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(composition_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(composition_record_type)
def get_children_metadata(self):
"""Gets the metadata for children.
return: (osid.Metadata) - metadata for the children
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._mdata['children'])
metadata.update({'existing_children_values': self._my_map['childIds']})
return Metadata(**metadata)
children_metadata = property(fget=get_children_metadata)
@utilities.arguments_not_none
def set_children(self, child_ids):
"""Sets the children.
arg: child_ids (osid.id.Id[]): the children ``Ids``
raise: InvalidArgument - ``child_ids`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(child_ids, list):
raise errors.InvalidArgument()
if self.get_children_metadata().is_read_only():
raise errors.NoAccess()
idstr_list = []
for object_id in child_ids:
if not self._is_valid_id(object_id):
raise errors.InvalidArgument()
if str(object_id) not in idstr_list:
idstr_list.append(str(object_id))
self._my_map['childIds'] = idstr_list
def clear_children(self):
"""Clears the children.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_children_metadata().is_read_only() or
self.get_children_metadata().is_required()):
raise errors.NoAccess()
self._my_map['childIds'] = self._children_default
children = property(fset=set_children, fdel=clear_children)
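# A minimal sketch of assembling a composition hierarchy with the form
# above. set_children() validates each Id and drops duplicates while
# preserving order, so repeated Ids are harmless; ``composition_form``
# and ``child_ids`` are hypothetical stand-ins.
def _example_set_children(composition_form, child_ids):
    if composition_form.get_children_metadata().is_read_only():
        raise RuntimeError('children are read-only for this provider')
    composition_form.set_children(list(child_ids))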
class CompositionList(abc_repository_objects.CompositionList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``CompositionList`` provides a means for accessing ``Composition`` elements sequentially either one at a time or many at a time.
Examples: while (cl.hasNext()) { Composition composition =
cl.getNextComposition(); }
or
while (cl.hasNext()) {
Composition[] compositions = cl.getNextCompositions(cl.available());
}
"""
def get_next_composition(self):
"""Gets the next ``Composition`` in this list.
return: (osid.repository.Composition) - the next ``Composition``
in this list. The ``has_next()`` method should be used
to test that a next ``Composition`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return next(self)
def next(self):
return self._get_next_object(Composition)
__next__ = next
next_composition = property(fget=get_next_composition)
@utilities.arguments_not_none
def get_next_compositions(self, n):
"""Gets the next set of ``Composition`` elements in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``Composition`` elements
requested which must be less than or equal to
``available()``
return: (osid.repository.Composition) - an array of
``Composition`` elements. The length of the array is less
than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(CompositionList, number=n)
class Repository(abc_repository_objects.Repository, osid_objects.OsidCatalog):
"""A repository defines a collection of assets."""
_namespace = 'repository.Repository'
def __init__(self, **kwargs):
osid_objects.OsidCatalog.__init__(self, object_name='REPOSITORY', **kwargs)
@utilities.arguments_not_none
def get_repository_record(self, repository_record_type):
"""Gets the record corresponding to the given ``Repository`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``repository_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(repository_record_type)`` is ``true`` .
arg: repository_record_type (osid.type.Type): a repository
record type
return: (osid.repository.records.RepositoryRecord) - the
repository record
raise: NullArgument - ``repository_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(repository_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class RepositoryForm(abc_repository_objects.RepositoryForm, osid_objects.OsidCatalogForm):
"""This is the form for creating and updating repositories.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``RepositoryAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_namespace = 'repository.Repository'
def __init__(self, **kwargs):
osid_objects.OsidCatalogForm.__init__(self, object_name='REPOSITORY', **kwargs)
self._mdata = default_mdata.get_repository_mdata()
self._init_metadata(**kwargs)
if not self.is_for_update():
self._init_map(**kwargs)
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidCatalogForm._init_metadata(self, **kwargs)
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidCatalogForm._init_map(self, record_types, **kwargs)
@utilities.arguments_not_none
def get_repository_form_record(self, repository_record_type):
"""Gets the ``RepositoryFormRecord`` corresponding to the given repository record ``Type``.
arg: repository_record_type (osid.type.Type): a repository
record type
return: (osid.repository.records.RepositoryFormRecord) - the
repository form record
raise: NullArgument - ``repository_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(repository_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class RepositoryList(abc_repository_objects.RepositoryList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``RepositoryList`` provides a means for accessing ``Repository`` elements sequentially either one at a time or many at a time.
Examples: while (rl.hasNext()) { Repository repository =
rl.getNextRepository(); }
or
while (rl.hasNext()) {
Repository[] repositories = rl.getNextRepositories(rl.available());
}
"""
def get_next_repository(self):
"""Gets the next ``Repository`` in this list.
return: (osid.repository.Repository) - the next ``Repository``
in this list. The ``has_next()`` method should be used
to test that a next ``Repository`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return next(self)
def next(self):
return self._get_next_object(Repository)
__next__ = next
next_repository = property(fget=get_next_repository)
@utilities.arguments_not_none
def get_next_repositories(self, n):
"""Gets the next set of ``Repository`` elements in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``Repository`` elements
requested which must be less than or equal to
``available()``
return: (osid.repository.Repository) - an array of
``Repository`` elements. The length of the array is less
than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(RepositoryList, number=n)
class RepositoryNode(abc_repository_objects.RepositoryNode, osid_objects.OsidNode):
"""This interface is a container for a partial hierarchy retrieval.
The number of hierarchy levels traversable through this interface
depends on the number of levels requested in the
``RepositoryHierarchySession``.
"""
def __init__(self, node_map, runtime=None, proxy=None, lookup_session=None):
osid_objects.OsidNode.__init__(self, node_map)
self._lookup_session = lookup_session
self._runtime = runtime
self._proxy = proxy
def get_object_node_map(self):
node_map = dict(self.get_repository().get_object_map())
node_map['type'] = 'RepositoryNode'
node_map['parentNodes'] = []
node_map['childNodes'] = []
for repository_node in self.get_parent_repository_nodes():
node_map['parentNodes'].append(repository_node.get_object_node_map())
for repository_node in self.get_child_repository_nodes():
node_map['childNodes'].append(repository_node.get_object_node_map())
return node_map
def get_repository(self):
"""Gets the ``Repository`` at this node.
return: (osid.repository.Repository) - the repository
represented by this node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('REPOSITORY', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_repository_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_repository(Id(self._my_map['id']))
repository = property(fget=get_repository)
def get_parent_repository_nodes(self):
"""Gets the parents of this repository.
return: (osid.repository.RepositoryNodeList) - the parents of
this repository
*compliance: mandatory -- This method must be implemented.*
"""
parent_repository_nodes = []
for node in self._my_map['parentNodes']:
parent_repository_nodes.append(RepositoryNode(
node._my_map,
runtime=self._runtime,
proxy=self._proxy,
lookup_session=self._lookup_session))
return RepositoryNodeList(parent_repository_nodes)
parent_repository_nodes = property(fget=get_parent_repository_nodes)
def get_child_repository_nodes(self):
"""Gets the children of this repository.
return: (osid.repository.RepositoryNodeList) - the children of
this repository
*compliance: mandatory -- This method must be implemented.*
"""
child_repository_nodes = []
for node in self._my_map['childNodes']:
    child_repository_nodes.append(RepositoryNode(
        node._my_map,
        runtime=self._runtime,
        proxy=self._proxy,
        lookup_session=self._lookup_session))
return RepositoryNodeList(child_repository_nodes)
child_repository_nodes = property(fget=get_child_repository_nodes)
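# A minimal sketch of walking the nested dict produced by
# get_object_node_map() above. Every node carries 'parentNodes' and
# 'childNodes' lists of the same shape, so a short recursion prints the
# retrieved slice of the hierarchy; the 'displayName' key is assumed to
# follow the usual object-map shape.
def _example_print_subtree(node_map, depth=0):
    name = node_map.get('displayName', {}).get('text', '<unnamed>')
    print('  ' * depth + name)
    for child in node_map['childNodes']:
        _example_print_subtree(child, depth + 1)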
class RepositoryNodeList(abc_repository_objects.RepositoryNodeList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``RepositoryNodeList`` provides a means for accessing ``RepositoryNode`` elements sequentially either one at a time or many at a time.
Examples: while (rnl.hasNext()) { RepositoryNode node =
rnl.getNextRepositoryNode(); }
or
while (rnl.hasNext()) {
RepositoryNode[] nodes = rnl.getNextRepositoryNodes(rnl.available());
}
"""
def get_next_repository_node(self):
"""Gets the next ``RepositoryNode`` in this list.
return: (osid.repository.RepositoryNode) - the next
``RepositoryNode`` in this list. The ``has_next()``
method should be used to test that a next
``RepositoryNode`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return next(self)
def next(self):
return self._get_next_object(RepositoryNode)
__next__ = next
next_repository_node = property(fget=get_next_repository_node)
@utilities.arguments_not_none
def get_next_repository_nodes(self, n):
"""Gets the next set of ``RepositoryNode`` elements in this list which must be less than or equal to the return from ``available()``.
arg: n (cardinal): the number of ``RepositoryNode`` elements
requested which must be less than or equal to
``available()``
return: (osid.repository.RepositoryNode) - an array of
``RepositoryNode`` elements. The length of the array is
less than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(RepositoryNodeList, number=n)
|
{
"content_hash": "617ffb47a8d6c8eb50b688caf942d1c0",
"timestamp": "",
"source": "github",
"line_count": 2319,
"max_line_length": 328,
"avg_line_length": 42.91418714963346,
"alnum_prop": 0.6500030145300347,
"repo_name": "mitsei/dlkit",
"id": "23ff09c667159e2d8dc147ad52e156dfe75a1134",
"size": "99518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/json_/repository/objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
# pylint: disable=missing-function-docstring
"""Test for diff_cover.diff_quality - main"""
import pytest
from diff_cover.diff_quality_tool import main, parse_quality_args
def test_parse_with_html_report():
argv = ["--violations", "pycodestyle", "--html-report", "diff_cover.html"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("violations") == "pycodestyle"
assert arg_dict.get("html_report") == "diff_cover.html"
assert arg_dict.get("input_reports") == []
assert not arg_dict.get("ignore_unstaged")
assert arg_dict.get("diff_range_notation") == "..."
def test_parse_with_no_html_report():
argv = ["--violations", "pylint"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("violations") == "pylint"
assert arg_dict.get("input_reports") == []
assert not arg_dict.get("ignore_unstaged")
assert arg_dict.get("diff_range_notation") == "..."
def test_parse_with_one_input_report():
argv = ["--violations", "pylint", "pylint_report.txt"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("input_reports") == ["pylint_report.txt"]
def test_parse_with_multiple_input_reports():
argv = ["--violations", "pylint", "pylint_report_1.txt", "pylint_report_2.txt"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("input_reports") == [
"pylint_report_1.txt",
"pylint_report_2.txt",
]
def test_parse_with_options():
argv = [
"--violations",
"pycodestyle",
"--options=\"--exclude='*/migrations*'\"",
]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("options") == "\"--exclude='*/migrations*'\""
def test_parse_with_ignored_unstaged():
argv = ["--violations", "pylint", "--ignore-unstaged"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("ignore_unstaged")
def test_parse_invalid_arg():
# No code quality test provided
invalid_argv = [[], ["--html-report", "diff_cover.html"]]
for argv in invalid_argv:
with pytest.raises(SystemExit):
print(f"args = {argv}")
parse_quality_args(argv)
def _test_parse_with_path_patterns(name):
argv = ["--violations", "pep8"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("include") is None
argv = ["--violations", "pep8", f"--{name}", "noneed/*.py"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get(name) == ["noneed/*.py"]
argv = ["--violations", "pep8", f"--{name}", "noneed/*.py", "other/**/*.py"]
arg_dict = parse_quality_args(argv)
assert arg_dict.get(name) == ["noneed/*.py", "other/**/*.py"]
def test_parse_with_exclude():
_test_parse_with_path_patterns("exclude")
def test_parse_with_include():
_test_parse_with_path_patterns("include")
def test_parse_diff_range_notation():
argv = ["--violations", "pep8", "--diff-range-notation=.."]
arg_dict = parse_quality_args(argv)
assert arg_dict.get("violations") == "pep8"
assert arg_dict.get("html_report") is None
assert arg_dict.get("input_reports") == []
assert not arg_dict.get("ignore_unstaged")
assert arg_dict.get("diff_range_notation") == ".."
@pytest.fixture(autouse=True)
def patch_git_patch(mocker):
mocker.patch("diff_cover.diff_quality_tool.GitPathTool")
@pytest.fixture
def report_mock(mocker):
return mocker.patch(
"diff_cover.diff_quality_tool.generate_quality_report", return_value=100
)
def test_parse_options(report_mock):
_run_main(
report_mock,
[
"diff-quality",
"--violations",
"pylint",
'--options="--foobar"',
],
)
def test_parse_options_without_quotes(report_mock):
_run_main(
report_mock,
[
"diff-quality",
"--violations",
"pylint",
"--options=--foobar",
],
)
def _run_main(report, argv):
main(argv)
quality_reporter = report.call_args[0][0]
assert quality_reporter.driver.name == "pylint"
assert quality_reporter.options == "--foobar"
|
{
"content_hash": "004670b9226cf871e8eeb8b1f8014fff",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 83,
"avg_line_length": 27.326666666666668,
"alnum_prop": 0.6103927787265186,
"repo_name": "Bachmann1234/diff-cover",
"id": "a201b9a153c18d2af0e08fa3bb9e0b897b98647e",
"size": "4099",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_diff_quality_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3929"
},
{
"name": "HTML",
"bytes": "81842"
},
{
"name": "Python",
"bytes": "255213"
}
],
"symlink_target": ""
}
|
import unittest
from oslo.config import cfg
from gate.common import log as logging
from gate.process.pipeline import Pipeline
class FakeModule(object):
def __init__(self, func):
self.func = func
def __call__(self):
pass
def process(self, bundle):
return self.func(bundle)
class FakePipeline(Pipeline):
def __init__(self, name, func=None):
super(FakePipeline, self).__init__(name)
self._initialized = True
self.add_func(func)
def add_func(self, func):
if not func:
return
self._initialized = False
self.append(FakeModule(func))
self._initialized = True
def add_module(self, module):
self._initialized = False
module()
self.append(module)
self._initialized = True
class FakePipelineDriver(object):
def __init__(self):
self.pipelines = dict()
def __getitem__(self, key):
return self.get(key)
def get(self, name):
try:
pipeline = self.pipelines[name]
except KeyError:
pipeline = None
if not pipeline:
return None
return pipeline
def add_pipeline(self, pipeline, name=None):
if not name:
name = pipeline.name
self.pipelines[name] = pipeline
class BaseTestCase(unittest.TestCase):
def enableFakeTransport(self):
cfg.CONF.transport_driver = 'fake'
cfg.CONF.transport_url = 'fake:'
def setupLogging(self):
cfg.CONF.default_log_levels = ['stevedore=WARN', 'gate=WARN']
logging.setup('gate')
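# A minimal sketch of how these fakes compose in a test, assuming gate's
# Pipeline behaves like a list of modules; names are illustrative.
def _example_fake_driver():
    driver = FakePipelineDriver()
    driver.add_pipeline(FakePipeline('upper', func=lambda bundle: bundle.upper()))
    assert driver['upper'] is driver.get('upper')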
|
{
"content_hash": "14d19a4cc0e8b263afd5c0a27ec5b0ef",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 69,
"avg_line_length": 21.236842105263158,
"alnum_prop": 0.5929368029739777,
"repo_name": "vindeka/gate",
"id": "ac7f108a78606ce27edd63c6883d615ab64e2e07",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/gate/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "325743"
}
],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_x_csetoffset_sealed(BaseBERITestCase):
@attr('capabilities')
def test_cp2_x_csetoffset_sealed_1(self):
        '''Test that CSetOffset on a sealed capability does not change the offset'''
self.assertRegisterEqual(self.MIPS.a0, 0, "CSetOffset changed the offset of a sealed capability")
@attr('capabilities')
def test_cp2_x_csetoffset_sealed_2(self):
        '''Test that CSetOffset on a sealed capability raises an exception'''
self.assertRegisterEqual(self.MIPS.a2, 1, "CSetOffset on a sealed capability did not raise an exception")
@attr('capabilities')
def test_cp2_x_csetoffset_sealed_3(self):
'''Test that CSetOffset on a sealed capability sets CapCause'''
self.assertRegisterEqual(self.MIPS.a3, 0x0301, "CSetOffset on a sealed capability did not set CapCause correctly")
|
{
"content_hash": "3a51d881c45636ccd4e6e330d9dd3fd9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 122,
"avg_line_length": 48.73684210526316,
"alnum_prop": 0.7332613390928726,
"repo_name": "8l/beri",
"id": "52a23365f7fcf83a4863e9b7c0c3f29e1a17dc06",
"size": "2058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/cp2/test_cp2_x_csetoffset_sealed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from flask.ext.sqlalchemy import SQLAlchemy
from keg.signals import testing_run_start, db_init_pre, db_init_post, db_clear_pre, db_clear_post
from keg.utils import visit_modules
class KegSQLAlchemy(SQLAlchemy):
def init_app(self, app):
SQLAlchemy.init_app(self, app)
if app.testing:
self.testing_scoped_session()
def testing_scoped_session(self):
# don't want to have to import this if we are in production, so put import
# inside of the method
from flask.ext.webtest import get_scopefunc
# flask-sqlalchemy creates the session when the class is initialized. We have to re-create
# with different session options and override the session attribute with the new session
db.session = db.create_scoped_session(options={'scopefunc': get_scopefunc()})
def get_engines(self, app):
# the default engine doesn't have a bind
retval = [(None, self.get_engine(app))]
bind_names = app.config['SQLALCHEMY_BINDS']
# The default value of SQLALCHEMY_BINDS is None and the key is present b/c of
# Flask-SQLAlchemy defaults. So, only process the binds if they are not None.
if bind_names is not None:
for bind_name in bind_names:
retval.append((bind_name, self.get_engine(app, bind=bind_name)))
return retval
db = KegSQLAlchemy()
# put this import after the above db assignment to avoid circular reference issues
from .dialect_ops import DialectOperations
class DatabaseManager(object):
"""
    A per-app-instance utility class that manages all common operations for a Keg app.
Binds & Dialects
----------------
Flask SQLAlchemy handles multiple DB connections per application through the use of "binds."
    When an application wants to communicate events or initiate activities, this manager
    will handle distributing those events and activities to all database connections bound
to the application.
Furthermore, this manager delegates to DialectOperations instances to run the events and
activities in ways that are specific to the type of RDBMS being used (when needed).
"""
def __init__(self, app):
self.app = app
self.dialect_opts = app.config['KEG_DB_DIALECT_OPTIONS']
self.init_app()
self.init_events()
def init_app(self):
db.init_app(self.app)
visit_modules(self.app.db_visit_modules, self.app.import_name)
def init_events(self):
testing_run_start.connect(self.on_testing_start, sender=self.app)
def bind_dialect(self, bind_name):
engine = db.get_engine(self.app, bind=bind_name)
return DialectOperations.create_for(engine, bind_name, self.dialect_opts)
def all_bind_dialects(self):
"""
For each database connection (bind) in this application, yield a DialectOperations
instance corresponding to the type of RDBMS the bind is connecting to.
"""
for bind_name, engine in db.get_engines(self.app):
yield DialectOperations.create_for(engine, bind_name, self.dialect_opts)
def on_testing_start(self, app):
self.db_init_with_clear()
def drop_all(self):
db.session.remove()
for dialect in self.all_bind_dialects():
dialect.drop_all()
def prep_empty(self):
for dialect in self.all_bind_dialects():
dialect.prep_empty()
# The methods that follow will trigger application events.
def db_init_with_clear(self):
self.db_clear()
# todo: prep_empty should probably be an event
self.prep_empty()
self.db_init()
def db_init(self):
db_init_pre.send(self.app)
db.create_all()
db_init_post.send(self.app)
def db_clear(self):
db_clear_pre.send(self.app)
self.drop_all()
db_clear_post.send(self.app)
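# Illustrative sketch (assumes a configured Keg app object; the helper name
# is hypothetical): the manager is built once per app instance, and a full
# test-style reset then fires db_clear_pre -> drop_all -> db_clear_post,
# runs prep_empty, and fires db_init_pre -> create_all -> db_init_post.
def _demo_reset(app):
    manager = DatabaseManager(app)
    manager.db_init_with_clear()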
|
{
"content_hash": "6f159cbc1f9398c900598111c51b37c9",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 100,
"avg_line_length": 35.48245614035088,
"alnum_prop": 0.657601977750309,
"repo_name": "nZac/keg",
"id": "44e3944d5ab9014b8a8966dbea588da95b2a0226",
"size": "4045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keg/db/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "237"
},
{
"name": "JavaScript",
"bytes": "37"
},
{
"name": "Python",
"bytes": "122445"
}
],
"symlink_target": ""
}
|
import logging
from datetime import datetime
import click
from loqusdb.utils.migrate import migrate_database
from loqusdb.commands.cli import cli as base_command
LOG = logging.getLogger(__name__)
@base_command.command("migrate", short_help="Migrate an old loqusdb instance")
@click.pass_context
def migrate(
ctx,
):
"""Migrate an old loqusdb instance to 1.0"""
adapter = ctx.obj["adapter"]
start_time = datetime.now()
nr_updated = migrate_database(adapter)
LOG.info(
"All variants updated, time to complete migration: {}".format(datetime.now() - start_time)
)
LOG.info("Nr variants that where updated: %s", nr_updated)
|
{
"content_hash": "dbe0d9a25af4d6ee6cb071b9db3ee05b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 98,
"avg_line_length": 24.703703703703702,
"alnum_prop": 0.704647676161919,
"repo_name": "moonso/loqusdb",
"id": "8bb9fcf500d8dee2974fe66a4ccdc8922892fa66",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loqusdb/commands/migrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "216366"
}
],
"symlink_target": ""
}
|
"""A base class for contents managers."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from fnmatch import fnmatch
import itertools
import json
import os
import re
from tornado.web import HTTPError
from .checkpoints import Checkpoints
from traitlets.config.configurable import LoggingConfigurable
from nbformat import sign, validate, ValidationError
from nbformat.v4 import new_notebook
from ipython_genutils.importstring import import_item
from traitlets import (
Any,
Dict,
Instance,
List,
TraitError,
Type,
Unicode,
)
from ipython_genutils.py3compat import string_types
copy_pat = re.compile(r'\-Copy\d*\.')
class ContentsManager(LoggingConfigurable):
"""Base class for serving files and directories.
This serves any text or binary file,
as well as directories,
with special handling for JSON notebook documents.
Most APIs take a path argument,
which is always an API-style unicode path,
and always refers to a directory.
- unicode, not url-escaped
- '/'-separated
- leading and trailing '/' will be stripped
- if unspecified, path defaults to '',
indicating the root path.
"""
notary = Instance(sign.NotebookNotary)
def _notary_default(self):
return sign.NotebookNotary(parent=self)
hide_globs = List(Unicode(), [
u'__pycache__', '*.pyc', '*.pyo',
'.DS_Store', '*.so', '*.dylib', '*~',
], config=True, help="""
Glob patterns to hide in file and directory listings.
""")
untitled_notebook = Unicode("Untitled", config=True,
help="The base name used when creating untitled notebooks."
)
untitled_file = Unicode("untitled", config=True,
help="The base name used when creating untitled files."
)
untitled_directory = Unicode("Untitled Folder", config=True,
help="The base name used when creating untitled directories."
)
pre_save_hook = Any(None, config=True,
help="""Python callable or importstring thereof
To be called on a contents model prior to save.
This can be used to process the structure,
such as removing notebook outputs or other side effects that
should not be saved.
It will be called as (all arguments passed by keyword)::
hook(path=path, model=model, contents_manager=self)
- model: the model to be saved. Includes file contents.
Modifying this dict will affect the file that is stored.
- path: the API path of the save destination
- contents_manager: this ContentsManager instance
"""
)
def _pre_save_hook_changed(self, name, old, new):
if new and isinstance(new, string_types):
self.pre_save_hook = import_item(self.pre_save_hook)
elif new:
if not callable(new):
raise TraitError("pre_save_hook must be callable")
def run_pre_save_hook(self, model, path, **kwargs):
"""Run the pre-save hook if defined, and log errors"""
if self.pre_save_hook:
try:
self.log.debug("Running pre-save hook on %s", path)
self.pre_save_hook(model=model, path=path, contents_manager=self, **kwargs)
except Exception:
self.log.error("Pre-save hook failed on %s", path, exc_info=True)
checkpoints_class = Type(Checkpoints, config=True)
checkpoints = Instance(Checkpoints, config=True)
checkpoints_kwargs = Dict(config=True)
def _checkpoints_default(self):
return self.checkpoints_class(**self.checkpoints_kwargs)
def _checkpoints_kwargs_default(self):
return dict(
parent=self,
log=self.log,
)
# ContentsManager API part 1: methods that must be
# implemented in subclasses.
def dir_exists(self, path):
"""Does a directory exist at the given path?
Like os.path.isdir
Override this method in subclasses.
Parameters
----------
path : string
The path to check
Returns
-------
exists : bool
Whether the path does indeed exist.
"""
raise NotImplementedError
def is_hidden(self, path):
"""Is path a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root dir).
Returns
-------
hidden : bool
Whether the path is hidden.
"""
raise NotImplementedError
def file_exists(self, path=''):
"""Does a file exist at the given path?
Like os.path.isfile
Override this method in subclasses.
Parameters
----------
path : string
The API path of a file to check for.
Returns
-------
exists : bool
Whether the file exists.
"""
raise NotImplementedError('must be implemented in a subclass')
def exists(self, path):
"""Does a file or directory exist at the given path?
Like os.path.exists
Parameters
----------
path : string
The API path of a file or directory to check for.
Returns
-------
exists : bool
Whether the target exists.
"""
return self.file_exists(path) or self.dir_exists(path)
def get(self, path, content=True, type=None, format=None):
"""Get a file or directory model."""
raise NotImplementedError('must be implemented in a subclass')
def save(self, model, path):
"""
Save a file or directory model to path.
Should return the saved model with no content. Save implementations
should call self.run_pre_save_hook(model=model, path=path) prior to
writing any data.
"""
raise NotImplementedError('must be implemented in a subclass')
def delete_file(self, path):
"""Delete the file or directory at path."""
raise NotImplementedError('must be implemented in a subclass')
def rename_file(self, old_path, new_path):
"""Rename a file or directory."""
raise NotImplementedError('must be implemented in a subclass')
# ContentsManager API part 2: methods that have useable default
# implementations, but can be overridden in subclasses.
def delete(self, path):
"""Delete a file/directory and any associated checkpoints."""
path = path.strip('/')
if not path:
raise HTTPError(400, "Can't delete root")
self.delete_file(path)
self.checkpoints.delete_all_checkpoints(path)
def rename(self, old_path, new_path):
"""Rename a file and any checkpoints associated with that file."""
self.rename_file(old_path, new_path)
self.checkpoints.rename_all_checkpoints(old_path, new_path)
def update(self, model, path):
"""Update the file's path
For use in PATCH requests, to enable renaming a file without
re-uploading its contents. Only used for renaming at the moment.
"""
path = path.strip('/')
new_path = model.get('path', path).strip('/')
if path != new_path:
self.rename(path, new_path)
model = self.get(new_path, content=False)
return model
def info_string(self):
return "Serving contents"
def get_kernel_path(self, path, model=None):
"""Return the API path for the kernel
KernelManagers can turn this value into a filesystem path,
or ignore it altogether.
The default value here will start kernels in the directory of the
notebook server. FileContentsManager overrides this to use the
directory containing the notebook.
"""
return ''
def increment_filename(self, filename, path='', insert=''):
"""Increment a filename until it is unique.
Parameters
----------
filename : unicode
The name of a file, including extension
path : unicode
The API path of the target's directory
Returns
-------
name : unicode
A filename that is unique, based on the input filename.
"""
path = path.strip('/')
basename, ext = os.path.splitext(filename)
for i in itertools.count():
if i:
insert_i = '{}{}'.format(insert, i)
else:
insert_i = ''
name = u'{basename}{insert}{ext}'.format(basename=basename,
insert=insert_i, ext=ext)
if not self.exists(u'{}/{}'.format(path, name)):
break
return name
def validate_notebook_model(self, model):
"""Add failed-validation message to model"""
try:
validate(model['content'])
except ValidationError as e:
model['message'] = u'Notebook Validation failed: {}:\n{}'.format(
e.message, json.dumps(e.instance, indent=1, default=lambda obj: '<UNKNOWN>'),
)
return model
def new_untitled(self, path='', type='', ext=''):
"""Create a new untitled file or directory in path
path must be a directory
File extension can be specified.
Use `new` to create files with a fully specified path (including filename).
"""
path = path.strip('/')
if not self.dir_exists(path):
raise HTTPError(404, 'No such directory: %s' % path)
model = {}
if type:
model['type'] = type
if ext == '.ipynb':
model.setdefault('type', 'notebook')
else:
model.setdefault('type', 'file')
insert = ''
if model['type'] == 'directory':
untitled = self.untitled_directory
insert = ' '
elif model['type'] == 'notebook':
untitled = self.untitled_notebook
ext = '.ipynb'
elif model['type'] == 'file':
untitled = self.untitled_file
else:
raise HTTPError(400, "Unexpected model type: %r" % model['type'])
name = self.increment_filename(untitled + ext, path, insert=insert)
path = u'{0}/{1}'.format(path, name)
return self.new(model, path)
def new(self, model=None, path=''):
"""Create a new file or directory and return its model with no content.
To create a new untitled entity in a directory, use `new_untitled`.
"""
path = path.strip('/')
if model is None:
model = {}
if path.endswith('.ipynb'):
model.setdefault('type', 'notebook')
else:
model.setdefault('type', 'file')
# no content, not a directory, so fill out new-file model
if 'content' not in model and model['type'] != 'directory':
if model['type'] == 'notebook':
model['content'] = new_notebook()
model['format'] = 'json'
else:
model['content'] = ''
model['type'] = 'file'
model['format'] = 'text'
model = self.save(model, path)
return model
def copy(self, from_path, to_path=None):
"""Copy an existing file and return its new model.
If to_path not specified, it will be the parent directory of from_path.
If to_path is a directory, filename will increment `from_path-Copy#.ext`.
from_path must be a full path to a file.
"""
path = from_path.strip('/')
if to_path is not None:
to_path = to_path.strip('/')
if '/' in path:
from_dir, from_name = path.rsplit('/', 1)
else:
from_dir = ''
from_name = path
model = self.get(path)
model.pop('path', None)
model.pop('name', None)
if model['type'] == 'directory':
raise HTTPError(400, "Can't copy directories")
if to_path is None:
to_path = from_dir
if self.dir_exists(to_path):
name = copy_pat.sub(u'.', from_name)
to_name = self.increment_filename(name, to_path, insert='-Copy')
to_path = u'{0}/{1}'.format(to_path, to_name)
model = self.save(model, to_path)
return model
def log_info(self):
self.log.info(self.info_string())
def trust_notebook(self, path):
"""Explicitly trust a notebook
Parameters
----------
path : string
The path of a notebook
"""
model = self.get(path)
nb = model['content']
self.log.warn("Trusting notebook %s", path)
self.notary.mark_cells(nb, True)
self.save(model, path)
def check_and_sign(self, nb, path=''):
"""Check for trusted cells, and sign the notebook.
Called as a part of saving notebooks.
Parameters
----------
nb : dict
The notebook dict
path : string
The notebook's path (for logging)
"""
if self.notary.check_cells(nb):
self.notary.sign(nb)
else:
self.log.warn("Saving untrusted notebook %s", path)
def mark_trusted_cells(self, nb, path=''):
"""Mark cells as trusted if the notebook signature matches.
Called as a part of loading notebooks.
Parameters
----------
nb : dict
The notebook object (in current nbformat)
path : string
The notebook's path (for logging)
"""
trusted = self.notary.check_signature(nb)
if not trusted:
self.log.warn("Notebook %s is not trusted", path)
self.notary.mark_cells(nb, trusted)
def should_list(self, name):
"""Should this file/directory name be displayed in a listing?"""
return not any(fnmatch(name, glob) for glob in self.hide_globs)
# Part 3: Checkpoints API
def create_checkpoint(self, path):
"""Create a checkpoint."""
return self.checkpoints.create_checkpoint(self, path)
def restore_checkpoint(self, checkpoint_id, path):
"""
Restore a checkpoint.
"""
self.checkpoints.restore_checkpoint(self, checkpoint_id, path)
def list_checkpoints(self, path):
return self.checkpoints.list_checkpoints(path)
def delete_checkpoint(self, checkpoint_id, path):
return self.checkpoints.delete_checkpoint(checkpoint_id, path)
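# Illustrative sketch (not part of this module; the function name is
# hypothetical): a pre_save_hook following the keyword-argument contract
# documented on the `pre_save_hook` trait above. It scrubs code-cell
# outputs from notebook models before they are written to disk.
def clear_output_pre_save(model, path, contents_manager, **kwargs):
    """Strip outputs from code cells in notebook models (sketch)."""
    if model.get('type') != 'notebook':
        return
    for cell in model['content'].get('cells', []):
        if cell.get('cell_type') == 'code':
            cell['outputs'] = []
            cell['execution_count'] = None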
|
{
"content_hash": "e5a921fd60110e3264ac7b0c8b77283d",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 93,
"avg_line_length": 31.422505307855626,
"alnum_prop": 0.5776351351351351,
"repo_name": "fzheng/codejam",
"id": "76db41c0bfda78d8865403de9f2f7d19132346a3",
"size": "14800",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/notebook/services/contents/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26929"
},
{
"name": "CSS",
"bytes": "70961"
},
{
"name": "HTML",
"bytes": "80615"
},
{
"name": "Java",
"bytes": "376384"
},
{
"name": "JavaScript",
"bytes": "5201764"
},
{
"name": "Jupyter Notebook",
"bytes": "13408"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "Python",
"bytes": "16542061"
},
{
"name": "Smarty",
"bytes": "22430"
},
{
"name": "TeX",
"bytes": "85477"
}
],
"symlink_target": ""
}
|
"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version"""
import sys
def Python():
if sys.platform == 'cli': #IronPython
import System
return System.IntPtr.Size == 8
else:
try:
return sys.maxsize > 2147483647
except AttributeError:
return sys.maxint > 2147483647
def os():
import platform
pm = platform.machine()
if pm != '..' and pm.endswith('64'): # recent Python (not Iron)
return True
else:
import os
if 'PROCESSOR_ARCHITEW6432' in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
return os.environ['PROCESSOR_ARCHITECTURE'].endswith('64') # 64 bit Windows 64 bit program
        except KeyError:
pass # not Windows
try:
return '64' in platform.architecture()[0] # this often works in Linux
        except Exception:
return False # is an older version of Python, assume also an older os (best we can guess)
if __name__ == "__main__":
print(("is64bit.Python() =", Python(), "is64bit.os() =", os()))
|
{
"content_hash": "39e6b341b525903005b972372deb5304",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 105,
"avg_line_length": 35.15151515151515,
"alnum_prop": 0.5853448275862069,
"repo_name": "ArcherSys/ArcherSys",
"id": "33c3ead6dc2bdb917026aa82a1161d900ccad5d2",
"size": "1160",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/adodbapi/is64bit.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from modelcluster.fields import ParentalKey
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, FieldRowPanel,
InlinePanel, MultiFieldPanel
)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailforms.edit_handlers import FormSubmissionsPanel
class FormField(AbstractFormField):
page = ParentalKey('FormPage', related_name='custom_form_fields')
class FormPage(AbstractEmailForm):
intro = RichTextField(blank=True)
thank_you_text = RichTextField(blank=True)
content_panels = AbstractEmailForm.content_panels + [
FormSubmissionsPanel(),
FieldPanel('intro', classname="full"),
InlinePanel('custom_form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldRowPanel([
FieldPanel('from_address', classname="col6"),
FieldPanel('to_address', classname="col6"),
]),
FieldPanel('subject'),
], "Email"),
]
def get_form_fields(self):
return self.custom_form_fields.all()
|
{
"content_hash": "6ee84dd4252e6f750b11af3dc5c7c788",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.689625850340136,
"repo_name": "joecheng511/try-wagtail",
"id": "affec1ad9de7ae67f56cce5e519b72d0e089404e",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/register/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "197606"
},
{
"name": "HTML",
"bytes": "8437"
},
{
"name": "JavaScript",
"bytes": "185435"
},
{
"name": "Python",
"bytes": "37442"
}
],
"symlink_target": ""
}
|
import os, time, webapp2, jinja2
import session, logging, json
import settings
from inspect import currentframe, getframeinfo
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
import read_tweepy
from models import *
class MainPage(session.BaseRequestHandler):
def get(self, command = ''):
def lookup(dict_or_obj, member):
try:
# could be a dictionary or a NoneType
member_value = dict_or_obj[member]
except (KeyError, TypeError):
try:
# could have it as an attribute
member_value = getattr(dict_or_obj, member)
except AttributeError:
member_value = False
return member_value
this_app = AppOpenLSH.get_or_insert('KeyOpenLSH')
app_is_open = this_app.is_open
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s app_is_open, %s', frameinfo.filename, frameinfo.lineno+1, app_is_open)
tw_logged_in = False
if this_app.twitter_access_token_key:
tw_logged_in = True
self.session['tw_logged_in'] = True
self.session['auth.access_token.key'] = this_app.twitter_access_token_key
self.session['auth.access_token.secret'] = this_app.twitter_access_token_secret
app_is_closed = False
u = users.get_current_user()
if u:
demo_user = DemoUser.get_or_insert(u.user_id(), email = u.email(), nickname = u.nickname())
ulogged = 'User not logged in' if not u else 'User is %s' % u.nickname()
app_status = 'App is Open' if app_is_open else "App is Closed"
if users.is_current_user_admin():
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s Admin User, %s', frameinfo.filename, frameinfo.lineno+1, app_status)
url = users.create_logout_url(self.request.uri)
url_linktext = 'Google Logout'
elif u and app_is_open:
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s %s %s', frameinfo.filename, frameinfo.lineno+1, ulogged, app_status)
url = users.create_logout_url(self.request.uri)
url_linktext = 'Google Logout'
elif not u:
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s %s %s', frameinfo.filename, frameinfo.lineno+1, ulogged, app_status)
url = users.create_login_url(self.request.uri)
url_linktext = 'Google Login -- use your Gmail'
else:
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s %s %s', frameinfo.filename, frameinfo.lineno+1, ulogged, app_status)
url = users.create_logout_url(self.request.uri)
url_linktext = 'Google Logout'
app_is_closed = not app_is_open
tw_banner = ''
if tw_logged_in:
tw_banner = 'Ready for Tweets'
tweets = []
tweet_display = ''
if not app_is_closed:
duik = lookup(self.session, 'duik')
dui = ndb.Key(urlsafe = duik).get() if duik else None
if not dui:
dui = DemoUserInteraction.latest_for_user( u )
self.session['duik'] = dui.key.urlsafe() if dui else None
if dui:
tweets = dui.tweets
tw_banner = '%d Tweets as of %s' % (len(tweets), dui.asof.isoformat(' ')[:19])
tweet_display = '<br/>\n— '.join(tweets)
else:
dui = None
similar_sets, same_sets, accounted_ids = ([], [], [])
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s %s', frameinfo.filename, frameinfo.lineno+1, similar_sets)
if lookup(dui, 'calc_done'):
read_tweepy.LshTweets.show(self.session)
similar_sets, same_sets, accounted_ids = self.session['lsh_results']
frameinfo = getframeinfo(currentframe())
logging.info('file %s, line %s %s %s', frameinfo.filename, frameinfo.lineno+1, similar_sets, same_sets)
try:
if command == 'show_lsh_results':
# matched_tweets = [tweets[twid] for twid in range(len(tweets)) if twid in self.session['lsh_results'][1]]
other_tweets = [tweets[twid] for twid in range(len(tweets)) if twid not in self.session['lsh_results'][1]]
tweet_display = '<br/>\n— '.join(other_tweets)
            except Exception: pass
template_values = {
'app_is_closed': app_is_closed,
'google_logged_in': u,
'url': url,
'url_linktext': url_linktext,
'tw_logged_in': tw_logged_in,
'tw_banner': tw_banner,
'similar_sets': similar_sets,
'same_sets': same_sets,
'tweets': tweets,
'fetching': lookup(dui, 'fetching'),
'calculating': lookup(dui, 'calculating'),
'calc_done': lookup(dui, 'calc_done'),
'gaCode': settings.gaCode,
}
template = JINJA_ENVIRONMENT.get_template('tweets_index.html')
try:
self.response.write(template.render(template_values))
except UnicodeDecodeError:
template_values['tweets'] = ['unreadable content']
self.response.write(template.render(template_values))
def post(self):
cmd = self.request.get('command')
if cmd == 'calc_lsh':
read_tweepy.LshTweets.calc(self.session)
elif cmd == 'show_lsh_results':
read_tweepy.LshTweets.show(self.session)
self.get(cmd)
class WaitPage(session.BaseRequestHandler):
def get(self):
template_values = {
'gaCode': settings.gaCode,
}
template = JINJA_ENVIRONMENT.get_template('coming_soon.html')
try:
self.response.write(template.render(template_values))
except UnicodeDecodeError:
template_values['tweets'] = 'unreadable content'
self.response.write(template.render(template_values))
def post(self):
pass
urls = [
('/', MainPage),
('/coming_soon', WaitPage),
]
import read_tweepy
urls += read_tweepy.urls
import blobs
urls += blobs.urls
import mr_main
urls += mr_main.urls
import peer_belt_driver
urls += peer_belt_driver.urls
import test_db_datastore
urls += test_db_datastore.urls
sess_config = {}
sess_config['webapp2_extras.sessions'] = {
'secret_key': 'dcd99df0-824a-4331-9a55-2d5900e27732'
}
application = webapp2.WSGIApplication(urls, debug=True, config=sess_config)
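# Illustrative note (not original code): the nested `lookup` helper in
# MainPage.get reads a member from a dict-like or plain object, returning
# False when it is absent on both paths, so calls like lookup(None, 'duik')
# and lookup(dui, 'calc_done') are safe even before a session entry or
# DemoUserInteraction exists.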
|
{
"content_hash": "4ef8957827450768116b3251c5575048",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 126,
"avg_line_length": 40.45614035087719,
"alnum_prop": 0.5900549291702805,
"repo_name": "datasciencedev/locality-sensitive-hashing",
"id": "010199825af6fb757898cf5bec3f8fd5d9810256",
"size": "6918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19624"
},
{
"name": "HTML",
"bytes": "698473"
},
{
"name": "JavaScript",
"bytes": "35165"
},
{
"name": "Python",
"bytes": "1668137"
}
],
"symlink_target": ""
}
|
"""Flags and helpers for the compute routes commands."""
from googlecloudsdk.command_lib.compute import flags as compute_flags
HOSTS_ARG = compute_flags.ResourceArgument(
resource_name='host',
completion_resource_id='compute.hosts',
plural=True,
zonal_collection='compute.hosts')
|
{
"content_hash": "1c87535239affb144edd50e3a838bc1d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 29.9,
"alnum_prop": 0.745819397993311,
"repo_name": "Sorsly/subtle",
"id": "f8dcaee999ebc8d736500b062da70e905a40d30d",
"size": "894",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/sole_tenant_hosts/flags.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
}
|
from .orion import parse_orion
|
{
"content_hash": "3bc1ca3627639a4dba8952916858c555",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "astrofrog/hyperion",
"id": "e62a25962c26e93b18a28ed5fccdc965fc0e07e8",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperion/importers/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "31958"
},
{
"name": "CSS",
"bytes": "97"
},
{
"name": "Fortran",
"bytes": "529980"
},
{
"name": "Python",
"bytes": "779928"
}
],
"symlink_target": ""
}
|
import json
from db_conn import redis_db
class TicTacToe(object):
def __init__(self):
self.winning_combos = []
# Define the winning combinations by row
for row in range(3):
win_set = set()
for col in range(3):
win_set.add(str(row) + "," + str(col))
self.winning_combos.append(win_set)
# Define the winning combinations by Col
for col in range(3):
win_set = set()
for row in range(3):
win_set.add(str(row) + "," + str(col))
self.winning_combos.append(win_set)
# Define the winning combinations by Diagonal
self.winning_combos.append(set(["0,0", "1,1", "2,2"]))
self.winning_combos.append(set(["0,2", "1,1", "2,0"]))
def gen_key(self):
counter = redis_db.incr("handle:count", 1)
counter = int(counter) + 1
player_key = "player:" + str(counter)
return player_key
def init_player_data(self, handle, status="new", pair=""):
player_data = {}
player_data["handle"] = handle
player_data["status"] = status
player_data["pair"] = pair
player_data["my_moves"] = ""
return player_data
def new_player(self):
player_key = self.gen_key()
player_data = self.init_player_data(player_key, "new")
redis_db.hmset(player_key, player_data)
return player_key
def player_ready(self, player_key):
        # Find another player that's ready to play
paired_key = None
player_data = self.init_player_data(player_key, status="ready")
redis_db.hmset(player_key, player_data)
for player2_key in redis_db.keys("player:*"):
if player2_key != player_key \
and redis_db.hget(player2_key, "status") == "ready":
# Pair the Players and send them messages
player_data = self.init_player_data(player_key, "paired", player2_key)
redis_db.hmset(player_key, player_data)
player2_data = self.init_player_data(player2_key, "paired", player_key)
redis_db.hmset(player2_key, player2_data)
# Send Paired to both players
# print 'pair player: ', player_key, ' and ', player2_key
data = {}
data["action"] = "paired"
data["pair"] = player2_key
redis_db.publish(player_key, json.dumps(data))
data = {}
data["action"] = "paired"
data["pair"] = player_key
redis_db.publish(player2_key, json.dumps(data))
# print 'Start Game : ', player_key, ' and ', player2_key
# Send message to both players
data = {}
data["action"] = "game-start"
data["next_handle"] = player_key
data["valid-moves"] = self.open_positions()
# Send message to channel of both players
for channel_key, paired_key in [(player_key, player2_key),
(player2_key, player_key)]:
redis_db.publish(channel_key, json.dumps(data))
# print 'Start Game Done : ', player_key, ' and ', player2_key
paired_key = player2_key
break
return paired_key
def open_positions(self, moves_player_a="", moves_player_b=""):
"""Returns List of Open positions"""
open_loc = set()
for row in range(3):
for col in range(3):
open_loc.add(str(row) + ',' + str(col))
moves_player_a = moves_player_a.split(";")
moves_player_b = moves_player_b.split(";")
open_loc = open_loc - set(moves_player_a)
open_loc = open_loc - set(moves_player_b)
return ';'.join(list(open_loc))
def check_result(self, moves_player_a, moves_player_b):
"""Returns True if Game has ended with a Winner or Draw, else returns False"""
result = None
moves_player_a = moves_player_a.split(";")
moves_player_b = moves_player_b.split(";")
for win_set in self.winning_combos:
if win_set.issubset(moves_player_a):
result = "won"
break
elif win_set.issubset(moves_player_b):
result = "lost"
break
if not result:
rem_positions = 9 - len(moves_player_a) - len(moves_player_b)
if rem_positions == 0:
result = "draw"
return result
def player_move(self, player_key, data):
player_data = redis_db.hgetall(player_key)
if player_data["my_moves"]:
player_data["my_moves"] += ";" + data["move"]
else:
player_data["my_moves"] = data["move"]
redis_db.hset(player_key, "my_moves", player_data["my_moves"])
player2_key = player_data["pair"]
player2_data = redis_db.hgetall(player2_key)
# Put the message on the channel of Player2
redis_db.publish(player2_key, json.dumps(data))
# Check the game result
moves_player_1 = player_data["my_moves"]
moves_player_2 = player2_data["my_moves"]
result = self.check_result(moves_player_1, moves_player_2)
if not result:
open_positions = self.open_positions(moves_player_1, moves_player_2)
if not open_positions:
result = "draw"
if result:
player_data["status"] = "game-end"
player2_data["status"] = "game-end"
redis_db.hmset(player_key, player_data)
redis_db.hmset(player2_key, player2_data)
# Send the result on the channel
data = {}
data["action"] = "game-end"
data["next_handle"] = ""
data["result"] = result
if result == "draw":
data["win_handle"] = ""
elif result == "won":
data["win_handle"] = player_key
elif result == "lost":
data["win_handle"] = player2_key
else:
data = {}
data["action"] = "valid-moves"
data["next_handle"] = player_data["pair"]
data["valid-moves"] = open_positions
for channel_key in [player_key, player2_key]:
redis_db.publish(channel_key, json.dumps(data))
def get_paired(self, player_key):
return redis_db.hget(player_key, "pair")
def set_player_data(self, player_key, **data):
redis_db.hmset(player_key, data)
return True
def get_all_players(self):
player_keys = redis_db.keys("player:*")
players = {}
for player_key in player_keys:
players[player_key] = redis_db.hgetall(player_key)
return players
def game_over(self, player_key):
# Remove the player
player2_key = redis_db.hget(player_key, "pair")
player_data = self.init_player_data(player_key, "new")
redis_db.hmset(player_key, player_data)
player_data = self.init_player_data(player2_key, "new")
redis_db.hmset(player2_key, player_data)
def remove(self, player_key):
# Remove the player
return redis_db.delete(player_key)
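# Illustrative sketch (not part of the original module): exercising the pure
# game logic. Moves use the "row,col" encoding built in __init__; importing
# this module still requires db_conn/redis, but the calls below never touch
# the database.
if __name__ == '__main__':
    game = TicTacToe()
    assert game.check_result("0,0;0,1;0,2", "1,0;1,1") == "won"    # top row
    assert game.check_result("1,0;2,1", "0,0;1,1;2,2") == "lost"   # diagonal
    assert game.check_result("0,0", "1,1") is None                 # still open
    print(game.open_positions("0,0;0,1;0,2", "1,0;1,1"))           # 4 cells left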
|
{
"content_hash": "c089a069f02b638b98dec14e1d0b5887",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 87,
"avg_line_length": 39.72282608695652,
"alnum_prop": 0.5386509782459981,
"repo_name": "sampathweb/game-server",
"id": "60cf0d23a7c3bca245830303f160e7e478623eb3",
"size": "7309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tictactoe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57"
},
{
"name": "Python",
"bytes": "38149"
}
],
"symlink_target": ""
}
|
"""ResNet model for classifying images from Imagenet dataset.
Support single-host training with one or multiple devices.
ResNet as proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import division
from __future__ import print_function
import argparse
import functools
import itertools
import os
import json
import imagenet
import imagenet_utils
import resnet_model
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
_NUM_IMAGES = {
'train': 1281167,
'validation': 50000,
}
def get_model_fn(num_gpus, variable_strategy, num_workers):
"""Returns a function that will build the resnet model."""
def _resnet_model_fn(features, labels, mode, params):
"""Resnet model body.
Support single host, one or more GPU training. Parameter distribution can
    be one of the following schemes.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
      An EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
weight_decay = params['weight_decay']
momentum = params['momentum']
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params['data_format']
if not data_format:
if num_gpus == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if num_gpus == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = num_gpus
device_type = 'gpu'
loss, preds = _tower_fn(
is_training, weight_decay, features, labels,
data_format, params['resnet_size'], params['batch_norm_decay'],
params['batch_norm_epsilon'])
batches_per_epoch = _NUM_IMAGES['train'] / (params['train_batch_size'] * num_workers)
boundaries = [
int(batches_per_epoch * epoch) for epoch in [30, 60, 80, 90]]
staged_lr = [params['learning_rate'] * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]
learning_rate = tf.compat.v1.train.piecewise_constant(tf.compat.v1.train.get_global_step(),
boundaries, staged_lr)
tf.identity(learning_rate, name='learning_rate')
lr_s = tf.compat.v1.summary.scalar('learning_rate', learning_rate)
loss = tf.reduce_mean(loss, name='loss')
examples_sec_hook = imagenet_utils.ExamplesPerSecondHook(
params['train_batch_size'], every_n_steps=10)
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
train_op = optimizer.minimize(loss, global_step=tf.compat.v1.train.get_global_step())
predictions = {
'classes': preds['classes'],
'probabilities': preds['probabilities'],
'top5_class': preds['top5_classes'],
}
stacked_labels = labels
accuracy = tf.compat.v1.metrics.accuracy(stacked_labels, predictions['classes'])
#accuracy_5 = tf.compat.v1.metrics.recall_at_top_k(tf.cast(stacked_labels, tf.int64), predictions['top5_class'], k=5)
acc_s = tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
tensors_to_log = {'learning_rate': learning_rate, 'loss': loss, 'acc': accuracy[1]}
logging_hook = tf.estimator.LoggingTensorHook(
tensors=tensors_to_log, every_n_secs=10)
summary_op = [acc_s, lr_s]
summary_hook = tf.estimator.SummarySaverHook(
save_steps=100,
summary_op=summary_op)
train_hooks = [logging_hook, examples_sec_hook, summary_hook]
metrics = None
if (mode == tf.estimator.ModeKeys.EVAL):
metrics = {
'accuracy': accuracy,
#'accuracy_5': accuracy_5,
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics)
return _resnet_model_fn
def _tower_fn(is_training, weight_decay, feature, label, data_format,
resnet_size, batch_norm_decay, batch_norm_epsilon):
"""Build computation tower (Resnet).
Args:
is_training: true if is training graph.
weight_decay: weight regularization strength, a float.
feature: a Tensor.
label: a Tensor.
data_format: channels_last (NHWC) or channels_first (NCHW).
    resnet_size: size of the ResNet model (number of layers), an int.
batch_norm_decay: decay for batch normalization, a float.
batch_norm_epsilon: epsilon for batch normalization, a float.
Returns:
    A tuple with the loss for the tower and a dict of predictions.
"""
network = resnet_model.imagenet_resnet_v2(resnet_size, 1000 + 1, data_format)
logits = network(feature, is_training=is_training)
_, top_k_indices = tf.nn.top_k(logits, k=5)
tower_pred = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits),
'top5_classes': top_k_indices
}
tower_loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(
logits=logits, labels=label)
tower_loss = tf.reduce_mean(tower_loss)
model_params = tf.compat.v1.trainable_variables()
tower_loss += weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params])
#tower_grad = tf.gradients(tower_loss, model_params)
return tower_loss, tower_pred
def input_fn(data_dir,
subset,
num_gpus,
batch_size,
num_epochs=1,
use_distortion_for_training=True):
"""Create input graph for model.
Args:
data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train' and 'validation'.
    num_gpus: number of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
      towers.
use_distortion_for_training: True to use distortions.
Returns:
    a tf.data.Dataset of (features, labels) batches for the given subset.
"""
with tf.device('/cpu:0'):
use_distortion = subset == 'train' and use_distortion_for_training
dataset = imagenet.ImagenetDataSet(data_dir, subset, use_distortion)
print(num_gpus)
dataset = dataset.make_dataset(batch_size,
is_training=(subset == 'train'),
num_shards=num_gpus,
num_epochs=num_epochs)
return dataset
def main(output_dir, data_dir, num_gpus, train_epochs, epochs_per_eval, variable_strategy,
use_distortion_for_training, log_device_placement, num_intra_threads,
**hparams):
print('num of gpus:')
print(num_gpus)
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
is_dist = False
# handle dist traning info
if 'TF_CONFIG' in os.environ:
is_dist = True
tf_dist_conf = os.environ['TF_CONFIG']
conf = json.loads(tf_dist_conf)
if conf['task']['type'] == 'ps':
is_ps = True
else:
is_ps = False
'''
if is_ps:
if conf['task']['index'] == '0' or conf['task']['index'] == 0:
conf['cluster']['evaluator'] = [conf['cluster']['ps'][0]]
conf['task']['type'] = 'evaluator'
is_ps = False
print('ps act as evaluator')
'''
if conf['task']['type'] == 'master':
conf['task']['type'] = 'chief'
conf['cluster']['chief'] = conf['cluster']['master']
del conf['cluster']['master']
        # If an evaluator is needed, re-role the last worker as the evaluator.
n_workers = len(conf['cluster']['worker'])
last_worker = conf['cluster']['worker'][-1]
conf['cluster']['worker'] = conf['cluster']['worker'][0:-1]
conf['cluster']['evaluator'] = [last_worker]
if conf['task']['type'] == 'worker' and conf['task']['index'] == (n_workers - 1):
conf['task']['index'] = 0
            conf['task']['type'] = 'evaluator'
print(conf)
os.environ['TF_CONFIG'] = json.dumps(conf)
if is_dist:
if is_ps:
# dummy call, no usage for MultiWorkerMirroredStrategy() in dist train
distribution = tf.distribute.experimental.ParameterServerStrategy()
else:
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
else:
distribution = tf.distribute.MirroredStrategy()
# Session configuration.
sess_config = tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=log_device_placement,
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.compat.v1.GPUOptions(force_gpu_compatible=True))
config = tf.estimator.RunConfig(model_dir=output_dir,
save_checkpoints_secs=3600,
train_distribute=distribution,
eval_distribute=distribution,
session_config=sess_config)
hparams['is_chief']=config.is_chief
resnet_classifier = tf.estimator.Estimator(
model_fn=get_model_fn(num_gpus, variable_strategy, config.num_worker_replicas or 1),
config=config,
params=hparams)
num_workers = (config.num_worker_replicas or 1)
batches_per_epoch = _NUM_IMAGES['train'] / (hparams['train_batch_size'] * num_workers)
max_steps = train_epochs * batches_per_epoch
batch_size = hparams['train_batch_size']
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(
data_dir,
subset='train',
num_gpus=num_gpus,
batch_size=batch_size,
use_distortion_for_training=use_distortion_for_training,
num_epochs=train_epochs),
max_steps=max_steps)
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(
data_dir,
subset='validation',
batch_size=hparams['eval_batch_size'],
num_gpus=num_gpus),
steps=500,
start_delay_secs=0)
tf.estimator.train_and_evaluate(
resnet_classifier,
train_spec,
eval_spec)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
"""UAI SDK use data_dir output_dir and num_gpus to transfer system specific data
"""
parser.add_argument(
'--data_dir',
type=str,
required=True,
help='UAI SDK related. The directory where the imagenet input data is stored.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='UAI SDK related. The directory where the model will be stored.')
parser.add_argument(
'--variable-strategy',
choices=['CPU', 'GPU'],
type=str,
default='CPU',
help='Where to locate variable operations')
parser.add_argument(
'--num_gpus',
type=int,
default=1,
help='UAI SDK related. The number of gpus used.')
parser.add_argument(
'--resnet_size', type=int, default=50, choices=[18, 34, 50, 101, 152, 200],
help='The size of the ResNet model to use.')
parser.add_argument(
'--num-layers',
type=int,
default=44,
help='The number of layers of the model.')
parser.add_argument(
'--train-epochs',
type=int,
default=100,
help='The number of epochs to use for training.')
parser.add_argument(
'--epochs_per_eval',
type=int,
default=1,
help='The number of training epochs to run between evaluations.')
parser.add_argument(
'--train-batch-size',
type=int,
default=128,
help='Batch size for training.')
parser.add_argument(
'--eval-batch-size',
type=int,
default=100,
help='Batch size for validation.')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for MomentumOptimizer.')
parser.add_argument(
'--weight-decay',
type=float,
default=1e-4,
help='Weight decay for convolutions.')
parser.add_argument(
'--learning-rate',
type=float,
default=0.1,
help="""\
    This is the initial learning rate value. The learning rate will decrease
during training. For more details check the model_fn implementation in
this file.\
""")
parser.add_argument(
'--use-distortion-for-training',
type=bool,
default=True,
help='If doing image distortion for training.')
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""")
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--data-format',
type=str,
default=None,
help="""\
If not set, the data format best for the training device is used.
Allowed values: channels_first (NCHW) channels_last (NHWC).\
""")
parser.add_argument(
'--log-device-placement',
action='store_true',
default=False,
help='Whether to log device placement.')
parser.add_argument(
'--batch-norm-decay',
type=float,
default=0.997,
help='Decay for batch norm.')
parser.add_argument(
'--batch-norm-epsilon',
type=float,
default=1e-5,
help='Epsilon for batch norm.')
parser.add_argument(
'--work_dir',
type=str,
default='/data/',
help='UAI SDK related.')
parser.add_argument(
'--log_dir',
type=str,
default='/data/data/',
help='UAI SDK related.'
)
args = parser.parse_args()
if args.num_gpus < 0:
raise ValueError(
            'Invalid GPU count: \"--num_gpus\" must be 0 or a positive integer.')
if args.num_gpus == 0 and args.variable_strategy == 'GPU':
        raise ValueError('num_gpus=0, CPU must be used as parameter server. Set '
                         '--variable-strategy=CPU.')
if (args.num_layers - 2) % 6 != 0:
raise ValueError('Invalid --num-layers parameter.')
if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:
raise ValueError('--train-batch-size must be multiple of --num-gpus.')
if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:
raise ValueError('--eval-batch-size must be multiple of --num-gpus.')
main(**vars(args))
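# Illustrative sketch (hosts are hypothetical; not called anywhere): the
# TF_CONFIG handling in main() rewrites a legacy master/worker layout into
# chief/worker/evaluator. This mirrors that logic on a sample config.
def _demo_tf_config_rewrite():
    conf = {
        'cluster': {'master': ['h0:2222'],
                    'worker': ['h1:2222', 'h2:2222']},
        'task': {'type': 'master', 'index': 0},
    }
    if conf['task']['type'] == 'master':
        conf['task']['type'] = 'chief'
        conf['cluster']['chief'] = conf['cluster'].pop('master')
    # The last worker is re-roled as the evaluator.
    n_workers = len(conf['cluster']['worker'])
    last_worker = conf['cluster']['worker'][-1]
    conf['cluster']['worker'] = conf['cluster']['worker'][:-1]
    conf['cluster']['evaluator'] = [last_worker]
    if conf['task']['type'] == 'worker' and conf['task']['index'] == n_workers - 1:
        conf['task'] = {'type': 'evaluator', 'index': 0}
    # -> chief=['h0:2222'], worker=['h1:2222'], evaluator=['h2:2222']
    return conf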
|
{
"content_hash": "4d477a42a268c5c70c21f81b71563e0c",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 121,
"avg_line_length": 32.55507559395248,
"alnum_prop": 0.637298480727128,
"repo_name": "ucloud/uai-sdk",
"id": "52b59132f6d538d1752cd64dcdec79b587acd3c3",
"size": "15762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tensorflow-2.0/imagenet/train/code/imagenet_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "470557"
}
],
"symlink_target": ""
}
|
"""
A UNIX SSH server.
"""
import fcntl
import grp
import os
import pty
import pwd
import socket
import struct
import time
import tty
from zope.interface import implementer
from twisted.conch import ttymodes
from twisted.conch.avatar import ConchUser
from twisted.conch.error import ConchError
from twisted.conch.ls import lsLine
from twisted.conch.ssh import session, forwarding, filetransfer
from twisted.conch.ssh.filetransfer import (
FXF_READ, FXF_WRITE, FXF_APPEND, FXF_CREAT, FXF_TRUNC, FXF_EXCL
)
from twisted.conch.interfaces import ISession, ISFTPServer, ISFTPFile
from twisted.cred import portal
from twisted.internet.error import ProcessExitedAlready
from twisted.python import components, log
try:
import utmp
except ImportError:
utmp = None
@implementer(portal.IRealm)
class UnixSSHRealm:
def requestAvatar(self, username, mind, *interfaces):
user = UnixConchUser(username)
return interfaces[0], user, user.logout
class UnixConchUser(ConchUser):
def __init__(self, username):
ConchUser.__init__(self)
self.username = username
self.pwdData = pwd.getpwnam(self.username)
l = [self.pwdData[3]]
for groupname, password, gid, userlist in grp.getgrall():
if username in userlist:
l.append(gid)
self.otherGroups = l
self.listeners = {} # Dict mapping (interface, port) -> listener
self.channelLookup.update(
{"session": session.SSHSession,
"direct-tcpip": forwarding.openConnectForwardingClient})
self.subsystemLookup.update(
{"sftp": filetransfer.FileTransferServer})
def getUserGroupId(self):
return self.pwdData[2:4]
def getOtherGroups(self):
return self.otherGroups
def getHomeDir(self):
return self.pwdData[5]
def getShell(self):
return self.pwdData[6]
def global_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
from twisted.internet import reactor
try:
listener = self._runAsUser(
reactor.listenTCP, portToBind,
forwarding.SSHListenForwardingFactory(
self.conn,
(hostToBind, portToBind),
forwarding.SSHListenServerForwardingChannel),
interface=hostToBind)
except:
return 0
else:
self.listeners[(hostToBind, portToBind)] = listener
if portToBind == 0:
portToBind = listener.getHost()[2] # The port
return 1, struct.pack('>L', portToBind)
else:
return 1
def global_cancel_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
listener = self.listeners.get((hostToBind, portToBind), None)
if not listener:
return 0
del self.listeners[(hostToBind, portToBind)]
self._runAsUser(listener.stopListening)
return 1
def logout(self):
# Remove all listeners.
for listener in self.listeners.itervalues():
self._runAsUser(listener.stopListening)
log.msg(
'avatar %s logging out (%i)'
% (self.username, len(self.listeners)))
def _runAsUser(self, f, *args, **kw):
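        # Accepts either a single callable (invoked as f(*args, **kw)) or an
        # iterable of (func, args, kwargs) tuples; either way, the euid/egid
        # swap below wraps the call(s) and is restored in the finally block.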
euid = os.geteuid()
egid = os.getegid()
groups = os.getgroups()
uid, gid = self.getUserGroupId()
os.setegid(0)
os.seteuid(0)
os.setgroups(self.getOtherGroups())
os.setegid(gid)
os.seteuid(uid)
try:
f = iter(f)
except TypeError:
f = [(f, args, kw)]
try:
for i in f:
func = i[0]
args = len(i) > 1 and i[1] or ()
kw = len(i) > 2 and i[2] or {}
r = func(*args, **kw)
finally:
os.setegid(0)
os.seteuid(0)
os.setgroups(groups)
os.setegid(egid)
os.seteuid(euid)
return r
@implementer(ISession)
class SSHSessionForUnixConchUser:
def __init__(self, avatar, reactor=None):
"""
Construct an C{SSHSessionForUnixConchUser}.
@param avatar: The L{UnixConchUser} for whom this is an SSH session.
@param reactor: An L{IReactorProcess} used to handle shell and exec
requests. Uses the default reactor if None.
"""
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.avatar = avatar
self.environ = {'PATH': '/bin:/usr/bin:/usr/local/bin'}
self.pty = None
self.ptyTuple = 0
def addUTMPEntry(self, loggedIn=1):
if not utmp:
return
ipAddress = self.avatar.conn.transport.transport.getPeer().host
packedIp, = struct.unpack('L', socket.inet_aton(ipAddress))
ttyName = self.ptyTuple[2][5:]
t = time.time()
t1 = int(t)
t2 = int((t-t1) * 1e6)
entry = utmp.UtmpEntry()
entry.ut_type = loggedIn and utmp.USER_PROCESS or utmp.DEAD_PROCESS
entry.ut_pid = self.pty.pid
entry.ut_line = ttyName
entry.ut_id = ttyName[-4:]
entry.ut_tv = (t1, t2)
if loggedIn:
entry.ut_user = self.avatar.username
entry.ut_host = socket.gethostbyaddr(ipAddress)[0]
entry.ut_addr_v6 = (packedIp, 0, 0, 0)
a = utmp.UtmpRecord(utmp.UTMP_FILE)
a.pututline(entry)
a.endutent()
b = utmp.UtmpRecord(utmp.WTMP_FILE)
b.pututline(entry)
b.endutent()
def getPty(self, term, windowSize, modes):
self.environ['TERM'] = term
self.winSize = windowSize
self.modes = modes
master, slave = pty.openpty()
ttyname = os.ttyname(slave)
self.environ['SSH_TTY'] = ttyname
self.ptyTuple = (master, slave, ttyname)
def openShell(self, proto):
if not self.ptyTuple: # We didn't get a pty-req.
log.msg('tried to get shell without pty, failing')
raise ConchError("no pty")
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell()
self.environ['USER'] = self.avatar.username
self.environ['HOME'] = homeDir
self.environ['SHELL'] = shell
shellExec = os.path.basename(shell)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (
peer.host, peer.port, host.port)
self.getPtyOwnership()
self.pty = self._reactor.spawnProcess(
proto, shell, ['-%s' % (shellExec,)], self.environ, homeDir, uid,
gid, usePTY=self.ptyTuple)
self.addUTMPEntry()
fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
if self.modes:
self.setModes()
self.oldWrite = proto.transport.write
proto.transport.write = self._writeHack
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def execCommand(self, proto, cmd):
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell() or '/bin/sh'
self.environ['HOME'] = homeDir
command = (shell, '-c', cmd)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (
peer.host, peer.port, host.port)
if self.ptyTuple:
self.getPtyOwnership()
self.pty = self._reactor.spawnProcess(
proto, shell, command, self.environ, homeDir, uid, gid,
usePTY=self.ptyTuple or 0)
if self.ptyTuple:
self.addUTMPEntry()
if self.modes:
self.setModes()
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def getPtyOwnership(self):
ttyGid = os.stat(self.ptyTuple[2])[5]
uid, gid = self.avatar.getUserGroupId()
euid, egid = os.geteuid(), os.getegid()
os.setegid(0)
os.seteuid(0)
try:
os.chown(self.ptyTuple[2], uid, ttyGid)
finally:
os.setegid(egid)
os.seteuid(euid)
def setModes(self):
pty = self.pty
attr = tty.tcgetattr(pty.fileno())
for mode, modeValue in self.modes:
if mode not in ttymodes.TTYMODES:
continue
ttyMode = ttymodes.TTYMODES[mode]
if len(ttyMode) == 2: # Flag.
flag, ttyAttr = ttyMode
if not hasattr(tty, ttyAttr):
continue
ttyval = getattr(tty, ttyAttr)
if modeValue:
attr[flag] = attr[flag] | ttyval
else:
attr[flag] = attr[flag] & ~ttyval
elif ttyMode == 'OSPEED':
attr[tty.OSPEED] = getattr(tty, 'B%s' % (modeValue,))
elif ttyMode == 'ISPEED':
attr[tty.ISPEED] = getattr(tty, 'B%s' % (modeValue,))
else:
if not hasattr(tty, ttyMode):
continue
ttyval = getattr(tty, ttyMode)
attr[tty.CC][ttyval] = chr(modeValue)
tty.tcsetattr(pty.fileno(), tty.TCSANOW, attr)
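    # Editorial note: `modes` holds the (opcode, value) pairs parsed from the
    # SSH "pty-req" terminal-mode blob (RFC 4254, section 8). Assuming the
    # RFC opcode numbering, [(53, 0)] would clear ECHO, and
    # [(128, 9600), (129, 9600)] would set the input/output baud rates.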
def eofReceived(self):
if self.pty:
self.pty.closeStdin()
def closed(self):
if self.ptyTuple and os.path.exists(self.ptyTuple[2]):
ttyGID = os.stat(self.ptyTuple[2])[5]
os.chown(self.ptyTuple[2], 0, ttyGID)
if self.pty:
try:
self.pty.signalProcess('HUP')
except (OSError, ProcessExitedAlready):
pass
self.pty.loseConnection()
self.addUTMPEntry(0)
log.msg('shell closed')
def windowChanged(self, winSize):
self.winSize = winSize
fcntl.ioctl(
self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
def _writeHack(self, data):
"""
Hack to send ignore messages when we aren't echoing.
"""
if self.pty is not None:
attr = tty.tcgetattr(self.pty.fileno())[3]
if not attr & tty.ECHO and attr & tty.ICANON: # No echo.
self.avatar.conn.transport.sendIgnore('\x00'*(8+len(data)))
self.oldWrite(data)
@implementer(ISFTPServer)
class SFTPServerForUnixConchUser:
def __init__(self, avatar):
self.avatar = avatar
def _setAttrs(self, path, attrs):
"""
NOTE: this function assumes it runs as the logged-in user:
i.e. under _runAsUser()
"""
if "uid" in attrs and "gid" in attrs:
os.chown(path, attrs["uid"], attrs["gid"])
if "permissions" in attrs:
os.chmod(path, attrs["permissions"])
if "atime" in attrs and "mtime" in attrs:
os.utime(path, (attrs["atime"], attrs["mtime"]))
def _getAttrs(self, s):
return {
"size": s.st_size,
"uid": s.st_uid,
"gid": s.st_gid,
"permissions": s.st_mode,
"atime": int(s.st_atime),
"mtime": int(s.st_mtime)
}
def _absPath(self, path):
home = self.avatar.getHomeDir()
return os.path.abspath(os.path.join(home, path))
def gotVersion(self, otherVersion, extData):
return {}
def openFile(self, filename, flags, attrs):
return UnixSFTPFile(self, self._absPath(filename), flags, attrs)
def removeFile(self, filename):
filename = self._absPath(filename)
return self.avatar._runAsUser(os.remove, filename)
def renameFile(self, oldpath, newpath):
oldpath = self._absPath(oldpath)
newpath = self._absPath(newpath)
return self.avatar._runAsUser(os.rename, oldpath, newpath)
def makeDirectory(self, path, attrs):
path = self._absPath(path)
return self.avatar._runAsUser(
[(os.mkdir, (path,)), (self._setAttrs, (path, attrs))])
def removeDirectory(self, path):
path = self._absPath(path)
self.avatar._runAsUser(os.rmdir, path)
def openDirectory(self, path):
return UnixSFTPDirectory(self, self._absPath(path))
def getAttrs(self, path, followLinks):
path = self._absPath(path)
if followLinks:
s = self.avatar._runAsUser(os.stat, path)
else:
s = self.avatar._runAsUser(os.lstat, path)
return self._getAttrs(s)
def setAttrs(self, path, attrs):
path = self._absPath(path)
self.avatar._runAsUser(self._setAttrs, path, attrs)
def readLink(self, path):
path = self._absPath(path)
return self.avatar._runAsUser(os.readlink, path)
def makeLink(self, linkPath, targetPath):
linkPath = self._absPath(linkPath)
targetPath = self._absPath(targetPath)
return self.avatar._runAsUser(os.symlink, targetPath, linkPath)
def realPath(self, path):
return os.path.realpath(self._absPath(path))
def extendedRequest(self, extName, extData):
raise NotImplementedError
@implementer(ISFTPFile)
class UnixSFTPFile:
def __init__(self, server, filename, flags, attrs):
self.server = server
openFlags = 0
if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0:
openFlags = os.O_RDONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0:
openFlags = os.O_WRONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ:
openFlags = os.O_RDWR
if flags & FXF_APPEND == FXF_APPEND:
openFlags |= os.O_APPEND
if flags & FXF_CREAT == FXF_CREAT:
openFlags |= os.O_CREAT
if flags & FXF_TRUNC == FXF_TRUNC:
openFlags |= os.O_TRUNC
if flags & FXF_EXCL == FXF_EXCL:
openFlags |= os.O_EXCL
if "permissions" in attrs:
mode = attrs["permissions"]
del attrs["permissions"]
else:
mode = 0777
fd = server.avatar._runAsUser(os.open, filename, openFlags, mode)
if attrs:
server.avatar._runAsUser(server._setAttrs, filename, attrs)
self.fd = fd
def close(self):
return self.server.avatar._runAsUser(os.close, self.fd)
def readChunk(self, offset, length):
return self.server.avatar._runAsUser(
[(os.lseek, (self.fd, offset, 0)),
(os.read, (self.fd, length))])
def writeChunk(self, offset, data):
return self.server.avatar._runAsUser(
[(os.lseek, (self.fd, offset, 0)),
(os.write, (self.fd, data))])
def getAttrs(self):
s = self.server.avatar._runAsUser(os.fstat, self.fd)
return self.server._getAttrs(s)
def setAttrs(self, attrs):
raise NotImplementedError
class UnixSFTPDirectory:
def __init__(self, server, directory):
self.server = server
self.files = server.avatar._runAsUser(os.listdir, directory)
self.dir = directory
def __iter__(self):
return self
def next(self):
try:
f = self.files.pop(0)
except IndexError:
raise StopIteration
else:
s = self.server.avatar._runAsUser(
os.lstat, os.path.join(self.dir, f))
longname = lsLine(f, s)
attrs = self.server._getAttrs(s)
return (f, longname, attrs)
def close(self):
self.files = []
components.registerAdapter(
SFTPServerForUnixConchUser, UnixConchUser, filetransfer.ISFTPServer)
components.registerAdapter(
SSHSessionForUnixConchUser, UnixConchUser, session.ISession)
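# Usage sketch (editorial note, not part of the original module): with the
# adapters above registered, Twisted's component system can adapt an avatar
# on demand, e.g. (assuming a local account 'alice' exists):
#
#     avatar = UnixConchUser('alice')
#     sftp = filetransfer.ISFTPServer(avatar)   # -> SFTPServerForUnixConchUser
#     shell = session.ISession(avatar)          # -> SSHSessionForUnixConchUser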
# [end of venv/lib/python2.7/site-packages/twisted/conch/unix.py -- repo bdh1011/wau, license MIT]
import socket

host = "127.0.0.1"
port = 9999
server_address = (host, port)
fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd.connect(server_address)
try:
    while 1:
        cmd = input("client : ").encode("utf-8")
        #print(cmd)
        if cmd == b'':
            print("empty input is not allowed, please retry!\n")
            continue
        fd.send(cmd)
        #buff = fd.recv(1024).decode("utf-8")
        #print(buff)
except (EOFError, KeyboardInterrupt):
    pass
fd.close()
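# Sketch of a matching receiver (editorial note; an assumed counterpart for
# local testing, not taken from this repo):
#
#     import socket
#     srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     srv.bind(("127.0.0.1", 9999))
#     srv.listen(1)
#     conn, addr = srv.accept()
#     while True:
#         data = conn.recv(1024)
#         if not data:
#             break
#         print("server :", data.decode("utf-8"))
#     conn.close()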
# [end of socks0_client.py -- repo insertion/socks0_python, license MIT]
import logging
import boto3
logger = logging.getLogger(__name__)
SNS_CLIENT = None
def get_client():
global SNS_CLIENT
if not SNS_CLIENT:
SNS_CLIENT = boto3.client('sns')
return SNS_CLIENT
def send_sns_message(TopicArn=None, Message=None):
sns_client = get_client()
return sns_client.publish(
TopicArn=TopicArn,
Message=Message,
)
def publish_sns_message(topic):
def _send(event):
response = send_sns_message(
TopicArn=topic,
Message=event,
)
if 'MessageId' not in response:
raise Exception("Failed to send message topic: {} event: {}".format(topic, event))
logger.info("Sent message topic: %s message_id: %s message: %s", topic, response['MessageId'], event)
return _send
def handle_sns_message(responder):
    def _receive(sns_event, context):
        logger.info("Received an sns event: %s", sns_event)
        for record in sns_event.get('Records', []):
            sns = record.get('Sns', {})
            message = sns.get('Message')
            if not message:
                raise Exception("Got sns event without a message")
            responder(message)
    return _receive
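# Usage sketch (editorial note; the topic ARN and event shape below are
# illustrative, and the publish call needs valid AWS credentials):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    send_hello = publish_sns_message(
        'arn:aws:sns:us-east-1:123456789012:example-topic')
    send_hello('hello world')  # publishes, then logs the returned MessageId

    # The same responder style wrapped as an AWS Lambda handler:
    handler = handle_sns_message(lambda message: logger.info(message))
    handler({'Records': [{'Sns': {'Message': 'hello world'}}]}, None)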
# [end of xavier/aws/sns.py -- repo bepress/xavier, license MIT]
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from mock import MagicMock
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants.util.dirutil import safe_rmtree
from pants_test.tasks.task_test_base import TaskTestBase
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
# TODO (tdesai) Issue-240: Use JvmToolTaskTestBase for ScroogeGenTest
class ScroogeGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ScroogeGen
@property
def alias_groups(self):
return BuildFileAliases(targets={'java_thrift_library': JavaThriftLibrary,
'java_library': JavaLibrary,
'scala_library': ScalaLibrary})
def setUp(self):
super(ScroogeGenTest, self).setUp()
self.task_outdir = os.path.join(self.build_root, 'scrooge', 'gen-java')
def tearDown(self):
super(ScroogeGenTest, self).tearDown()
safe_rmtree(self.task_outdir)
def test_validate_compiler_configs(self):
# Set synthetic defaults for the global scope.
self.set_options_for_scope('thrift-defaults',
compiler='unchecked',
language='uniform',
rpc_style='async')
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='one',
sources=[],
dependencies=[],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='two',
sources=[],
dependencies=[':one'],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='three',
sources=[],
dependencies=[':one'],
rpc_style='finagle',
)
'''))
target = self.target('test_validate:one')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._validate_compiler_configs([self.target('test_validate:one')])
task._validate_compiler_configs([self.target('test_validate:two')])
with self.assertRaises(TaskError):
task._validate_compiler_configs([self.target('test_validate:three')])
def test_scala(self):
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/example/Example.scala')]
self._test_help('scala', 'finagle', ScalaLibrary, sources)
def test_android(self):
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/android_example/Example.java')]
self._test_help('android', 'finagle', JavaLibrary, sources)
def test_invalid_lang(self):
with self.assertRaises(TargetDefinitionException):
self._test_help('not-a-lang', 'finagle', JavaLibrary, [])
def test_invalid_style(self):
with self.assertRaises(TargetDefinitionException):
self._test_help('scala', 'not-a-style', JavaLibrary, [])
def _test_help(self, language, rpc_style, library_type, sources):
contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
build_string = dedent('''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='{language}',
rpc_style='{rpc_style}'
)
'''.format(language=language, rpc_style=rpc_style))
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('test_smoke', build_string)
target = self.target('test_smoke:a')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._declares_service = lambda source: False
task._outdir = MagicMock()
task._outdir.return_value = self.task_outdir
task.gen = MagicMock()
task.gen.return_value = {'test_smoke/a.thrift': sources}
saved_add_new_target = Context.add_new_target
try:
mock = MagicMock()
Context.add_new_target = mock
task.execute()
self.assertEquals(1, mock.call_count)
_, call_kwargs = mock.call_args
self.assertEquals(call_kwargs['target_type'], library_type)
self.assertEquals(call_kwargs['dependencies'], OrderedSet())
self.assertEquals(call_kwargs['provides'], None)
self.assertEquals(call_kwargs['sources'], [])
self.assertEquals(call_kwargs['derived_from'], target)
finally:
Context.add_new_target = saved_add_new_target
# [end of contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py -- repo manasapte/pants, license Apache-2.0]
from testtools import TestCase
from kmip.core.factories.keys import KeyFactory
from kmip.core.factories.secrets import SecretFactory
from kmip.core.factories.attributes import AttributeFactory
from kmip.core import attributes as attr
from kmip.core.attributes import ApplicationData
from kmip.core.attributes import ApplicationNamespace
from kmip.core.attributes import ApplicationSpecificInformation
from kmip.core.attributes import ContactInformation
from kmip.core.attributes import CryptographicAlgorithm
from kmip.core.attributes import CryptographicLength
from kmip.core.attributes import Name
from kmip.core.attributes import ObjectGroup
from kmip.core import enums
from kmip.core.enums import AttributeType
from kmip.core.enums import CryptographicAlgorithm as CryptoAlgorithmEnum
from kmip.core.enums import CryptographicUsageMask
from kmip.core.enums import NameType
from kmip.core import errors
from kmip.core.errors import ErrorStrings
from kmip.core import objects
from kmip.core.messages import contents
from kmip.core.messages import messages
from kmip.core.messages.payloads import create
from kmip.core.messages.payloads import get
from kmip.core.messages.payloads import register
from kmip.core.messages.payloads import locate
from kmip.core.messages.payloads import destroy
from kmip.core.misc import KeyFormatType
from kmip.core.primitives import TextString
from kmip.core.secrets import SymmetricKey
from kmip.core.secrets import Template
from kmip.core import utils
from kmip.core.utils import BytearrayStream
class TestRequestMessage(TestCase):
def setUp(self):
super(TestRequestMessage, self).setUp()
self.stream = BytearrayStream()
self.attribute_factory = AttributeFactory()
self.msg = errors.ErrorStrings.BAD_EXP_RECV
self.create = (
b'\x42\x00\x78\x01\x00\x00\x01\x20\x42\x00\x77\x01\x00\x00\x00\x38'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\xD8'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x79\x01\x00\x00\x00\xC0\x42\x00\x57\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x02\x00\x00\x00\x00\x42\x00\x91\x01\x00\x00\x00\xA8'
b'\x42\x00\x08\x01\x00\x00\x00\x30\x42\x00\x0A\x07\x00\x00\x00\x17'
b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x41\x6C'
b'\x67\x6F\x72\x69\x74\x68\x6D\x00\x42\x00\x0B\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x03\x00\x00\x00\x00\x42\x00\x08\x01\x00\x00\x00\x30'
b'\x42\x00\x0A\x07\x00\x00\x00\x14\x43\x72\x79\x70\x74\x6F\x67\x72'
b'\x61\x70\x68\x69\x63\x20\x4C\x65\x6E\x67\x74\x68\x00\x00\x00\x00'
b'\x42\x00\x0B\x02\x00\x00\x00\x04\x00\x00\x00\x80\x00\x00\x00\x00'
b'\x42\x00\x08\x01\x00\x00\x00\x30\x42\x00\x0A\x07\x00\x00\x00\x18'
b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
b'\x61\x67\x65\x20\x4D\x61\x73\x6B\x42\x00\x0B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x0C\x00\x00\x00\x00')
self.register = (
b'\x42\x00\x78\x01\x00\x00\x01\xC8\x42\x00\x77\x01\x00\x00\x00\x38'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x01\x80'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
b'\x42\x00\x79\x01\x00\x00\x01\x68\x42\x00\x57\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x06\x00\x00\x00\x00\x42\x00\x91\x01\x00\x00\x00\x00'
b'\x42\x00\x90\x01\x00\x00\x01\x48\x42\x00\x08\x01\x00\x00\x00\x28'
b'\x42\x00\x0A\x07\x00\x00\x00\x0C\x4F\x62\x6A\x65\x63\x74\x20\x47'
b'\x72\x6F\x75\x70\x00\x00\x00\x00\x42\x00\x0B\x07\x00\x00\x00\x06'
b'\x47\x72\x6F\x75\x70\x31\x00\x00\x42\x00\x08\x01\x00\x00\x00\x58'
b'\x42\x00\x0A\x07\x00\x00\x00\x20\x41\x70\x70\x6C\x69\x63\x61\x74'
b'\x69\x6F\x6E\x20\x53\x70\x65\x63\x69\x66\x69\x63\x20\x49\x6E\x66'
b'\x6F\x72\x6D\x61\x74\x69\x6F\x6E\x42\x00\x0B\x01\x00\x00\x00\x28'
b'\x42\x00\x03\x07\x00\x00\x00\x03\x73\x73\x6C\x00\x00\x00\x00\x00'
b'\x42\x00\x02\x07\x00\x00\x00\x0F\x77\x77\x77\x2E\x65\x78\x61\x6D'
b'\x70\x6C\x65\x2E\x63\x6F\x6D\x00\x42\x00\x08\x01\x00\x00\x00\x30'
b'\x42\x00\x0A\x07\x00\x00\x00\x13\x43\x6F\x6E\x74\x61\x63\x74\x20'
b'\x49\x6E\x66\x6F\x72\x6D\x61\x74\x69\x6F\x6E\x00\x00\x00\x00\x00'
b'\x42\x00\x0B\x07\x00\x00\x00\x03\x4A\x6F\x65\x00\x00\x00\x00\x00'
b'\x42\x00\x08\x01\x00\x00\x00\x30\x42\x00\x0A\x07\x00\x00\x00\x09'
b'\x78\x2D\x50\x75\x72\x70\x6F\x73\x65\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x0B\x07\x00\x00\x00\x0D\x64\x65\x6D\x6F\x6E\x73\x74\x72'
b'\x61\x74\x69\x6F\x6E\x00\x00\x00\x42\x00\x08\x01\x00\x00\x00\x40'
b'\x42\x00\x0A\x07\x00\x00\x00\x04\x4E\x61\x6D\x65\x00\x00\x00\x00'
b'\x42\x00\x0B\x01\x00\x00\x00\x28\x42\x00\x55\x07\x00\x00\x00\x09'
b'\x54\x65\x6D\x70\x6C\x61\x74\x65\x31\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x54\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
)
self.get = (
b'\x42\x00\x78\x01\x00\x00\x00\x90\x42\x00\x77\x01\x00\x00\x00\x38'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\x48'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x0A\x00\x00\x00\x00'
b'\x42\x00\x79\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
b'\x33\x30\x33\x38\x00\x00\x00\x00')
self.destroy = (
b'\x42\x00\x78\x01\x00\x00\x00\x90\x42\x00\x77\x01\x00\x00\x00\x38'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\x48'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x14\x00\x00\x00\x00'
b'\x42\x00\x79\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x66\x62\x34\x62\x35\x62\x39\x63\x2D\x36\x31\x38\x38\x2D\x34\x63'
b'\x36\x33\x2D\x38\x31\x34\x32\x2D\x66\x65\x39\x63\x33\x32\x38\x31'
b'\x32\x39\x66\x63\x00\x00\x00\x00'
)
# kmip-testcases-v1.1 section 3.1.3
self.locate = (
b'\x42\x00\x78\x01\x00\x00\x00\xd0\x42\x00\x77\x01\x00\x00\x00\x38'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6a\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6b\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0d\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0f\x01\x00\x00\x00\x88'
b'\x42\x00\x5c\x05\x00\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\x00'
b'\x42\x00\x79\x01\x00\x00\x00\x70\x42\x00\x08\x01\x00\x00\x00\x28'
b'\x42\x00\x0a\x07\x00\x00\x00\x0b\x4f\x62\x6a\x65\x63\x74\x20\x54'
b'\x79\x70\x65\x00\x00\x00\x00\x00\x42\x00\x0b\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x02\x00\x00\x00\x00\x42\x00\x08\x01\x00\x00\x00\x38'
b'\x42\x00\x0a\x07\x00\x00\x00\x04\x4e\x61\x6d\x65\x00\x00\x00\x00'
b'\x42\x00\x0b\x01\x00\x00\x00\x20\x42\x00\x55\x07\x00\x00\x00\x04'
b'\x4b\x65\x79\x31\x00\x00\x00\x00\x42\x00\x54\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00')
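        # Editorial note: the byte strings above are KMIP TTLV encodings --
        # each item is a 3-byte tag, a 1-byte type, a 4-byte big-endian
        # length, then the value padded out to 8 bytes. A minimal header
        # decode, assuming that layout (e.g. self.create starts with tag
        # 0x420078, type 0x01 == Structure, length 0x120):
        #
        #     import struct
        #     def ttlv_header(data):
        #         tag = int.from_bytes(data[0:3], 'big')
        #         typ = data[3]
        #         length, = struct.unpack('>I', data[4:8])
        #         return tag, typ, length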
def tearDown(self):
super(TestRequestMessage, self).tearDown()
def test_create_request_read(self):
self.stream = BytearrayStream(self.create)
request_message = messages.RequestMessage()
request_message.read(self.stream)
request_header = request_message.request_header
msg = "Bad request header type: expected {0}, received{1}"
self.assertIsInstance(request_header, messages.RequestHeader,
msg.format(messages.RequestHeader,
type(request_header)))
protocol_version = request_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
batch_count = request_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = request_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
self.assertEquals(1, len(batch_items),
self.msg.format('batch items', 'length',
1, len(batch_items)))
batch_item = batch_items[0]
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.RequestBatchItem,
msg.format(messages.RequestBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
self.assertEqual(enums.Operation.CREATE, operation.enum,
msg.format(enums.Operation.CREATE,
operation.enum))
request_payload = batch_item.request_payload
msg = "Bad request payload type: expected {0}, received {1}"
self.assertIsInstance(request_payload,
create.CreateRequestPayload,
msg.format(create.CreateRequestPayload,
type(request_payload)))
object_type = request_payload.object_type
msg = "Bad object type type: expected {0}, received {1}"
self.assertIsInstance(object_type, attr.ObjectType,
msg.format(attr.ObjectType,
type(object_type)))
msg = "Bad object type value: expected {0}, received {1}"
self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, object_type.enum,
msg.format(enums.ObjectType.SYMMETRIC_KEY,
object_type.enum))
template_attribute = request_payload.template_attribute
msg = "Bad template attribute type: expected {0}, received {1}"
self.assertIsInstance(template_attribute,
objects.TemplateAttribute,
msg.format(objects.TemplateAttribute,
type(template_attribute)))
attributes = template_attribute.attributes
self.assertIsInstance(attributes, list,
self.msg.format('attributes', 'type',
list, type(attributes)))
self.assertEquals(3, len(attributes),
self.msg.format('attributes', 'length',
3, len(attributes)))
attribute_a = attributes[0]
self.assertIsInstance(attribute_a, objects.Attribute,
self.msg.format('attribute', 'type',
objects.Attribute,
type(attribute_a)))
attribute_name = attribute_a.attribute_name
self.assertIsInstance(attribute_name, objects.Attribute.AttributeName,
self.msg.format('attribute name', 'type',
objects.Attribute.AttributeName,
type(attribute_name)))
self.assertEquals('Cryptographic Algorithm', attribute_name.value,
self.msg.format('attribute name', 'value',
'Cryptographic Algorithm',
attribute_name.value))
attribute_value = attribute_a.attribute_value
exp_type = attr.CryptographicAlgorithm
rcv_type = type(attribute_value)
self.assertIsInstance(attribute_value, exp_type,
self.msg.format('attribute value', 'type',
exp_type, rcv_type))
self.assertEquals(attribute_value.enum,
enums.CryptographicAlgorithm.AES,
self.msg.format('cryptographic algorithm', 'value',
enums.CryptographicAlgorithm.AES,
attribute_value.enum))
attribute_b = attributes[1]
self.assertIsInstance(attribute_b, objects.Attribute,
self.msg.format('attribute', 'type',
objects.Attribute,
type(attribute_b)))
attribute_name = attribute_b.attribute_name
self.assertIsInstance(attribute_name, objects.Attribute.AttributeName,
self.msg.format('attribute name', 'type',
objects.Attribute.AttributeName,
type(attribute_name)))
self.assertEquals('Cryptographic Length', attribute_name.value,
self.msg.format('attribute name', 'value',
'Cryptographic Length',
attribute_name.value))
attribute_value = attribute_b.attribute_value
exp_type = attr.CryptographicLength
rcv_type = type(attribute_value)
self.assertIsInstance(attribute_value, exp_type,
self.msg.format('attribute value', 'type',
exp_type, rcv_type))
self.assertEquals(attribute_value.value, 128,
self.msg.format('cryptographic length', 'value',
128, attribute_value.value))
        attribute_c = attributes[2]
        self.assertIsInstance(attribute_c, objects.Attribute,
                              self.msg.format('attribute', 'type',
                                              objects.Attribute,
                                              type(attribute_c)))
attribute_name = attribute_c.attribute_name
self.assertIsInstance(attribute_name, objects.Attribute.AttributeName,
self.msg.format('attribute name', 'type',
objects.Attribute.AttributeName,
type(attribute_name)))
self.assertEquals('Cryptographic Usage Mask', attribute_name.value,
self.msg.format('attribute name', 'value',
'Cryptographic Usage Mask',
attribute_name.value))
attribute_value = attribute_c.attribute_value
exp_type = attr.CryptographicUsageMask
rcv_type = type(attribute_value)
self.assertIsInstance(attribute_value, exp_type,
self.msg.format('attribute value', 'type',
exp_type, rcv_type))
flag_encrypt = CryptographicUsageMask.ENCRYPT
flag_decrypt = CryptographicUsageMask.DECRYPT
exp_value = flag_encrypt.value | flag_decrypt.value
self.assertEquals(attribute_value.value, exp_value,
self.msg.format('cryptographic usage mask', 'value',
exp_value, attribute_value.value))
def test_create_request_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
batch_count = contents.BatchCount(1)
request_header = messages.RequestHeader(protocol_version=prot_ver,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.CREATE)
object_type = attr.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
name = AttributeType.CRYPTOGRAPHIC_ALGORITHM
value = CryptoAlgorithmEnum.AES
attr_a = self.attribute_factory.create_attribute(name, value)
name = AttributeType.CRYPTOGRAPHIC_LENGTH
value = 128
attr_b = self.attribute_factory.create_attribute(name, value)
name = AttributeType.CRYPTOGRAPHIC_USAGE_MASK
value = [CryptographicUsageMask.ENCRYPT,
CryptographicUsageMask.DECRYPT]
attr_c = self.attribute_factory.create_attribute(name, value)
temp_attr = objects.TemplateAttribute(attributes=[attr_a, attr_b,
attr_c])
req_pl = create.CreateRequestPayload(object_type=object_type,
template_attribute=temp_attr)
batch_item = messages.RequestBatchItem(operation=operation,
request_payload=req_pl)
req_message = messages.RequestMessage(request_header=request_header,
batch_items=[batch_item])
req_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.create)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('request message', 'write',
len_exp, len_rcv))
msg = "Bad request message write: encoding mismatch"
self.assertEqual(self.create, result, msg)
def test_get_request_read(self):
self.stream = BytearrayStream(self.get)
request_message = messages.RequestMessage()
request_message.read(self.stream)
request_header = request_message.request_header
msg = "Bad request header type: expected {0}, received{0}"
self.assertIsInstance(request_header, messages.RequestHeader,
msg.format(messages.RequestHeader,
type(request_header)))
protocol_version = request_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
batch_count = request_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = request_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
self.assertEquals(1, len(batch_items),
self.msg.format('batch items', 'length',
1, len(batch_items)))
batch_item = batch_items[0]
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.RequestBatchItem,
msg.format(messages.RequestBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
self.assertEqual(enums.Operation.GET, operation.enum,
msg.format(enums.Operation.GET,
operation.enum))
request_payload = batch_item.request_payload
msg = "Bad request payload type: expected {0}, received {1}"
self.assertIsInstance(request_payload,
get.GetRequestPayload,
msg.format(get.GetRequestPayload,
type(request_payload)))
unique_identifier = request_payload.unique_identifier
msg = "Bad unique identifier type: expected {0}, received {1}"
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
msg.format(attr.UniqueIdentifier,
type(unique_identifier)))
msg = "Bad unique identifier value: expected {0}, received {1}"
self.assertEqual('49a1ca88-6bea-4fb2-b450-7e58802c3038',
unique_identifier.value,
msg.format('49a1ca88-6bea-4fb2-b450-7e58802c3038',
unique_identifier.value))
def test_get_request_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
batch_count = contents.BatchCount(1)
req_header = messages.RequestHeader(protocol_version=prot_ver,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.GET)
uuid = attr.UniqueIdentifier('49a1ca88-6bea-4fb2-b450-7e58802c3038')
request_payload = get.GetRequestPayload(unique_identifier=uuid)
batch_item = messages.RequestBatchItem(operation=operation,
request_payload=request_payload)
request_message = messages.RequestMessage(request_header=req_header,
batch_items=[batch_item])
request_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.get)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('request message', 'write',
len_exp, len_rcv))
msg = "Bad request message write: encoding mismatch"
self.assertEqual(self.get, result, msg)
def test_destroy_request_read(self):
self.stream = BytearrayStream(self.destroy)
request_message = messages.RequestMessage()
request_message.read(self.stream)
request_header = request_message.request_header
msg = "Bad request header type: expected {0}, received{0}"
self.assertIsInstance(request_header, messages.RequestHeader,
msg.format(messages.RequestHeader,
type(request_header)))
protocol_version = request_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
batch_count = request_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = request_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
self.assertEquals(1, len(batch_items),
self.msg.format('batch items', 'length',
1, len(batch_items)))
batch_item = batch_items[0]
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.RequestBatchItem,
msg.format(messages.RequestBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
exp_value = enums.Operation.DESTROY
rcv_value = operation.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
request_payload = batch_item.request_payload
msg = "Bad request payload type: expected {0}, received {1}"
exp_type = destroy.DestroyRequestPayload
rcv_type = type(request_payload)
self.assertIsInstance(request_payload, exp_type,
msg.format(exp_type, rcv_type))
unique_identifier = request_payload.unique_identifier
msg = "Bad unique identifier type: expected {0}, received {1}"
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
msg.format(attr.UniqueIdentifier,
type(unique_identifier)))
msg = "Bad unique identifier value: expected {0}, received {1}"
exp_value = 'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
rcv_value = unique_identifier.value
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
def test_destroy_request_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
batch_count = contents.BatchCount(1)
req_header = messages.RequestHeader(protocol_version=prot_ver,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.DESTROY)
uuid = attr.UniqueIdentifier('fb4b5b9c-6188-4c63-8142-fe9c328129fc')
request_payload = destroy.DestroyRequestPayload(unique_identifier=uuid)
batch_item = messages.RequestBatchItem(operation=operation,
request_payload=request_payload)
request_message = messages.RequestMessage(request_header=req_header,
batch_items=[batch_item])
request_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.destroy)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('request message', 'write',
len_exp, len_rcv))
msg = "Bad request message write: encoding mismatch"
self.assertEqual(self.destroy, result, msg)
def test_register_request_read(self):
self.stream = BytearrayStream(self.register)
request_message = messages.RequestMessage()
request_message.read(self.stream)
request_header = request_message.request_header
msg = "Bad request header type: expected {0}, received{0}"
self.assertIsInstance(request_header, messages.RequestHeader,
msg.format(messages.RequestHeader,
type(request_header)))
protocol_version = request_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
batch_count = request_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = request_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
self.assertEquals(1, len(batch_items),
self.msg.format('batch items', 'length',
1, len(batch_items)))
for batch_item in batch_items:
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.RequestBatchItem,
msg.format(messages.RequestBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
exp_value = enums.Operation.REGISTER
rcv_value = operation.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
request_payload = batch_item.request_payload
msg = "Bad request payload type: expected {0}, received {1}"
exp_type = register.RegisterRequestPayload
rcv_type = type(request_payload)
self.assertIsInstance(request_payload, exp_type,
msg.format(exp_type, rcv_type))
object_type = request_payload.object_type
msg = "Bad object type type: expected {0}, received {1}"
self.assertIsInstance(object_type, attr.ObjectType,
msg.format(attr.ObjectType,
type(object_type)))
msg = "Bad object type value: expected {0}, received {1}"
exp_value = enums.ObjectType.TEMPLATE
rcv_value = object_type.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
template_attribute = request_payload.template_attribute
msg = "Bad template attribute type: expected {0}, received {1}"
exp_type = objects.TemplateAttribute
rcv_type = type(template_attribute)
self.assertIsInstance(template_attribute, exp_type,
msg.format(exp_type, rcv_type))
names = template_attribute.names
exp_type = list
rcv_type = type(names)
            msg = ErrorStrings.BAD_EXP_RECV.format('TemplateAttribute.names',
                                                   'type', '{0}', '{1}')
            self.assertIsInstance(names, exp_type,
                                  msg.format(exp_type, rcv_type))
            exp_length = 0
            rcv_length = len(names)
            msg = ErrorStrings.BAD_EXP_RECV.format('TemplateAttribute.names',
                                                   'length', '{0}', '{1}')
            self.assertEqual(exp_length, rcv_length,
                             msg.format(exp_length, rcv_length))
            attributes = template_attribute.attributes
            exp_type = list
            rcv_type = type(attributes)
            msg = ErrorStrings.BAD_EXP_RECV.format(
                'TemplateAttribute.attributes', 'type', '{0}', '{1}')
            self.assertIsInstance(attributes, exp_type,
                                  msg.format(exp_type, rcv_type))
exp_length = 0
rcv_length = len(attributes)
msg = ErrorStrings.BAD_EXP_RECV.format(
'TemplateAttribute.attributes', 'length', '{0}', '{1}')
self.assertEqual(exp_length, rcv_length,
msg.format(exp_length, rcv_length))
def test_register_request_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
batch_count = contents.BatchCount(1)
req_header = messages.RequestHeader(protocol_version=prot_ver,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.REGISTER)
object_type = attr.ObjectType(enums.ObjectType.TEMPLATE)
tmpl_attr = objects.TemplateAttribute()
attributes = []
name = objects.Attribute.AttributeName('Object Group')
value = ObjectGroup('Group1')
attribute = objects.Attribute(attribute_name=name,
attribute_value=value)
attributes.append(attribute)
name = objects.Attribute.AttributeName('Application Specific '
'Information')
ap_n_name = 'ssl'
ap_n_value = 'www.example.com'
ap_n = ApplicationNamespace(ap_n_name)
ap_d = ApplicationData(ap_n_value)
value = ApplicationSpecificInformation(application_namespace=ap_n,
application_data=ap_d)
attribute = objects.Attribute(attribute_name=name,
attribute_value=value)
attributes.append(attribute)
name = objects.Attribute.AttributeName('Contact Information')
value = ContactInformation('Joe')
attribute = objects.Attribute(attribute_name=name,
attribute_value=value)
attributes.append(attribute)
name = objects.Attribute.AttributeName('x-Purpose')
value = TextString('demonstration')
attribute = objects.Attribute(attribute_name=name,
attribute_value=value)
attributes.append(attribute)
name = objects.Attribute.AttributeName('Name')
name_value = Name.NameValue('Template1')
name_type = Name.NameType(NameType.UNINTERPRETED_TEXT_STRING)
value = Name(name_value=name_value,
name_type=name_type)
attribute = objects.Attribute(attribute_name=name,
attribute_value=value)
attributes.append(attribute)
template = Template(attributes=attributes)
request_payload = register.RegisterRequestPayload(
object_type=object_type,
template_attribute=tmpl_attr,
secret=template)
batch_item = messages.RequestBatchItem(operation=operation,
request_payload=request_payload)
request_message = messages.RequestMessage(request_header=req_header,
batch_items=[batch_item])
request_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.register)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('request message', 'write',
len_exp, len_rcv))
msg = "Bad request message write: encoding mismatch"
self.assertEqual(self.register, result, msg)
def test_locate_request_read(self):
self.stream = BytearrayStream(self.locate)
request_message = messages.RequestMessage()
request_message.read(self.stream)
request_header = request_message.request_header
msg = "Bad request header type: expected {0}, received{0}"
self.assertIsInstance(request_header, messages.RequestHeader,
msg.format(messages.RequestHeader,
type(request_header)))
protocol_version = request_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
batch_count = request_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
        batch_items = request_message.batch_items
        msg = "Bad batch items type: expected {0}, received {1}"
        self.assertIsInstance(batch_items, list,
                              msg.format(list, type(batch_items)))
        self.assertEquals(1, len(batch_items),
                          self.msg.format('batch items', 'length',
                                          1, len(batch_items)))
batch_item = batch_items[0]
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.RequestBatchItem,
msg.format(messages.RequestBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
exp_value = enums.Operation.LOCATE
rcv_value = operation.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
request_payload = batch_item.request_payload
msg = "Bad request payload type: expected {0}, received {1}"
exp_type = locate.LocateRequestPayload
rcv_type = type(request_payload)
self.assertIsInstance(request_payload, exp_type,
msg.format(exp_type, rcv_type))
attributes = request_payload.attributes
msg = "Bad attributes type: expected {0}, received {1}"
exp_type = list
rcv_type = type(attributes)
self.assertIsInstance(attributes, exp_type,
msg.format(exp_type, rcv_type))
self.assertEqual(2, len(attributes),
self.msg.format('attribute', 'length',
2, len(attributes)))
attribute_a = attributes[0]
self.assertIsInstance(attribute_a, objects.Attribute,
self.msg.format('attribute', 'type',
objects.Attribute,
type(attribute_a)))
attribute_name = attribute_a.attribute_name
self.assertIsInstance(attribute_name, objects.Attribute.AttributeName,
self.msg.format('attribute name', 'type',
objects.Attribute.AttributeName,
type(attribute_name)))
self.assertEquals('Object Type', attribute_name.value,
self.msg.format('attribute name', 'value',
'Object Type',
attribute_name.value))
attribute_value = attribute_a.attribute_value
exp_type = attr.Enumeration
rcv_type = type(attribute_value)
self.assertIsInstance(attribute_value, exp_type,
self.msg.format('attribute value', 'type',
exp_type, rcv_type))
self.assertEquals(attribute_value.enum, enums.ObjectType.SYMMETRIC_KEY,
self.msg.format('ObjectType', 'value',
enums.ObjectType.SYMMETRIC_KEY,
attribute_value.enum))
        attribute_b = attributes[1]
        self.assertIsInstance(attribute_b, objects.Attribute,
                              self.msg.format('attribute', 'type',
                                              objects.Attribute,
                                              type(attribute_b)))
attribute_name = attribute_b.attribute_name
self.assertIsInstance(attribute_name, objects.Attribute.AttributeName,
self.msg.format('attribute name', 'type',
objects.Attribute.AttributeName,
type(attribute_name)))
self.assertEquals('Name', attribute_name.value,
self.msg.format('attribute name', 'value',
'Name',
attribute_name.value))
attribute_value = attribute_b.attribute_value
exp_type = Name
rcv_type = type(attribute_value)
self.assertIsInstance(attribute_value, exp_type,
self.msg.format('attribute value', 'type',
exp_type, rcv_type))
self.assertEquals('Key1', attribute_value.name_value.value,
self.msg.format('name value', 'value',
'Key1',
attribute_value.name_value.value))
class TestResponseMessage(TestCase):
def setUp(self):
super(TestResponseMessage, self).setUp()
self.stream = BytearrayStream()
self.key_factory = KeyFactory()
self.secret_factory = SecretFactory()
self.msg = errors.ErrorStrings.BAD_EXP_RECV
self.create = (
b'\x42\x00\x7B\x01\x00\x00\x00\xC0\x42\x00\x7A\x01\x00\x00\x00\x48'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x92\x09\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x4F\x9A\x54\xE5\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\x68'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x7F\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x7C\x01\x00\x00\x00\x40\x42\x00\x57\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x02\x00\x00\x00\x00\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x66\x62\x34\x62\x35\x62\x39\x63\x2D\x36\x31\x38\x38\x2D\x34\x63'
b'\x36\x33\x2D\x38\x31\x34\x32\x2D\x66\x65\x39\x63\x33\x32\x38\x31'
b'\x32\x39\x66\x63\x00\x00\x00\x00')
self.register = (
b'\x42\x00\x7B\x01\x00\x00\x00\xB0\x42\x00\x7A\x01\x00\x00\x00\x48'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x92\x09\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x4F\x9A\x54\xE5\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\x58'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
b'\x42\x00\x7F\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x7C\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x35\x63\x39\x62\x38\x31\x65\x66\x2D\x34\x65\x65\x35\x2D\x34\x32'
b'\x63\x64\x2D\x62\x61\x32\x64\x2D\x63\x30\x30\x32\x66\x64\x64\x30'
b'\x63\x37\x62\x33\x00\x00\x00\x00')
self.get = (
b'\x42\x00\x7B\x01\x00\x00\x01\x28\x42\x00\x7A\x01\x00\x00\x00\x48'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x92\x09\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x4F\x9A\x54\xE7\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\xD0'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x0A\x00\x00\x00\x00'
b'\x42\x00\x7F\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x7C\x01\x00\x00\x00\xA8\x42\x00\x57\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x02\x00\x00\x00\x00\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x34\x39\x61\x31\x63\x61\x38\x38\x2D\x36\x62\x65\x61\x2D\x34\x66'
b'\x62\x32\x2D\x62\x34\x35\x30\x2D\x37\x65\x35\x38\x38\x30\x32\x63'
b'\x33\x30\x33\x38\x00\x00\x00\x00\x42\x00\x8F\x01\x00\x00\x00\x60'
b'\x42\x00\x40\x01\x00\x00\x00\x58\x42\x00\x42\x05\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x45\x01\x00\x00\x00\x20'
b'\x42\x00\x43\x08\x00\x00\x00\x18\x73\x67\x57\x80\x51\x01\x2A\x6D'
b'\x13\x4A\x85\x5E\x25\xC8\xCD\x5E\x4C\xA1\x31\x45\x57\x29\xD3\xC8'
b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
b'\x42\x00\x2A\x02\x00\x00\x00\x04\x00\x00\x00\xA8\x00\x00\x00\x00'
)
self.destroy = (
b'\x42\x00\x7B\x01\x00\x00\x00\xB0\x42\x00\x7A\x01\x00\x00\x00\x48'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x92\x09\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x4F\x9A\x54\xE5\x42\x00\x0D\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0F\x01\x00\x00\x00\x58'
b'\x42\x00\x5C\x05\x00\x00\x00\x04\x00\x00\x00\x14\x00\x00\x00\x00'
b'\x42\x00\x7F\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x7C\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x66\x62\x34\x62\x35\x62\x39\x63\x2D\x36\x31\x38\x38\x2D\x34\x63'
b'\x36\x33\x2D\x38\x31\x34\x32\x2D\x66\x65\x39\x63\x33\x32\x38\x31'
b'\x32\x39\x66\x63\x00\x00\x00\x00')
# kmip-testcases-v1.1 section 3.1.3
self.locate = (
b'\x42\x00\x7b\x01\x00\x00\x00\xb0\x42\x00\x7a\x01\x00\x00\x00\x48'
b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6a\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6b\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x92\x09\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x4f\x9a\x54\xe6\x42\x00\x0d\x02\x00\x00\x00\x04'
b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x0f\x01\x00\x00\x00\x58'
b'\x42\x00\x5c\x05\x00\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\x00'
b'\x42\x00\x7f\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\x7c\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x34\x39\x61\x31\x63\x61\x38\x38\x2d\x36\x62\x65\x61\x2d\x34\x66'
b'\x62\x32\x2d\x62\x34\x35\x30\x2d\x37\x65\x35\x38\x38\x30\x32\x63'
b'\x33\x30\x33\x38\x00\x00\x00\x00')
def tearDown(self):
super(TestResponseMessage, self).tearDown()
def test_create_response_read(self):
self.stream = BytearrayStream(self.create)
response_message = messages.ResponseMessage()
response_message.read(self.stream)
response_header = response_message.response_header
self.assertIsInstance(response_header, messages.ResponseHeader,
self.msg.format('response header', 'type',
messages.ResponseHeader,
type(response_header)))
protocol_version = response_header.protocol_version
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
                              self.msg.format('response header', 'type',
contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
self.msg.format('protocol version major',
'type', exp_type, rcv_type))
self.assertEqual(1, protocol_version_major.value,
self.msg.format('protocol version major', 'value',
1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor,
contents.ProtocolVersion.ProtocolVersionMinor,
self.msg.format('protocol version minor',
'type', exp_type, rcv_type))
self.assertEqual(1, protocol_version_minor.value,
self.msg.format('protocol version minor', 'value',
1, protocol_version_minor.value))
time_stamp = response_header.time_stamp
value = 0x4f9a54e5 # Fri Apr 27 10:12:21 CEST 2012
self.assertIsInstance(time_stamp, contents.TimeStamp,
                              self.msg.format('time stamp', 'type',
contents.TimeStamp,
type(time_stamp)))
self.assertEqual(time_stamp.value, value,
self.msg.format('time stamp', 'value',
time_stamp.value, value))
batch_count = response_header.batch_count
self.assertIsInstance(batch_count, contents.BatchCount,
self.msg.format('batch count', 'type',
contents.BatchCount,
type(batch_count)))
self.assertEqual(1, batch_count.value,
self.msg.format('batch count', 'value', 1,
batch_count.value))
batch_items = response_message.batch_items
self.assertIsInstance(batch_items, list,
self.msg.format('batch items', 'type',
list, type(batch_items)))
for batch_item in batch_items:
self.assertIsInstance(batch_item, messages.ResponseBatchItem,
self.msg.format('batch item', 'type',
messages.ResponseBatchItem,
type(batch_item)))
operation = batch_item.operation
self.assertIsInstance(operation, contents.Operation,
self.msg.format('operation', 'type',
contents.Operation,
type(operation)))
self.assertEqual(enums.Operation.CREATE, operation.enum,
self.msg.format('operation', 'value',
enums.Operation.CREATE,
operation.enum))
result_status = batch_item.result_status
self.assertIsInstance(result_status, contents.ResultStatus,
self.msg.format('result status', 'type',
contents.ResultStatus,
type(result_status)))
self.assertEqual(enums.ResultStatus.SUCCESS, result_status.enum,
self.msg.format('result status', 'value',
enums.ResultStatus.SUCCESS,
result_status.enum))
response_payload = batch_item.response_payload
exp_type = create.CreateResponsePayload
rcv_type = type(response_payload)
self.assertIsInstance(response_payload, exp_type,
self.msg.format('response payload', 'type',
exp_type, rcv_type))
object_type = response_payload.object_type
self.assertIsInstance(object_type, attr.ObjectType,
self.msg.format('object type', 'type',
attr.ObjectType,
type(object_type)))
self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, object_type.enum,
self.msg.format('object type', 'value',
enums.ObjectType.SYMMETRIC_KEY,
object_type.enum))
unique_identifier = response_payload.unique_identifier
value = 'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
self.msg.format('unique identifier', 'type',
attr.UniqueIdentifier,
type(unique_identifier)))
self.assertEqual(value, unique_identifier.value,
self.msg.format('unique identifier', 'value',
unique_identifier.value, value))
def test_create_response_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
# Fri Apr 27 10:12:21 CEST 2012
time_stamp = contents.TimeStamp(0x4f9a54e5)
batch_count = contents.BatchCount(1)
response_header = messages.ResponseHeader(protocol_version=prot_ver,
time_stamp=time_stamp,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.CREATE)
result_status = contents.ResultStatus(enums.ResultStatus.SUCCESS)
object_type = attr.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
uuid = 'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
uniq_id = attr.UniqueIdentifier(uuid)
resp_pl = create.CreateResponsePayload(object_type=object_type,
unique_identifier=uniq_id)
batch_item = messages.ResponseBatchItem(operation=operation,
result_status=result_status,
response_payload=resp_pl)
rm = messages.ResponseMessage(response_header=response_header,
batch_items=[batch_item])
rm.write(self.stream)
result = self.stream.read()
len_exp = len(self.create)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('response message', 'write', len_exp,
len_rcv))
msg = "Bad response message write: encoding mismatch"
self.assertEqual(self.create, result, msg)
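    # The *_write tests build the same message object graph by hand,
    # serialize it, and compare the output byte-for-byte against the
    # recorded kmip-testcases-v1.1 encodings loaded in setUp.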
def test_get_response_read(self):
self.stream = BytearrayStream(self.get)
response_message = messages.ResponseMessage()
response_message.read(self.stream)
response_header = response_message.response_header
self.assertIsInstance(response_header, messages.ResponseHeader,
self.msg.format('response header', 'type',
messages.ResponseHeader,
type(response_header)))
protocol_version = response_header.protocol_version
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
                              self.msg.format('response header', 'type',
contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
self.msg.format('protocol version major', 'type',
exp_type, rcv_type))
self.assertEqual(1, protocol_version_major.value,
self.msg.format('protocol version major', 'value',
1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
self.msg.format('protocol version minor', 'type',
exp_type, rcv_type))
self.assertEqual(1, protocol_version_minor.value,
self.msg.format('protocol version minor', 'value',
1, protocol_version_minor.value))
time_stamp = response_header.time_stamp
value = 0x4f9a54e7 # Fri Apr 27 10:12:23 CEST 2012
self.assertIsInstance(time_stamp, contents.TimeStamp,
                              self.msg.format('time stamp', 'type',
contents.TimeStamp,
type(time_stamp)))
self.assertEqual(time_stamp.value, value,
self.msg.format('time stamp', 'value',
time_stamp.value, value))
batch_count = response_header.batch_count
self.assertIsInstance(batch_count, contents.BatchCount,
self.msg.format('batch count', 'type',
contents.BatchCount,
type(batch_count)))
self.assertEqual(1, batch_count.value,
self.msg.format('batch count', 'value', 1,
batch_count.value))
batch_items = response_message.batch_items
self.assertIsInstance(batch_items, list,
self.msg.format('batch items', 'type',
list, type(batch_items)))
for batch_item in batch_items:
self.assertIsInstance(batch_item, messages.ResponseBatchItem,
self.msg.format('batch item', 'type',
messages.ResponseBatchItem,
type(batch_item)))
operation = batch_item.operation
self.assertIsInstance(operation, contents.Operation,
self.msg.format('operation', 'type',
contents.Operation,
type(operation)))
self.assertEqual(enums.Operation.GET, operation.enum,
self.msg.format('operation', 'value',
enums.Operation.GET,
operation.enum))
result_status = batch_item.result_status
self.assertIsInstance(result_status, contents.ResultStatus,
self.msg.format('result status', 'type',
contents.ResultStatus,
type(result_status)))
self.assertEqual(enums.ResultStatus.SUCCESS, result_status.enum,
self.msg.format('result status', 'value',
enums.ResultStatus.SUCCESS,
result_status.enum))
response_payload = batch_item.response_payload
exp_type = get.GetResponsePayload
rcv_type = type(response_payload)
self.assertIsInstance(response_payload, exp_type,
self.msg.format('response payload', 'type',
exp_type, rcv_type))
object_type = response_payload.object_type
self.assertIsInstance(object_type, attr.ObjectType,
self.msg.format('object type', 'type',
attr.ObjectType,
type(object_type)))
self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, object_type.enum,
self.msg.format('object type', 'value',
enums.ObjectType.SYMMETRIC_KEY,
object_type.enum))
unique_identifier = response_payload.unique_identifier
value = '49a1ca88-6bea-4fb2-b450-7e58802c3038'
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
self.msg.format('unique identifier', 'type',
attr.UniqueIdentifier,
type(unique_identifier)))
self.assertEqual(value, unique_identifier.value,
self.msg.format('unique identifier', 'value',
unique_identifier.value, value))
secret = response_payload.secret
self.assertIsInstance(secret, SymmetricKey,
self.msg.format('secret', 'type',
SymmetricKey, type(secret)))
key_block = secret.key_block
self.assertIsInstance(key_block, objects.KeyBlock,
self.msg.format('key_block', 'type',
objects.KeyBlock,
type(key_block)))
key_format_type = key_block.key_format_type
exp_type = KeyFormatType
rcv_type = type(key_format_type)
self.assertIsInstance(key_format_type, exp_type,
self.msg.format('key_format_type', 'type',
exp_type, rcv_type))
key_value = key_block.key_value
self.assertIsInstance(key_value, objects.KeyValue,
self.msg.format('key_value', 'type',
objects.KeyValue,
type(key_value)))
key_material = key_value.key_material
value = (
b'\x73\x67\x57\x80\x51\x01\x2A\x6D\x13\x4A\x85\x5E\x25\xC8\xCD'
b'\x5E\x4C\xA1\x31\x45\x57\x29\xD3\xC8')
self.assertIsInstance(key_material, objects.KeyMaterial,
self.msg.format('key_material', 'type',
objects.KeyMaterial,
type(key_material)))
exp = utils.hexlify_bytearray(value)
obs = utils.hexlify_bytearray(key_material.value)
self.assertEqual(exp, obs, self.msg.format('key_material', 'value',
exp, obs))
cryptographic_algorithm = key_block.cryptographic_algorithm
exp_type = attr.CryptographicAlgorithm
rcv_type = type(cryptographic_algorithm)
self.assertIsInstance(cryptographic_algorithm, exp_type,
self.msg.format('cryptographic_algorithm',
'type', exp_type, rcv_type))
exp = enums.CryptographicAlgorithm.TRIPLE_DES
obs = cryptographic_algorithm.enum
self.assertEqual(exp, obs,
self.msg.format('cryptographic_algorithm',
'value', exp, obs))
cryptographic_length = key_block.cryptographic_length
self.assertIsInstance(cryptographic_length,
attr.CryptographicLength,
self.msg.format('cryptographic_length',
'type',
attr.CryptographicLength,
type(cryptographic_length)))
exp = 168
obs = cryptographic_length.value
self.assertEqual(exp, obs, self.msg.format('cryptographic_length',
'value', exp, obs))
def test_get_response_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
# Fri Apr 27 10:12:23 CEST 2012
time_stamp = contents.TimeStamp(0x4f9a54e7)
batch_count = contents.BatchCount(1)
response_header = messages.ResponseHeader(protocol_version=prot_ver,
time_stamp=time_stamp,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.GET)
result_status = contents.ResultStatus(enums.ResultStatus.SUCCESS)
object_type = attr.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
uuid = '49a1ca88-6bea-4fb2-b450-7e58802c3038'
uniq_id = attr.UniqueIdentifier(uuid)
key = (
b'\x73\x67\x57\x80\x51\x01\x2A\x6D\x13\x4A\x85\x5E\x25\xC8\xCD\x5E'
b'\x4C\xA1\x31\x45\x57\x29\xD3\xC8')
crypto_algorithm = enums.CryptographicAlgorithm.TRIPLE_DES
cryptographic_length = 168
key_format_type = KeyFormatType(enums.KeyFormatType.RAW)
key_material = objects.KeyMaterial(key)
key_value = objects.KeyValue(key_material)
cryptographic_algorithm = CryptographicAlgorithm(crypto_algorithm)
cryptographic_length = CryptographicLength(cryptographic_length)
key_block = objects.KeyBlock(
key_format_type=key_format_type,
key_compression_type=None,
key_value=key_value,
cryptographic_algorithm=cryptographic_algorithm,
cryptographic_length=cryptographic_length,
key_wrapping_data=None)
secret = SymmetricKey(key_block)
resp_pl = get.GetResponsePayload(object_type=object_type,
unique_identifier=uniq_id,
secret=secret)
batch_item = messages.ResponseBatchItem(operation=operation,
result_status=result_status,
response_payload=resp_pl)
rm = messages.ResponseMessage(response_header=response_header,
batch_items=[batch_item])
rm.write(self.stream)
result = self.stream.read()
len_exp = len(self.get)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('get response message', 'write',
len_exp, len_rcv))
msg = "Bad get response message write: encoding mismatch"
self.assertEqual(self.get, result, msg)
def test_destroy_response_read(self):
self.stream = BytearrayStream(self.destroy)
response_message = messages.ResponseMessage()
response_message.read(self.stream)
response_header = response_message.response_header
msg = "Bad response header type: expected {0}, received{1}"
self.assertIsInstance(response_header, messages.ResponseHeader,
msg.format(messages.ResponseHeader,
type(response_header)))
protocol_version = response_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
time_stamp = response_header.time_stamp
value = 0x4f9a54e5 # Fri Apr 27 10:12:21 CEST 2012
self.assertIsInstance(time_stamp, contents.TimeStamp,
                              self.msg.format('time stamp', 'type',
contents.TimeStamp,
type(time_stamp)))
self.assertEqual(time_stamp.value, value,
self.msg.format('time stamp', 'value',
time_stamp.value, value))
batch_count = response_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = response_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
        self.assertEqual(1, len(batch_items),
                         self.msg.format('batch items', 'length',
                                         1, len(batch_items)))
for batch_item in batch_items:
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.ResponseBatchItem,
msg.format(messages.ResponseBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
exp_value = enums.Operation.DESTROY
rcv_value = operation.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
result_status = batch_item.result_status
self.assertIsInstance(result_status, contents.ResultStatus,
self.msg.format('result status', 'type',
contents.ResultStatus,
type(result_status)))
self.assertEqual(enums.ResultStatus.SUCCESS, result_status.enum,
self.msg.format('result status', 'value',
enums.ResultStatus.SUCCESS,
result_status.enum))
response_payload = batch_item.response_payload
msg = "Bad response payload type: expected {0}, received {1}"
exp_type = destroy.DestroyResponsePayload
rcv_type = type(response_payload)
self.assertIsInstance(response_payload, exp_type,
msg.format(exp_type, rcv_type))
unique_identifier = response_payload.unique_identifier
msg = "Bad unique identifier type: expected {0}, received {1}"
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
msg.format(attr.UniqueIdentifier,
type(unique_identifier)))
msg = "Bad unique identifier value: expected {0}, received {1}"
exp_value = 'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
rcv_value = unique_identifier.value
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
def test_destroy_response_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
# Fri Apr 27 10:12:21 CEST 2012
time_stamp = contents.TimeStamp(0x4f9a54e5)
batch_count = contents.BatchCount(1)
resp_hdr = messages.ResponseHeader(protocol_version=prot_ver,
time_stamp=time_stamp,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.DESTROY)
result_status = contents.ResultStatus(enums.ResultStatus.SUCCESS)
uuid = attr.UniqueIdentifier('fb4b5b9c-6188-4c63-8142-fe9c328129fc')
resp_pl = destroy.DestroyResponsePayload(unique_identifier=uuid)
batch_item = messages.ResponseBatchItem(operation=operation,
result_status=result_status,
response_payload=resp_pl)
response_message = messages.ResponseMessage(response_header=resp_hdr,
batch_items=[batch_item])
response_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.destroy)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('response message', 'write',
len_exp, len_rcv))
msg = "Bad response message write: encoding mismatch"
self.assertEqual(self.destroy, result, msg)
def test_register_response_read(self):
self.stream = BytearrayStream(self.register)
response_message = messages.ResponseMessage()
response_message.read(self.stream)
response_header = response_message.response_header
msg = "Bad response header type: expected {0}, received{1}"
self.assertIsInstance(response_header, messages.ResponseHeader,
msg.format(messages.ResponseHeader,
type(response_header)))
protocol_version = response_header.protocol_version
msg = "Bad protocol version type: expected {0}, received {1}"
self.assertIsInstance(protocol_version, contents.ProtocolVersion,
msg.format(contents.ProtocolVersion,
type(protocol_version)))
protocol_version_major = protocol_version.protocol_version_major
msg = "Bad protocol version major type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMajor
rcv_type = type(protocol_version_major)
self.assertIsInstance(protocol_version_major, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version major value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_major.value,
msg.format(1, protocol_version_major.value))
protocol_version_minor = protocol_version.protocol_version_minor
msg = "Bad protocol version minor type: expected {0}, received {1}"
exp_type = contents.ProtocolVersion.ProtocolVersionMinor
rcv_type = type(protocol_version_minor)
self.assertIsInstance(protocol_version_minor, exp_type,
msg.format(exp_type, rcv_type))
msg = "Bad protocol version minor value: expected {0}, received {1}"
self.assertEqual(1, protocol_version_minor.value,
msg.format(1, protocol_version_minor.value))
time_stamp = response_header.time_stamp
value = 0x4f9a54e5 # Fri Apr 27 10:12:21 CEST 2012
self.assertIsInstance(time_stamp, contents.TimeStamp,
                              self.msg.format('time stamp', 'type',
contents.TimeStamp,
type(time_stamp)))
self.assertEqual(time_stamp.value, value,
self.msg.format('time stamp', 'value',
time_stamp.value, value))
batch_count = response_header.batch_count
msg = "Bad batch count type: expected {0}, received {1}"
self.assertIsInstance(batch_count, contents.BatchCount,
msg.format(contents.BatchCount,
type(batch_count)))
msg = "Bad batch count value: expected {0}, received {1}"
self.assertEqual(1, batch_count.value,
msg.format(1, batch_count.value))
batch_items = response_message.batch_items
msg = "Bad batch items type: expected {0}, received {1}"
self.assertIsInstance(batch_items, list,
msg.format(list, type(batch_items)))
        self.assertEqual(1, len(batch_items),
                         self.msg.format('batch items', 'length',
                                         1, len(batch_items)))
for batch_item in batch_items:
msg = "Bad batch item type: expected {0}, received {1}"
self.assertIsInstance(batch_item, messages.ResponseBatchItem,
msg.format(messages.ResponseBatchItem,
type(batch_item)))
operation = batch_item.operation
msg = "Bad operation type: expected {0}, received {1}"
self.assertIsInstance(operation, contents.Operation,
msg.format(contents.Operation,
type(operation)))
msg = "Bad operation value: expected {0}, received {1}"
exp_value = enums.Operation.REGISTER
rcv_value = operation.enum
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
result_status = batch_item.result_status
self.assertIsInstance(result_status, contents.ResultStatus,
self.msg.format('result status', 'type',
contents.ResultStatus,
type(result_status)))
self.assertEqual(enums.ResultStatus.SUCCESS, result_status.enum,
self.msg.format('result status', 'value',
enums.ResultStatus.SUCCESS,
result_status.enum))
response_payload = batch_item.response_payload
msg = "Bad response payload type: expected {0}, received {1}"
exp_type = register.RegisterResponsePayload
rcv_type = type(response_payload)
self.assertIsInstance(response_payload, exp_type,
msg.format(exp_type, rcv_type))
unique_identifier = response_payload.unique_identifier
msg = "Bad unique identifier type: expected {0}, received {1}"
self.assertIsInstance(unique_identifier, attr.UniqueIdentifier,
msg.format(attr.UniqueIdentifier,
type(unique_identifier)))
msg = "Bad unique identifier value: expected {0}, received {1}"
exp_value = '5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3'
rcv_value = unique_identifier.value
self.assertEqual(exp_value, rcv_value,
msg.format(exp_value, rcv_value))
def test_register_response_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
# Fri Apr 27 10:12:21 CEST 2012
time_stamp = contents.TimeStamp(0x4f9a54e5)
batch_count = contents.BatchCount(1)
resp_hdr = messages.ResponseHeader(protocol_version=prot_ver,
time_stamp=time_stamp,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.REGISTER)
result_status = contents.ResultStatus(enums.ResultStatus.SUCCESS)
uuid = attr.UniqueIdentifier('5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3')
resp_pl = register.RegisterResponsePayload(unique_identifier=uuid)
batch_item = messages.ResponseBatchItem(operation=operation,
result_status=result_status,
response_payload=resp_pl)
response_message = messages.ResponseMessage(response_header=resp_hdr,
batch_items=[batch_item])
response_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.register)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('response message', 'write',
len_exp, len_rcv))
msg = "Bad response message write: encoding mismatch"
self.assertEqual(self.register, result, msg)
def test_locate_response_write(self):
prot_ver = contents.ProtocolVersion.create(1, 1)
# Fri Apr 27 10:12:22 CEST 2012
time_stamp = contents.TimeStamp(0x4f9a54e6)
batch_count = contents.BatchCount(1)
resp_hdr = messages.ResponseHeader(protocol_version=prot_ver,
time_stamp=time_stamp,
batch_count=batch_count)
operation = contents.Operation(enums.Operation.LOCATE)
result_status = contents.ResultStatus(enums.ResultStatus.SUCCESS)
uuid = attr.UniqueIdentifier('49a1ca88-6bea-4fb2-b450-7e58802c3038')
resp_pl = locate.LocateResponsePayload(unique_identifiers=[uuid])
batch_item = messages.ResponseBatchItem(operation=operation,
result_status=result_status,
response_payload=resp_pl)
response_message = messages.ResponseMessage(response_header=resp_hdr,
batch_items=[batch_item])
response_message.write(self.stream)
result = self.stream.read()
len_exp = len(self.locate)
len_rcv = len(result)
self.assertEqual(len_exp, len_rcv,
self.msg.format('response message', 'write',
len_exp, len_rcv))
msg = "Bad response message write: encoding mismatch"
self.assertEqual(self.locate, result, msg)
|
{
"content_hash": "a1ce17503c1a3aef68dc0f169b1ab34e",
"timestamp": "",
"source": "github",
"line_count": 1746,
"max_line_length": 79,
"avg_line_length": 53.379152348224515,
"alnum_prop": 0.5499248927038627,
"repo_name": "callidus/PyKMIP",
"id": "514555705e42a930e7be1dddf0cf3c353c13438d",
"size": "93846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kmip/tests/unit/core/messages/test_messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "945669"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
from functools import wraps
from typing import Callable, Optional, Sequence, Tuple, TypeVar, cast
from flask import Response
from airflow.api_connexion.exceptions import PermissionDenied, Unauthenticated
from airflow.utils.airflow_flask_app import get_airflow_app
T = TypeVar("T", bound=Callable)
def check_authentication() -> None:
"""Checks that the request has valid authorization information."""
for auth in get_airflow_app().api_auth:
response = auth.requires_authentication(Response)()
if response.status_code == 200:
return
# since this handler only checks authentication, not authorization,
# we should always return 401
raise Unauthenticated(headers=response.headers)
def requires_access(permissions: Optional[Sequence[Tuple[str, str]]] = None) -> Callable[[T], T]:
"""Factory for decorator that checks current user's permissions against required permissions."""
appbuilder = get_airflow_app().appbuilder
appbuilder.sm.sync_resource_permissions(permissions)
def requires_access_decorator(func: T):
@wraps(func)
def decorated(*args, **kwargs):
check_authentication()
if appbuilder.sm.check_authorization(permissions, kwargs.get('dag_id')):
return func(*args, **kwargs)
raise PermissionDenied()
return cast(T, decorated)
return requires_access_decorator
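# A minimal usage sketch for requires_access (the view function and the
# specific permission constants here are illustrative, not taken from this
# module):
#
#     from airflow.security import permissions
#
#     @requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
#     def get_dag(*, dag_id: str):
#         ...
#
# check_authentication() runs first on every call; if the authenticated user
# then fails the permission check, the request is rejected with
# PermissionDenied.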
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GradientCorrectnessTest(test.TestCase, parameterized.TestCase):
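  # Each test is parameterized over whether an explicit GradientTape is used;
  # wrapping the parameters in set() collapses them to a single case when
  # eager execution is enabled (both entries are then True), so the tape-free
  # variant only runs in graph mode.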
@parameterized.parameters(set((True, context.executing_eagerly())))
def testMultipleOutputChainedGradients(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
x = constant_op.constant(1.0, dtype=dtypes.float32)
tape.watch(x)
yexp = math_ops.exp(x)
yexplog = math_ops.log(yexp)
grads = tape.gradient([yexp, yexplog], [x])
grad_vals = self.evaluate(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
self.assertAllClose(grad_vals[0], exp1_plus_one)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testIdentityGradient(self, use_tape):
x = constant_op.constant(3.)
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
tape.watch(x)
dx_dx = tape.gradient(x, x)
self.assertAllClose(1., self.evaluate(dx_dx))
@parameterized.parameters(set((True, context.executing_eagerly())))
def testIntegerIdentityGradient(self, use_tape):
x = constant_op.constant(3)
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
tape.watch(x)
dx_dx = tape.gradient(x, x)
self.assertAllClose(1, self.evaluate(dx_dx))
@parameterized.parameters(set((True, context.executing_eagerly())))
def testGradientWithIntegerPath(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
x = constant_op.constant([3.9, 4.1])
tape.watch(x)
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = x * k
dy_dx = tape.gradient(y, x)
self.assertAllClose([3., 4.], self.evaluate(dy_dx))
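  # No gradient is registered for the integer casts, so k contributes nothing
  # to the gradient above: y = x * k differentiates as if k were a constant,
  # giving dy/dx = k = [3., 4.].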
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient1(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
x = constant_op.constant([3.9, 4.1])
tape.watch(x)
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = k * k
dy_dx = tape.gradient(y, x)
self.assertIsNone(dy_dx)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient2(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
k = constant_op.constant([3, 4])
x = math_ops.cast(k, dtypes.float32)
tape.watch([k, x])
y = x * x
dy_dk = tape.gradient(y, k)
self.assertIsNone(dy_dk)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient3(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
k = constant_op.constant([3, 4])
tape.watch(k)
m = k * k
dm_dk = tape.gradient(m, k)
self.assertIsNone(dm_dk)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient4(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
k = constant_op.constant([3, 4])
tape.watch(k)
m = k * k * k
dm_dk = tape.gradient(m, k)
self.assertIsNone(dm_dk)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient5(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
k = constant_op.constant([3, 4])
tape.watch(k)
m = k * k
n = m * m
dn_dk = tape.gradient(n, k)
self.assertIsNone(dn_dk)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testNoIntegerGradient6(self, use_tape):
with test_util.AbstractGradientTape(
use_tape=use_tape, persistent=True) as tape:
k = constant_op.constant(3)
tape.watch(k)
x = math_ops.cast(k, dtypes.float32)
grad_1 = tape.gradient(k * k, k)
grad_2 = tape.gradient(x * x, k)
grad_3 = tape.gradient(math_ops.square(k), k)
grad_4 = tape.gradient(math_ops.square(x), k)
self.assertIsNone(grad_1)
self.assertIsNone(grad_2)
self.assertIsNone(grad_3)
self.assertIsNone(grad_4)
if __name__ == '__main__':
test.main()
from msrest.pipeline import ClientRawResponse
from .. import models
class DictionaryOperations(object):
"""DictionaryOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
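    # A minimal usage sketch (the surrounding msrest service client that
    # supplies `client`, `config`, and the serializer/deserializer pair is
    # assumed, not shown here):
    #
    #     ops = DictionaryOperations(client, config, serializer, deserializer)
    #     ops.put_empty({})                  # PUT /dictionary/empty
    #     values = ops.get_boolean_tfft()    # {'0': True, '1': False, ...}
    #     raw = ops.get_empty(raw=True)      # ClientRawResponse wrapper
    #
    # The format strings passed to the serializer/deserializer below (e.g.
    # '{int}', '{bool}') are msrest type specifiers meaning "dictionary with
    # values of this type".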
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null dictionary value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{int}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get empty dictionary value {}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{int}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_empty(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{str}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_null_value(
self, custom_headers=None, raw=False, **operation_config):
"""Get Dictionary with null value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/nullvalue'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_null_key(
self, custom_headers=None, raw=False, **operation_config):
"""Get Dictionary with null key.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/nullkey'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_empty_string_key(
self, custom_headers=None, raw=False, **operation_config):
"""Get Dictionary with key as empty string.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/keyemptystring'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Dictionary value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_boolean_tfft(
self, custom_headers=None, raw=False, **operation_config):
"""Get boolean dictionary value {"0": true, "1": false, "2": false, "3":
true }.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/boolean/tfft'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bool}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_boolean_tfft(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {"0": true, "1": false, "2": false, "3":
true }.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/boolean/tfft'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{bool}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_boolean_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get boolean dictionary value {"0": true, "1": null, "2": false }.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/boolean/true.null.false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bool}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_boolean_invalid_string(
self, custom_headers=None, raw=False, **operation_config):
"""Get boolean dictionary value '{"0": true, "1": "boolean", "2": false}'.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/boolean/true.boolean.false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bool}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_integer_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": 1, "1": -1, "2": 3, "3": 300}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/integer/1.-1.3.300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{int}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_integer_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {"0": 1, "1": -1, "2": 3, "3": 300}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/integer/1.-1.3.300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{int}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": 1, "1": null, "2": 0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/integer/1.null.zero'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{int}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_int_invalid_string(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": 1, "1": "integer", "2": 0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/integer/1.integer.0'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{int}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_long_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": 1, "1": -1, "2": 3, "3": 300}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/long/1.-1.3.300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{long}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_long_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {"0": 1, "1": -1, "2": 3, "3": 300}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/long/1.-1.3.300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{long}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_long_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get long dictionary value {"0": 1, "1": null, "2": 0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/long/1.null.zero'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{long}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_long_invalid_string(
self, custom_headers=None, raw=False, **operation_config):
"""Get long dictionary value {"0": 1, "1": "integer", "2": 0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/long/1.integer.0'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{long}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_float_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get float dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/float/0--0.01-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_float_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/float/0--0.01-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{float}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_float_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get float dictionary value {"0": 0.0, "1": null, "2": 1.2e20}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/float/0.0-null-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_float_invalid_string(
self, custom_headers=None, raw=False, **operation_config):
"""Get boolean dictionary value {"0": 1.0, "1": "number", "2": 0.0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/float/1.number.0'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
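# Usage sketch (illustrative): per the code above, any non-200 status
# raises the generated models.ErrorException, which carries the HTTP
# response. `client` is assumed as above.
#
#     try:
#         client.dictionary.get_float_invalid_string()
#     except models.ErrorException as err:
#         print(err.response.status_code)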
def get_double_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get float dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/double/0--0.01-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_double_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/double/0--0.01-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{float}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_double_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get float dictionary value {"0": 0.0, "1": null, "2": 1.2e20}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/double/0.0-null-1.2e20'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_double_invalid_string(
self, custom_headers=None, raw=False, **operation_config):
"""Get boolean dictionary value {"0": 1.0, "1": "number", "2": 0.0}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/double/1.number.0'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{float}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_string_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get string dictionary value {"0": "foo1", "1": "foo2", "2": "foo3"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/string/foo1.foo2.foo3'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_string_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "foo1", "1": "foo2", "2": "foo3"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/string/foo1.foo2.foo3'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{str}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_string_with_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get string dictionary value {"0": "foo", "1": null, "2": "foo2"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/string/foo.null.foo2'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_string_with_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get string dictionary value {"0": "foo", "1": 123, "2": "foo2"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/string/foo.123.foo2'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": "2000-12-01", "1": "1980-01-02",
"2": "1492-10-12"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "2000-12-01", "1": "1980-01-02", "2":
"1492-10-12"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{date}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
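# Usage sketch (illustrative): the '{date}' serializer expects
# datetime.date values, which msrest renders as ISO-8601 date strings.
# `client` is assumed as above.
#
#     import datetime
#     client.dictionary.put_date_valid({
#         "0": datetime.date(2000, 12, 1),
#         "1": datetime.date(1980, 1, 2),
#         "2": datetime.date(1492, 10, 12),
#     })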
def get_date_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2012-01-01", "1": null, "2":
"1776-07-04"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_invalid_chars(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2011-03-22", "1": "date"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date/invalidchars'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get date-time dictionary value {"0": "2000-12-01t00:00:01z", "1":
"1980-01-02T00:11:35+01:00", "2": "1492-10-12T10:15:01-08:00"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_time_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "2000-12-01t00:00:01z", "1":
"1980-01-02T00:11:35+01:00", "2": "1492-10-12T10:15:01-08:00"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{iso-8601}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
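# Usage sketch (illustrative): the '{iso-8601}' serializer expects
# datetime.datetime values; timezone-aware instances keep their UTC offset
# when serialized (Python 3's datetime.timezone is assumed here).
# `client` is assumed as above.
#
#     import datetime
#     client.dictionary.put_date_time_valid({
#         "0": datetime.datetime(2000, 12, 1, 0, 0, 1,
#                                tzinfo=datetime.timezone.utc),
#     })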
def get_date_time_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2000-12-01t00:00:01z", "1": null}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_invalid_chars(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2000-12-01t00:00:01z", "1":
"date-time"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time/invalidchars'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_rfc1123_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get date-time-rfc1123 dictionary value {"0": "Fri, 01 Dec 2000
00:00:01 GMT", "1": "Wed, 02 Jan 1980 00:11:35 GMT", "2": "Wed, 12
Oct 1492 10:15:01 GMT"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time-rfc1123/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{rfc-1123}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_time_rfc1123_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {"0": "Fri, 01 Dec 2000 00:00:01 GMT", "1":
"Wed, 02 Jan 1980 00:11:35 GMT", "2": "Wed, 12 Oct 1492 10:15:01
GMT"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/date-time-rfc1123/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{rfc-1123}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_duration_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get duration dictionary value {"0": "P123DT22H14M12.011S", "1":
"P5DT1H0M0S"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/duration/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{duration}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_duration_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "P123DT22H14M12.011S", "1": "P5DT1H0M0S"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/duration/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{duration}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
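# Usage sketch (illustrative): the '{duration}' serializer accepts
# datetime.timedelta values and renders them as ISO-8601 durations such as
# "P5DT1H0M0S". `client` is assumed as above.
#
#     from datetime import timedelta
#     client.dictionary.put_duration_valid({
#         "0": timedelta(days=123, hours=22, minutes=14, seconds=12,
#                        milliseconds=11),
#         "1": timedelta(days=5, hours=1),
#     })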
def get_byte_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get byte dictionary value {"0": hex(FF FF FF FA), "1": hex(01 02 03),
"2": hex (25, 29, 43)} with each item encoded in base64.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/byte/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bytearray}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_byte_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Put the dictionary value {"0": hex(FF FF FF FA), "1": hex(01 02 03),
"2": hex (25, 29, 43)} with each elementencoded in base 64.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/byte/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{bytearray}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
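# Usage sketch (illustrative): the '{bytearray}' serializer base64-encodes
# each bytearray value before it is sent. `client` is assumed as above.
#
#     client.dictionary.put_byte_valid({
#         "0": bytearray([0xFF, 0xFF, 0xFF, 0xFA]),
#         "1": bytearray([0x01, 0x02, 0x03]),
#         "2": bytearray([0x25, 0x29, 0x43]),
#     })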
def get_byte_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get byte dictionary value {"0": hex(FF FF FF FA), "1": null} with the
first item base64 encoded.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/byte/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bytearray}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_base64_url(
self, custom_headers=None, raw=False, **operation_config):
"""Get base64url dictionary value {"0": "a string that gets encoded with
base64url", "1": "test string", "2": "Lorem ipsum"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/prim/base64url/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{base64}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type null value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get empty dictionary of complex type {}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_item_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with null item {"0": {"integer": 1,
"string": "2"}, "1": null, "2": {"integer": 5, "string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/itemnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_item_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with empty item {"0": {"integer": 1,
"string": "2"}, "1:" {}, "2": {"integer": 5, "string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/itemempty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with {"0": {"integer": 1, "string":
"2"}, "1": {"integer": 3, "string": "4"}, "2": {"integer": 5,
"string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_complex_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Put an dictionary of complex type with values {"0": {"integer": 1,
"string": "2"}, "1": {"integer": 3, "string": "4"}, "2": {"integer":
5, "string": "6"}}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/complex/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{Widget}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
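# Usage sketch (illustrative): '{Widget}' values are instances of the
# generated models.Widget class; the keyword-argument constructor shown
# here is an assumption based on the documented fields. `client` is
# assumed as above.
#
#     client.dictionary.put_complex_valid({
#         "0": models.Widget(integer=1, string="2"),
#         "1": models.Widget(integer=3, string="4"),
#         "2": models.Widget(integer=5, string="6"),
#     })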
def get_array_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get a null array.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{[str]}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_array_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get an empty dictionary {}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{[str]}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_array_item_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionary of array of strings {"0": ["1", "2", "3"], "1":
null, "2": ["7", "8", "9"]}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/itemnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{[str]}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_array_item_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get an array of array of strings [{"0": ["1", "2", "3"], "1": [], "2":
["7", "8", "9"]}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/itemempty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{[str]}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_array_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get an array of array of strings {"0": ["1", "2", "3"], "1": ["4",
"5", "6"], "2": ["7", "8", "9"]}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{[str]}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_array_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Put An array of array of strings {"0": ["1", "2", "3"], "1": ["4",
"5", "6"], "2": ["7", "8", "9"]}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/array/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{[str]}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
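# Usage sketch (illustrative): '{[str]}' maps string keys to lists of
# strings, so a plain dict of lists serializes directly. `client` is
# assumed as above.
#
#     client.dictionary.put_array_valid({
#         "0": ["1", "2", "3"],
#         "1": ["4", "5", "6"],
#         "2": ["7", "8", "9"],
#     })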
def get_dictionary_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries with value null.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{{str}}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_dictionary_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries of type <string, string> with
value {}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{{str}}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_dictionary_item_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries of type <string, string> with
value {"0": {"1": "one", "2": "two", "3": "three"}, "1": null, "2":
{"7": "seven", "8": "eight", "9": "nine"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/itemnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{{str}}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_dictionary_item_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries of type <string, string> with
value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {}, "2":
{"7": "seven", "8": "eight", "9": "nine"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/itemempty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{{str}}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_dictionary_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries of type <string, string> with
value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {"4":
"four", "5": "five", "6": "six"}, "2": {"7": "seven", "8": "eight",
"9": "nine"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{{str}}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_dictionary_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Get an dictionaries of dictionaries of type <string, string> with
value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {"4":
"four", "5": "five", "6": "six"}, "2": {"7": "seven", "8": "eight",
"9": "nine"}}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/dictionary/dictionary/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{{str}}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
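# A hypothetical usage sketch (the client class name, operation-group
# attribute and base URL below are assumptions, not part of this module):
# each GET returns the deserialized payload, or a ClientRawResponse when
# raw=True, and PUT round-trips the same structure.
#
#     client = AutoRestSwaggerBATdictionaryService(base_url="http://localhost:3000")
#     nested = client.dictionary.get_dictionary_valid()
#     # nested == {"0": {"1": "one", ...}, "1": {...}, "2": {...}}
#     client.dictionary.put_dictionary_valid(nested)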
|
{
"content_hash": "2cc41af0b5d363456a80efa115b1f0ca",
"timestamp": "",
"source": "github",
"line_count": 2839,
"max_line_length": 84,
"avg_line_length": 36.152518492426914,
"alnum_prop": 0.6227481317653478,
"repo_name": "haocs/autorest",
"id": "3add16550863e9b0af28e7c02d8df344f1f652cc",
"size": "103111",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyDictionary/autorestswaggerbatdictionaryservice/operations/dictionary_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12942"
},
{
"name": "C#",
"bytes": "12768592"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "Go",
"bytes": "142004"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "6303425"
},
{
"name": "JavaScript",
"bytes": "4746656"
},
{
"name": "PowerShell",
"bytes": "44986"
},
{
"name": "Python",
"bytes": "2283819"
},
{
"name": "Ruby",
"bytes": "301935"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TypeScript",
"bytes": "179578"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db.models import F
from django.db.models import Q
from machina.core.db.models import get_model
from machina.core.loading import get_class
Forum = get_model('forum', 'Forum')
ForumReadTrack = get_model('forum_tracking', 'ForumReadTrack')
TopicReadTrack = get_model('forum_tracking', 'TopicReadTrack')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
class TrackingHandler(object):
""" Provides utility methods to compute unread forums and topics.
The TrackingHandler makes it possible to filter lists of forums and lists of topics in order
to keep only the forums that contain unread topics, or only the unread topics themselves.
"""
def __init__(self, request=None):
self.request = request
self.perm_handler = request.forum_permission_handler if request \
else PermissionHandler()
def get_unread_forums(self, user):
""" Returns the list of unread forums for the given user. """
return self.get_unread_forums_from_list(
user, self.perm_handler.get_readable_forums(Forum.objects.all(), user))
def get_unread_forums_from_list(self, user, forums):
""" Returns the list of unread forums for the given user from a given list of forums. """
unread_forums = []
# A user who is not authenticated will never see a forum as unread
if not user.is_authenticated:
return unread_forums
unread = ForumReadTrack.objects.get_unread_forums_from_list(forums, user)
unread_forums.extend(unread)
return unread_forums
def get_unread_topics(self, topics, user):
""" Returns a list of unread topics for the given user from a given set of topics. """
unread_topics = []
# A user who is not authenticated will never see a topic as unread.
# If there are no topics to consider, we stop here.
if not user.is_authenticated or topics is None or not len(topics):
return unread_topics
# A topic can be unread if a track exists for it with a mark time
# earlier than its last update date.
topic_ids = [topic.id for topic in topics]
topic_tracks = TopicReadTrack.objects.filter(topic__in=topic_ids, user=user)
tracked_topics = dict(topic_tracks.values_list('topic__pk', 'mark_time'))
if tracked_topics:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if topic.id in tracked_topics.keys() \
and topic_last_modification_date > tracked_topics[topic.id]:
unread_topics.append(topic)
# A topic can be unread if a track for its associated forum exists with
# a mark time earlier than the topic's creation or update date.
forum_ids = [topic.forum_id for topic in topics]
forum_tracks = ForumReadTrack.objects.filter(forum_id__in=forum_ids, user=user)
tracked_forums = dict(forum_tracks.values_list('forum__pk', 'mark_time'))
if tracked_forums:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if ((topic.forum_id in tracked_forums.keys() and topic.id not in tracked_topics) and
topic_last_modification_date > tracked_forums[topic.forum_id]):
unread_topics.append(topic)
# A topic can be unread if no tracks exists for it
for topic in topics:
if topic.forum_id not in tracked_forums and topic.id not in tracked_topics:
unread_topics.append(topic)
return list(set(unread_topics))
def mark_forums_read(self, forums, user):
""" Marks a list of forums as read. """
if not forums or not user.is_authenticated:
return
forums = sorted(forums, key=lambda f: f.level)
# Update all forum tracks to the current date for the considered forums
for forum in forums:
forum_track = ForumReadTrack.objects.get_or_create(forum=forum, user=user)[0]
forum_track.save()
# Delete all the unnecessary topic tracks
TopicReadTrack.objects.filter(topic__forum__in=forums, user=user).delete()
# Update parent forum tracks
self._update_parent_forum_tracks(forums[0], user)
def mark_topic_read(self, topic, user):
""" Marks a topic as read. """
if not user.is_authenticated:
return
forum = topic.forum
try:
forum_track = ForumReadTrack.objects.get(forum=forum, user=user)
except ForumReadTrack.DoesNotExist:
forum_track = None
if forum_track is None \
or (topic.last_post_on and forum_track.mark_time < topic.last_post_on):
topic_track, created = TopicReadTrack.objects.get_or_create(topic=topic, user=user)
if not created:
topic_track.save() # mark_time filled
# If no other topic is unread inside the considered forum, the latter should also be
# marked as read.
unread_topics = forum.topics.filter(
Q(tracks__user=user, tracks__mark_time__lt=F('last_post_on')) |
Q(forum__tracks__user=user, forum__tracks__mark_time__lt=F('last_post_on'),
tracks__isnull=True)).exclude(id=topic.id)
forum_topic_tracks = TopicReadTrack.objects.filter(topic__forum=forum, user=user)
if not unread_topics.exists() and (
forum_track is not None or
forum_topic_tracks.count() == forum.topics.filter(approved=True).count()):
# The topics that are marked as read inside the forum for the given user will be
# deleted while the forum track associated with the user must be created or updated.
# This is done only if there are as many topic tracks as approved topics in case
# the related forum has not been previously marked as read.
TopicReadTrack.objects.filter(topic__forum=forum, user=user).delete()
forum_track, _ = ForumReadTrack.objects.get_or_create(forum=forum, user=user)
forum_track.save()
# Update parent forum tracks
self._update_parent_forum_tracks(forum, user)
def _update_parent_forum_tracks(self, forum, user):
for forum in forum.get_ancestors(ascending=True):
# If no other topics are unread inside the considered forum, the latter should also
# be marked as read.
unread_topics = forum.topics.filter(
Q(tracks__user=user, tracks__mark_time__lt=F('last_post_on')) |
Q(forum__tracks__user=user, forum__tracks__mark_time__lt=F('last_post_on'),
tracks__isnull=True))
if unread_topics.exists():
break
# The topics that are marked as read inside the forum for the given user
# will be deleted while the forum track associated with the user must be
# created or updated.
TopicReadTrack.objects.filter(topic__forum=forum, user=user).delete()
forum_track, _ = ForumReadTrack.objects.get_or_create(forum=forum, user=user)
forum_track.save()
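# A minimal usage sketch (assuming a Django request with an authenticated
# user and the machina apps installed): compute the unread forums once per
# request, then mark a topic as read after it has been displayed.
#
#     handler = TrackingHandler(request)
#     unread_forums = handler.get_unread_forums(request.user)
#     handler.mark_topic_read(topic, request.user)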
|
{
"content_hash": "0aea88e67d54f64fd6f70ec9b3674798",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 100,
"avg_line_length": 46.12422360248447,
"alnum_prop": 0.6302181524373822,
"repo_name": "franga2000/django-machina",
"id": "105deda598c6a0969f1900295e295a74206ce3b3",
"size": "7451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machina/apps/forum_tracking/handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "HTML",
"bytes": "138474"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "696565"
}
],
"symlink_target": ""
}
|
import tkinter.tix as tk
from . import Settings
class InputFrame(tk.Frame):
def __init__(self, input_processor, label=None, master=None):
# Initialise the base Frame first so that `self` is a valid Tk master
# for the Label created below (using `self` before Frame.__init__ would
# raise an AttributeError whenever a label is supplied).
tk.Frame.__init__(self, master, bg=Settings.bg_color)
if label:
self.label = tk.Label(self, bg=Settings.bg_color, fg=Settings.text_color, text=label)
self.label.grid(row=0, column=0)
else:
self.label = None
self.entrybox = None
self.input_value = None
self.prev_button = None
self.next_button = None
self.input_processor = input_processor
def allow_input(self):
"""
Use an entry box to retrieve user input
"""
label = tk.Label(self, bg=Settings.bg_color, fg=Settings.text_color, text="Input>> ", font=Settings.heading)
label.grid(row=0, column=0, padx=20, pady=15)
self.entrybox = tk.Entry(self, exportselection=0, width=50, bg=Settings.bg_color,
fg=Settings.text_color, bd=0, font=Settings.standard, insertbackground=Settings.text_color)
self.entrybox.grid(row=0, column=1, padx=10)
self.input_value = tk.StringVar()
self.input_value.set("Enter input")
self.entrybox['textvariable'] = self.input_value
self.set_focus()
self.entrybox.bind('<Key-Return>', self.process_input)
def nav_buttons(self):
self.prev_button = tk.Button(self, takefocus=False, text="Prev", width=8, underline=0, command=self.nav_prev)
self.next_button = tk.Button(self, takefocus=False, text="Next", width=8, underline=0, command=self.nav_next)
if self.entrybox:
self.prev_button.grid(row=0, column=2, padx=10)
self.next_button.grid(row=0, column=3, padx=10)
def nav_prev(self):
self.input_processor("prev")
def nav_next(self):
self.input_processor("next")
def set_focus(self):
self.bind_class("Entry", "<Key-Return>", lambda x: None)
self.entrybox.focus_set()
self.entrybox.select_range(0, tk.END)
def process_input(self, event):
input_string = self.get_input()
self.input_processor(input_string)
def get_input(self):
if self.entrybox:
contents = self.input_value.get()
return contents
def select_input(self):
if self.entrybox:
self.entrybox.select_range(0, tk.END)
def clear_input(self):
if self.entrybox:
self.entrybox.delete(0, tk.END)
def close_frame(self):
self.destroy()
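# A minimal usage sketch (assuming a running Tk root window): the frame
# forwards entered text, or "prev"/"next" from the nav buttons, to the
# supplied processor callable.
#
#     root = tk.Tk()
#     frame = InputFrame(input_processor=print, master=root)
#     frame.allow_input()
#     frame.nav_buttons()
#     frame.grid(row=0, column=0)
#     root.mainloop()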
|
{
"content_hash": "da220b6217e97b715a11e0c1a0c33e6b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 124,
"avg_line_length": 35.12162162162162,
"alnum_prop": 0.5998460946517892,
"repo_name": "hidat/audio_pipeline",
"id": "5e9ad23b49570453b192ceaac99d2c63c9128510",
"size": "2599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio_pipeline/tb_ui/view/InputFrame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "343203"
}
],
"symlink_target": ""
}
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from marconicafe.marconi.common.models import (
BaseMarconiModel, BaseMarconiListModel)
class MessageModel(BaseMarconiModel):
def __init__(
self, ttl=None, body=None):
super(MessageModel, self).__init__()
self.ttl = ttl
self.body = body
def _obj_to_dict(self):
return {"ttl": self.ttl, "body": self.body}
class MessageListModel(BaseMarconiListModel):
def _obj_to_dict(self):
ret_val = []
for model in self:
ret_val.append(model._obj_to_dict())
return ret_val
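# A minimal sanity check (runnable only where the marconicafe package is
# importable): models serialise to the plain dicts sent over the wire.
if __name__ == "__main__":
    message = MessageModel(ttl=300, body={"event": "created"})
    print(message._obj_to_dict())  # {'ttl': 300, 'body': {'event': 'created'}}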
|
{
"content_hash": "ada129c5a55bcadc77df33fc63a313ff",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 72,
"avg_line_length": 30.243243243243242,
"alnum_prop": 0.6961572832886506,
"repo_name": "rackerlabs/marconicafe",
"id": "20d1c1e00c9300ac220dc0aff8768864c26e3803",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marconicafe/marconi/models/requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12442"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_galleryimage'),
]
operations = [
migrations.AlterUniqueTogether(
name='galleryimage',
unique_together=set([('gallery', 'slug')]),
),
]
|
{
"content_hash": "0757b97fbc4aa257383faf26a089e8cd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 20.647058823529413,
"alnum_prop": 0.5982905982905983,
"repo_name": "evanepio/dotmanca",
"id": "9a02df2952f65e414cc4aff7c48df777a797abe0",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gallery/migrations/0003_auto_20170701_1436.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2704"
},
{
"name": "Dockerfile",
"bytes": "1874"
},
{
"name": "HTML",
"bytes": "12635"
},
{
"name": "Makefile",
"bytes": "192"
},
{
"name": "Python",
"bytes": "83822"
},
{
"name": "Shell",
"bytes": "860"
}
],
"symlink_target": ""
}
|
"""A non-blocking, single-threaded TCP server."""
import errno
import os
import socket
import ssl
from tornado import gen
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from tornado import process
from tornado.util import errno_from_exception
import typing
from typing import Union, Dict, Any, Iterable, Optional, Awaitable
if typing.TYPE_CHECKING:
from typing import Callable, List # noqa: F401
class TCPServer(object):
r"""A non-blocking, single-threaded TCP server.
To use `TCPServer`, define a subclass which overrides the `handle_stream`
method. For example, a simple echo server could be defined like this::
from tornado.tcpserver import TCPServer
from tornado.iostream import StreamClosedError
from tornado import gen
class EchoServer(TCPServer):
async def handle_stream(self, stream, address):
while True:
try:
data = await stream.read_until(b"\n")
await stream.write(data)
except StreamClosedError:
break
To make this server serve SSL traffic, send the ``ssl_options`` keyword
argument with an `ssl.SSLContext` object. For compatibility with older
versions of Python ``ssl_options`` may also be a dictionary of keyword
arguments for the `ssl.wrap_socket` method.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
TCPServer(ssl_options=ssl_ctx)
`TCPServer` initialization follows one of three patterns:
1. `listen`: simple single-process::
server = TCPServer()
server.listen(8888)
IOLoop.current().start()
2. `bind`/`start`: simple multi-process::
server = TCPServer()
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.current().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `TCPServer` constructor. `start` will always start
the server on the default singleton `.IOLoop`.
3. `add_sockets`: advanced multi-process::
sockets = bind_sockets(8888)
tornado.process.fork_processes(0)
server = TCPServer()
server.add_sockets(sockets)
IOLoop.current().start()
The `add_sockets` interface is more complicated, but it can be
used with `tornado.process.fork_processes` to give you more
flexibility in when the fork happens. `add_sockets` can
also be used in single-process servers if you want to create
your listening sockets in some way other than
`~tornado.netutil.bind_sockets`.
.. versionadded:: 3.1
The ``max_buffer_size`` argument.
.. versionchanged:: 5.0
The ``io_loop`` argument has been removed.
"""
def __init__(
self,
ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
max_buffer_size: Optional[int] = None,
read_chunk_size: Optional[int] = None,
) -> None:
self.ssl_options = ssl_options
self._sockets = {} # type: Dict[int, socket.socket]
self._handlers = {} # type: Dict[int, Callable[[], None]]
self._pending_sockets = [] # type: List[socket.socket]
self._started = False
self._stopped = False
self.max_buffer_size = max_buffer_size
self.read_chunk_size = read_chunk_size
# Verify the SSL options. Otherwise we don't get errors until clients
# connect. This doesn't verify that the keys are legitimate, but
# the SSL module doesn't do that until there is a connected socket
# which seems like too much work
if self.ssl_options is not None and isinstance(self.ssl_options, dict):
# Only certfile is required: it can contain both keys
if "certfile" not in self.ssl_options:
raise KeyError('missing key "certfile" in ssl_options')
if not os.path.exists(self.ssl_options["certfile"]):
raise ValueError(
'certfile "%s" does not exist' % self.ssl_options["certfile"]
)
if "keyfile" in self.ssl_options and not os.path.exists(
self.ssl_options["keyfile"]
):
raise ValueError(
'keyfile "%s" does not exist' % self.ssl_options["keyfile"]
)
def listen(self, port: int, address: str = "") -> None:
"""Starts accepting connections on the given port.
This method may be called more than once to listen on multiple ports.
`listen` takes effect immediately; it is not necessary to call
`TCPServer.start` afterwards. It is, however, necessary to start
the `.IOLoop`.
"""
sockets = bind_sockets(port, address=address)
self.add_sockets(sockets)
def add_sockets(self, sockets: Iterable[socket.socket]) -> None:
"""Makes this server start accepting connections on the given sockets.
The ``sockets`` parameter is a list of socket objects such as
those returned by `~tornado.netutil.bind_sockets`.
`add_sockets` is typically used in combination with that
method and `tornado.process.fork_processes` to provide greater
control over the initialization of a multi-process server.
"""
for sock in sockets:
self._sockets[sock.fileno()] = sock
self._handlers[sock.fileno()] = add_accept_handler(
sock, self._handle_connection
)
def add_socket(self, socket: socket.socket) -> None:
"""Singular version of `add_sockets`. Takes a single socket object."""
self.add_sockets([socket])
def bind(
self,
port: int,
address: Optional[str] = None,
family: socket.AddressFamily = socket.AF_UNSPEC,
backlog: int = 128,
reuse_port: bool = False,
) -> None:
"""Binds this server to the given port on the given address.
To start the server, call `start`. If you want to run this server
in a single process, you can call `listen` as a shortcut to the
sequence of `bind` and `start` calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
has the same meaning as for `.bind_sockets`.
This method may be called multiple times prior to `start` to listen
on multiple ports or interfaces.
.. versionchanged:: 4.4
Added the ``reuse_port`` argument.
"""
sockets = bind_sockets(
port, address=address, family=family, backlog=backlog, reuse_port=reuse_port
)
if self._started:
self.add_sockets(sockets)
else:
self._pending_sockets.extend(sockets)
def start(
self, num_processes: Optional[int] = 1, max_restarts: Optional[int] = None
) -> None:
"""Starts this server in the `.IOLoop`.
By default, we run the server in this process and do not fork any
additional child process.
If num_processes is ``None`` or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If num_processes is given and > 1, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``TCPServer.start(n)``.
Values of ``num_processes`` other than 1 are not supported on Windows.
The ``max_restarts`` argument is passed to `.fork_processes`.
.. versionchanged:: 6.0
Added ``max_restarts`` argument.
"""
assert not self._started
self._started = True
if num_processes != 1:
process.fork_processes(num_processes, max_restarts)
sockets = self._pending_sockets
self._pending_sockets = []
self.add_sockets(sockets)
def stop(self) -> None:
"""Stops listening for new connections.
Requests currently in progress may still continue after the
server is stopped.
"""
if self._stopped:
return
self._stopped = True
for fd, sock in self._sockets.items():
assert sock.fileno() == fd
# Unregister socket from IOLoop
self._handlers.pop(fd)()
sock.close()
def handle_stream(
self, stream: IOStream, address: tuple
) -> Optional[Awaitable[None]]:
"""Override to handle a new `.IOStream` from an incoming connection.
This method may be a coroutine; if so any exceptions it raises
asynchronously will be logged. Accepting of incoming connections
will not be blocked by this coroutine.
If this `TCPServer` is configured for SSL, ``handle_stream``
may be called before the SSL handshake has completed. Use
`.SSLIOStream.wait_for_handshake` if you need to verify the client's
certificate or use NPN/ALPN.
.. versionchanged:: 4.2
Added the option for this method to be a coroutine.
"""
raise NotImplementedError()
def _handle_connection(self, connection: socket.socket, address: Any) -> None:
if self.ssl_options is not None:
assert ssl, "Python 2.6+ and OpenSSL required for SSL"
try:
connection = ssl_wrap_socket(
connection,
self.ssl_options,
server_side=True,
do_handshake_on_connect=False,
)
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_EOF:
return connection.close()
else:
raise
except socket.error as err:
# If the connection is closed immediately after it is created
# (as in a port scan), we can get one of several errors.
# wrap_socket makes an internal call to getpeername,
# which may return either EINVAL (Mac OS X) or ENOTCONN
# (Linux). If it returns ENOTCONN, this error is
# silently swallowed by the ssl module, so we need to
# catch another error later on (AttributeError in
# SSLIOStream._do_ssl_handshake).
# To test this behavior, try nmap with the -sT flag.
# https://github.com/tornadoweb/tornado/pull/750
if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
return connection.close()
else:
raise
try:
if self.ssl_options is not None:
stream = SSLIOStream(
connection,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size,
) # type: IOStream
else:
stream = IOStream(
connection,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size,
)
future = self.handle_stream(stream, address)
if future is not None:
IOLoop.current().add_future(
gen.convert_yielded(future), lambda f: f.result()
)
except Exception:
app_log.error("Error in connection callback", exc_info=True)
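# A minimal end-to-end sketch of pattern 1 (`listen`) from the class
# docstring: echo lines back until the client disconnects; the port
# number is an arbitrary choice for illustration.
if __name__ == "__main__":
    from tornado.iostream import StreamClosedError
    class _EchoServer(TCPServer):
        async def handle_stream(self, stream: IOStream, address: tuple) -> None:
            while True:
                try:
                    data = await stream.read_until(b"\n")
                    await stream.write(data)
                except StreamClosedError:
                    break
    _EchoServer().listen(8888)
    IOLoop.current().start()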
|
{
"content_hash": "9a36e144844bcb8cfac9e80134ae6ed5",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 88,
"avg_line_length": 39.708463949843264,
"alnum_prop": 0.6019578432146523,
"repo_name": "bdarnell/tornado",
"id": "476ffc936f731d8c341c5e8c00b55f295ed3db60",
"size": "13242",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tornado/tcpserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1524"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1535018"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
class testModule(NoConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
# we'll be playing around with some vtk objects, this could
# be anything
self._triangleFilter = vtk.vtkTriangleFilter()
self._curvatures = vtk.vtkCurvatures()
self._curvatures.SetCurvatureTypeToMaximum()
self._curvatures.SetInput(self._triangleFilter.GetOutput())
# initialise any mixins we might have
NoConfigModuleMixin.__init__(self,
{'Module (self)' : self,
'vtkTriangleFilter' : self._triangleFilter,
'vtkCurvatures' : self._curvatures})
module_utils.setup_vtk_object_progress(self, self._triangleFilter,
'Triangle filtering...')
module_utils.setup_vtk_object_progress(self, self._curvatures,
'Calculating curvatures...')
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# don't forget to call the close() method of the vtkPipeline mixin
NoConfigModuleMixin.close(self)
# get rid of our reference
del self._triangleFilter
del self._curvatures
def get_input_descriptions(self):
return ('vtkPolyData',)
def set_input(self, idx, inputStream):
self._triangleFilter.SetInput(inputStream)
def get_output_descriptions(self):
return (self._curvatures.GetOutput().GetClassName(),)
def get_output(self, idx):
return self._curvatures.GetOutput()
def execute_module(self):
self._curvatures.Update()
def streaming_execute_module(self):
self._curvatures.Update()
|
{
"content_hash": "207ad1e18f348ac24be26dbff960078f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6229050279329609,
"repo_name": "nagyistoce/devide",
"id": "4f98b0b514f8f3fb15115649ca8baaa5070ec340",
"size": "2148",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/user/testModule.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
}
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_linear" , "freidman3" , "duckdb")
|
{
"content_hash": "2413ee0c3081bf2851db0c0f55806df0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 66,
"avg_line_length": 31.75,
"alnum_prop": 0.7559055118110236,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "04ab2e297c928151c1e24371a6aae5396ab30e77",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/freidman3/ws_freidman3_SVR_linear_duckdb_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
"""
from "Deep Inside Convolutional Networks: Visualising Image Classification
Models and Saliency Maps"
http://arxiv.org/abs/1312.6034
"""
import theano
import theano.tensor as T
import treeano
import canopy
class SensitivityAnalysisOutput(canopy.handlers.NetworkHandlerImpl):
"""
adds a new input and output to the network
- the input is an int that is an index into the logit
- the output is a tensor of the same shape as the input representing
the result of the sensitivity analysis
idx_input_key: key of the index
output_key: key to put the sensitivity analysis in the results
input_name: node name of the input in the network
logit_name: node name of the logit in the network
"""
def __init__(self, idx_input_key, output_key, input_name, logit_name):
self.idx_input_key = idx_input_key
self.output_key = output_key
self.input_name = input_name
self.logit_name = logit_name
def transform_compile_function_kwargs(self, state, **kwargs):
assert self.idx_input_key not in kwargs["inputs"]
assert self.output_key not in kwargs["outputs"]
network = state.network
input_var = network[self.input_name].get_variable("default").variable
logit_var = network[self.logit_name].get_variable("default").variable
idx_var = T.iscalar()
target_var = logit_var[:, idx_var].sum()
sensitivity_var = T.grad(target_var, input_var)
kwargs["inputs"][self.idx_input_key] = idx_var
kwargs["outputs"][self.output_key] = sensitivity_var
return kwargs
def sensitivity_analysis_fn(input_name,
logit_name,
network,
handlers,
*args,
**kwargs):
"""
returns a function from input to sensitivity analysis heatmap
"""
handlers = [
SensitivityAnalysisOutput(idx_input_key="idx",
output_key="outputs",
input_name=input_name,
logit_name=logit_name),
canopy.handlers.override_hyperparameters(deterministic=True)
] + handlers
fn = canopy.handled_fn(network,
handlers=handlers,
inputs={"input": input_name},
outputs={})
def inner(in_val, idx_val):
return fn({"input": in_val, "idx": idx_val})["outputs"]
return inner
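# A hypothetical usage sketch (the names `my_network`, "x" and "logit"
# below are assumptions, not part of this module): compile the handled
# function once, then query saliency maps for arbitrary class indices.
#
#     fn = sensitivity_analysis_fn(input_name="x",
#                                  logit_name="logit",
#                                  network=my_network,
#                                  handlers=[])
#     heatmap = fn(images, 3)  # gradient of logit[:, 3].sum() w.r.t. images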
|
{
"content_hash": "585ab71ae350dbc9509c1a3b3ecfa2ad",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 77,
"avg_line_length": 33.92,
"alnum_prop": 0.592374213836478,
"repo_name": "nsauder/treeano",
"id": "3f6915fcb2daf43f128f5cf72b467cf6d48f17d7",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treeano/sandbox/sensitivity_analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16041"
},
{
"name": "Python",
"bytes": "628343"
}
],
"symlink_target": ""
}
|
"""
Quick bot for checking reciprocity of Wikidata-Kulturnav links.
@todo: Add some type of simple html output (e.g. bootstrap from json)
"""
import json
import os
import urllib2
import pywikibot.data.wikidataquery as wdquery
import wikidataStuff.wdqsLookup as wdqsLookup
import wikidataStuff.helpers as helpers
from kulturnavBot import KulturnavBot
def get_wdq(dataset=None, data=None):
"""Find all links from Wikidata to Kulturnav using WDQ.
@todo:
To replace with wdqs we need something like:
SELECT ?item ?value
WHERE {
?item p:P1248 ?data .
?item wdt:P1248 ?value .
{?data pq:P972 wd:Q20742915} UNION
{?data pq:P972 wd:Q20734454}
}
@param dataset: Q-id (or list of Q-ids) corresponding to a dataset.
@type dataset: str or list of str
@param data: dictionary to which data should be added
@type data: dict
@return: (timestamp, dict {qid: uuid})
@rtype: tuple (str, dict)
"""
# initialise if needed
data = data or {}
dataset = helpers.listify(dataset) or []
# make query
pid = '1248'
query = u'CLAIM[%s]' % pid
if dataset:
query += u'{CLAIM['
for d in dataset:
query += u'972:%s,' % d.lstrip('Q')
query = query.rstrip(',') + ']}'
wd_queryset = wdquery.QuerySet(query)
wd_query = wdquery.WikidataQuery(cacheMaxAge=0)
j = wd_query.query(wd_queryset, props=[str(pid), ])
# process data
j = j['props'][pid]
# extract pairs
for i in j:
data[u'Q%d' % i[0]] = i[2]
# get current timestamp
needle = u'Times :'
stats = urllib2.urlopen(u'http://wdq.wmflabs.org/stats').read()
stats = stats[stats.find(needle):]
time = stats[len(needle):stats.find('\n')].strip(' -')
return (time, data)
def get_kulturnav(dataset=None, data=None):
"""Find all links from Kulturnav to Wikidata.
@param dataset: the uuid corresponding to a dataset
@type dataset: str
@param data: object to add the matches to
@type data: dict
@return: matches as key-value pairs {uuid: qid}
@rtype: dict
"""
# initialise if needed
data = data or {}
# handle lists
if isinstance(dataset, list):
for d in dataset:
get_kulturnav(dataset=d, data=data)
return data
# single lookup
batch_size = 250
urlbase = 'http://kulturnav.org/api/search/'
if dataset:
urlbase += 'entity.dataset_r:%s,' % dataset
search_str = u'*%2F%2Fwww.wikidata.org%2Fentity%2FQ*'
matched_tags = ['entity.sameAs_s', 'concept.exactMatch_s']
for match in matched_tags:
offset = 0
search_url = urlbase + match + ':%s/%d/%d'
search_data = KulturnavBot.get_single_search_results(
search_url, search_str, offset, batch_size)
tag = match.split('_')[0]
while search_data:
find_kulturnav_matches(search_data, tag, data)
# continue
offset += batch_size
search_data = KulturnavBot.get_single_search_results(
search_url, search_str, offset, batch_size)
return data
def find_kulturnav_matches(search_data, tag, data):
"""Extract uuid and wikidata qid from search results.
Adds the results to the provided data dict.
@param search_data: the output of KulturnavBot.get_single_search_results()
@type search_data: list
@param tag: the property tag for sameAs/exactMatch
@type tag: str
@param data: object to add the matches to
@type data: dict
"""
needles = (u'http://www.wikidata.org', 'https://www.wikidata.org')
for entry in search_data:
# extract uuid and wikidata qid
uuid = entry[u'uuid']
matches = entry[u'properties'][tag]
for m in matches:
if m[u'value'].startswith(needles):
qid = m[u'value'].split('/')[-1]
data[uuid] = qid
def get_references(owner=None):
"""Query for the number of statements sourced through Kulturnav.
@param owner: the Qid of the dataset owning organisation
@type owner: str or None
@return: the number of sourced statements
@rtype: int
"""
query = ""\
"SELECT (count(?statement) as ?mentions) WHERE {\n" \
" ?statement prov:wasDerivedFrom ?ref .\n" \
" ?ref pr:P248 ?dataset .\n" \
" ?dataset wdt:P31 wd:Q1172284 .\n" \
" ?dataset wdt:P361 wd:Q16323066 .\n"
if owner:
query += " ?dataset wdt:P127 wd:%s .\n" % owner
query += "}"
# perform query
data = wdqsLookup.make_simple_wdqs_query(query)
return int(data[0]['mentions'])
def compare(k_dataset=None, w_dataset=None):
"""Compare the links from Wikidata to Kulturnav and vice versa.
@param k_dataset: the uuid corresponding to a dataset
@type k_dataset: str
@param w_dataset: the qid corresponding to a dataset
@type w_dataset: str
@return: comparison {_status, kulturnav_only, wikidata_only, mismatches}
@rtype: dict
"""
k_data = get_kulturnav(k_dataset)
time, w_data = get_wdq(w_dataset)
mismatch, k_only, w_only = identify_missing_and_missmatched(k_data, w_data)
# prepare response
status = {
'wdq_time': time,
'kulturnav_hits': len(k_data),
'kulturnav_dataset': k_dataset,
'wikidata_hits': len(w_data),
'wikidata_dataset': w_dataset,
'mismatches': len(mismatch)
}
response = {
'_status': status,
'kulturnav_only': k_only,
'wikidata_only': w_only,
'mismatches': mismatch
}
return response
def identify_missing_and_missmatched(k_data_orig, w_data_orig):
"""Identify any non-reciprocated links and any missmatches.
Where missmatches are links where the target is in turn pointing to another
object.
@param k_data_orig: the output of get_kulturnav
@type k_data_orig: dict
@param w_data_orig: the main (second) output of get_wdq
@type w_data_orig: dict
@return: (mismatch, k_only, w_only)
@rtype: tuple (list, dict, dict)
"""
# prevent originals from being modified
k_data = k_data_orig.copy()
w_data = w_data_orig.copy()
k_only = {}
mismatch = []
for uuid, qid in k_data.iteritems():
if qid in w_data.keys():
if w_data[qid] != uuid:
mismatch.append((uuid, qid, w_data[qid]))
del w_data[qid]
else:
k_only[uuid] = qid
for qid, uuid in w_data.iteritems():
if uuid in k_only.keys():
mismatch.append((qid, uuid, k_only[uuid]))
del k_only[uuid]
return (mismatch, k_only, w_data)
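# A small worked example (hypothetical ids): Q1 is reciprocated and drops
# out, u2 only links from Kulturnav, Q3 only links from Wikidata.
#
#     >>> identify_missing_and_missmatched({'u1': 'Q1', 'u2': 'Q2'},
#     ...                                  {'Q1': 'u1', 'Q3': 'u3'})
#     ([], {'u2': 'Q2'}, {'Q3': 'u3'})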
def test_all(out_dir):
"""Run test for all data."""
run_test(
dataset_id=None,
dataset_q=None,
owner_q=None,
outfile=os.path.join(out_dir, 'synk-All.json')
)
def test_ArkDes(out_dir):
"""Run test for ArkDes data."""
run_test(
dataset_id='2b7670e1-b44e-4064-817d-27834b03067c',
dataset_q='Q17373699',
owner_q='Q4356728',
outfile=os.path.join(out_dir, 'synk-Arkdes.json')
)
def test_SMM(out_dir):
"""Run test for SMM data."""
dataset_id = ['9a816089-2156-42ce-a63a-e2c835b20688',
'c43d8eba-030b-4542-b1ac-6a31a0ba6d00',
'51f2bd1f-7720-4f03-8d95-c22a85d26bbb',
'c6a7e732-650f-4fdb-a34c-366088f1ff0e',
'6a98b348-8c90-4ccc-9da7-42351bd4feb7',
'fb4faa4b-984a-404b-bdf7-9c24a298591e',
'b0fc1427-a9ab-4239-910a-cd02c02c4a76']
dataset_q = ['Q20734454',
'Q20103697',
'Q20742915',
'Q20669482',
'Q20742975',
'Q20742782',
'Q20669386']
run_test(
dataset_id=dataset_id,
dataset_q=dataset_q,
owner_q='Q10677695',
outfile=os.path.join(out_dir, 'synk-SMM.json')
)
def test_NatMus(out_dir):
"""Run test for NatMus data."""
run_test(
dataset_id='c6efd155-8433-4c58-adc9-72db80c6ce50',
dataset_q='Q22681075',
owner_q='Q842858',
outfile=os.path.join(out_dir, 'synk-Natmus.json')
)
def run_test(dataset_id, dataset_q, owner_q, outfile):
"""Run a test for a given set of parameters and output.
@param dataset_id: kulturnav uuid of the dataset
@type dataset_id: str or list of str
@param dataset_q: Wikidata qid of the dataset
@type dataset_q: str or list of str
@param owner_q: Wikidata qid of the "owner" organisation
@type owner_q: str
@param outfile: file to write to
@type outfile: str
"""
response = compare(dataset_id, dataset_q)
response['_status']['source_references'] = get_references(owner_q)
with open(outfile, 'w') as f:
f.write(json.dumps(response))
f.close()
if __name__ == "__main__":
import sys
usage = "Usage: python synkedKulturnav.py outdir\n" \
"\toutdir(optional): dir in which to stick output. " \
"Defaults to the 'synk_data' sub-directory."
argv = sys.argv[1:]
out_dir = os.path.join(os.path.split(__file__)[0], u'synk_data')
if len(argv) == 1:
out_dir = argv[0]
# create out_dir if needed
if out_dir and not os.path.exists(out_dir):
os.mkdir(out_dir)
# run tests
test_all(out_dir)
test_ArkDes(out_dir)
test_SMM(out_dir)
test_NatMus(out_dir)
|
{
"content_hash": "5b2a7f7473337e063c4ce4b20ce0f98c",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 79,
"avg_line_length": 29.79937304075235,
"alnum_prop": 0.5989901115085209,
"repo_name": "lokal-profil/wikidata_batches",
"id": "a323881e4a03334d366e6a7f9579af12de961fe8",
"size": "9548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KulturNav/synkedKulturnav.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "191683"
}
],
"symlink_target": ""
}
|
"""Fichier contenant la classe Condition, détaillée plus bas."""
from .instruction import Instruction
from .parser import expressions
class Condition(Instruction):
"""Classe définissant une condition.
Une condition est une instruction optionnellement suivie d'une suite
de tests.
Par exemple :
>>> si variable = 5:
... # Instructions si variable vaut 5
... sinon si titre(salle) = "...":
... # Instructions si le titre de la salle est '...'
... sinon:
... # Instruction sinon
... finsi
"""
def __init__(self):
"""Constructeur d'une condition"""
Instruction.__init__(self)
self.type = None
self.tests = None
def __str__(self):
ret = "|mr|" + self.type + "|ff|"
if self.type == "si" or self.type == "sinon si":
ret += " " + str(self.tests) + "|mr|:|ff|"
elif self.type == "sinon":
ret += "|mr|:|ff|"
return ret
@classmethod
def peut_interpreter(cls, chaine):
"""La chaîne peut-elle être interprétée par la classe Condition."""
mot_cles = ("si ", "sinon si ", "sinon", "finsi")
return any(chaine.startswith(m) for m in mot_cles)
@classmethod
def construire(cls, chaine):
"""Construit l'instruction.
L'instruction est sous la forme :
type tests
"""
taille_type = 0
chn_condition = chaine
condition = Condition()
mot_cles = ("si ", "sinon si ", "sinon", "finsi")
for mot in mot_cles:
if chaine.startswith(mot):
taille_type = len(mot)
condition.type = mot.rstrip(" ")
break
if chaine.endswith(":"):
chaine = chaine[:-1]
condition.tests, chaine = expressions["tests"].parser(
chaine[taille_type:])
return condition
def deduire_niveau(self, dernier_niveau):
"""Déduit le niveau de l'instruction."""
self.niveau = dernier_niveau
if self.type != "si":
self.niveau -= 1
def get_niveau_suivant(self):
"""Retourne le niveau de la prochaine instruction."""
niveau = self.niveau
if self.type != "finsi":
niveau += 1
return niveau
@property
def code_python(self):
"""Retourne le code Python de l'instruction."""
py_types = {
"si": "if",
"sinon si": "elif",
"sinon": "else",
"finsi": "",
}
py_code = py_types[self.type]
if self.type in ("si", "sinon si"):
py_code += " " + self.tests.code_python
if self.type != "finsi":
py_code += ":"
return py_code
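# A small sanity check (the scripting keywords themselves stay in French,
# as they are part of the in-game language):
#
#     >>> Condition.peut_interpreter("si variable = 5:")
#     True
#     >>> Condition.peut_interpreter("dire bonjour")
#     False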
|
{
"content_hash": "9196a9b9c2e8e99d83d639602cf4e671",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 75,
"avg_line_length": 28.594059405940595,
"alnum_prop": 0.5020775623268698,
"repo_name": "vlegoff/tsunami",
"id": "2c87d72de0747a2295a88a5f30fae399feeb9a3d",
"size": "4466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/scripting/condition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import datetime
import random
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import orm
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import aliased
from sqlalchemy.orm import defer
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import registry
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import with_loader_criteria
from sqlalchemy.orm.decl_api import declared_attr
from sqlalchemy.testing import eq_
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
class _Fixtures(_fixtures.FixtureTest):
@testing.fixture
def user_address_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=Address.id,
)
},
)
return User, Address
@testing.fixture
def order_item_fixture(self):
Order, Item = self.classes("Order", "Item")
orders, items, order_items = self.tables(
"orders", "items", "order_items"
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
# m2m
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
),
},
)
self.mapper_registry.map_imperatively(Item, items)
return Order, Item
@testing.fixture
def user_order_item_fixture(self):
User, Order, Item = self.classes("User", "Order", "Item")
users, orders, items, order_items = self.tables(
"users", "orders", "items", "order_items"
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"orders": relationship(Order, order_by=orders.c.id)},
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
# m2m
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
),
},
)
self.mapper_registry.map_imperatively(Item, items)
return User, Order, Item
@testing.fixture
def mixin_fixture(self):
users = self.tables.users
class HasFoob(object):
name = Column(String)
class UserWFoob(HasFoob, self.Comparable):
pass
self.mapper_registry.map_imperatively(
UserWFoob,
users,
)
return HasFoob, UserWFoob
@testing.fixture
def declattr_mixin_fixture(self):
users = self.tables.users
class HasFoob(object):
@declared_attr
def name(cls):
return Column(String)
class UserWFoob(HasFoob, self.Comparable):
pass
self.mapper_registry.map_imperatively(
UserWFoob,
users,
)
return HasFoob, UserWFoob
@testing.fixture
def multi_mixin_fixture(self):
orders, items = self.tables.orders, self.tables.items
order_items = self.tables.order_items
class HasFoob(object):
description = Column(String)
class HasBat(HasFoob):
some_nothing = Column(Integer)
class Order(HasFoob, self.Comparable):
pass
class Item(HasBat, self.Comparable):
pass
base = registry()
base.map_imperatively(
Order,
orders,
properties={"items": relationship("Item", secondary=order_items)},
)
base.map_imperatively(Item, items)
return HasFoob, Order, Item
class LoaderCriteriaTest(_Fixtures, testing.AssertsCompiledSQL):
"""
combinations:
with_loader_criteria
# for these we have mapper_criteria
select(mapper) # select_mapper
select(mapper.col, mapper.col) # select_mapper_col
select(func.count()).select_from(mapper) # select_from_mapper
select(a).join(mapper, a.target) # select_join_mapper
select(a).options(joinedload(a.target)) # select_joinedload_mapper
# for these we have aliased_criteria, inclaliased_criteria
select(aliased) # select_aliased
select(aliased.col, aliased.col) # select_aliased_col
select(func.count()).select_from(aliased) # select_from_aliased
select(a).join(aliased, a.target) # select_join_aliased
select(a).options(joinedload(a.target.of_type(aliased)))
# select_joinedload_aliased
"""
__dialect__ = "default"
def test_select_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User).options(
with_loader_criteria(User, User.name != "name")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name "
"FROM users WHERE users.name != :name_1",
)
def test_criteria_post_replace(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(User)
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
.with_only_columns(func.count())
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
def test_criteria_post_replace_legacy(self, user_address_fixture):
User, Address = user_address_fixture
s = fixture_session()
stmt = (
s.query(User)
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
.with_entities(func.count())
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
def test_select_from_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(sql.func.count())
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
def test_select_mapper_columns_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User.id, User.name).options(
with_loader_criteria(User, User.name != "name")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name "
"FROM users WHERE users.name != :name_1",
)
def test_select_join_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(User)
.join(User.addresses)
.options(
with_loader_criteria(Address, Address.email_address != "name")
)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
def test_select_joinm2m_mapper_mapper_criteria(self, order_item_fixture):
Order, Item = order_item_fixture
stmt = (
select(Order)
.join(Order.items)
.options(
with_loader_criteria(Item, Item.description != "description")
)
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen FROM orders "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"AND items.description != :description_1",
)
def test_select_joinedload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = select(User).options(
joinedload(User.addresses),
with_loader_criteria(Address, Address.email_address != "name"),
)
self.assert_compile(
stmt,
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address "
"FROM users LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1 "
"ORDER BY addresses_1.id",
)
def test_select_selectinload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = select(User).options(
selectinload(User.addresses),
with_loader_criteria(Address, Address.email_address != "name"),
)
s = Session(testing.db, future=True)
with self.sql_execution_asserter() as asserter:
s.execute(stmt).all()
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users",
[],
),
CompiledSQL(
"SELECT addresses.user_id AS addresses_user_id, addresses.id "
"AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.user_id IN ([POSTCOMPILE_primary_keys]) "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"primary_keys": [7, 8, 9, 10], "email_address_1": "name"}],
),
)
def test_select_lazyload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = (
select(User)
.options(
with_loader_criteria(Address, Address.email_address != "name"),
)
.order_by(User.id)
)
s = Session(testing.db, future=True)
with self.sql_execution_asserter() as asserter:
for u in s.execute(stmt).scalars():
u.addresses
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id",
[],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 7, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 8, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 9, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 10, "email_address_1": "name"}],
),
)
def test_select_aliased_inclaliased_criteria(self, user_address_fixture):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1).options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_select_from_aliased_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = (
select(sql.func.count())
.select_from(u1)
.options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users AS users_1 "
"WHERE users_1.name != :name_1",
)
def test_select_aliased_columns_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1.id, u1.name).options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_select_join_aliased_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
a1 = aliased(Address)
stmt = (
select(User)
.join(User.addresses.of_type(a1))
.options(
with_loader_criteria(
Address,
Address.email_address != "name",
include_aliases=True,
)
)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1",
)
def test_select_joinm2m_aliased_inclaliased_criteria(
self, order_item_fixture
):
Order, Item = order_item_fixture
i1 = aliased(Item)
stmt = (
select(Order)
.join(Order.items.of_type(i1))
.options(
with_loader_criteria(
Item,
Item.description != "description",
include_aliases=True,
)
)
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen FROM orders "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
"AND items_1.description != :description_1",
)
def test_select_aliased_aliased_criteria(self, user_address_fixture):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1).options(with_loader_criteria(u1, u1.name != "name"))
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_select_aliased_columns_aliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1.id, u1.name).options(
with_loader_criteria(u1, u1.name != "name")
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_joinedload_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
stmt = select(User).options(
joinedload(User.addresses),
with_loader_criteria(Address, Address.email_address != "email"),
)
with self.sql_execution_asserter() as asserter:
s.execute(stmt)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"users LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1 "
"ORDER BY addresses_1.id",
[{"email_address_1": "email"}],
),
)
def test_query_count_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
q = s.query(User).options(with_loader_criteria(User, User.id != 8))
with self.sql_execution_asserter() as asserter:
q.count()
asserter.assert_(
CompiledSQL(
"SELECT count(*) AS count_1 FROM (SELECT "
"users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id != :id_1) AS anon_1",
[{"id_1": 8}],
),
)
def test_query_count_after_the_fact_global_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
s = Session(testing.db)
# this essentially tests that the query.from_self() which takes
# place in count() can still be affected by the loader criteria,
# meaning it has to be an ORM query
q = s.query(User)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
with self.sql_execution_asserter() as asserter:
q.count()
asserter.assert_(
CompiledSQL(
"SELECT count(*) AS count_1 FROM (SELECT "
"users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id != :id_1) AS anon_1",
[{"id_1": 8}],
),
)
def test_select_count_subquery_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User).subquery()
stmt = (
select(sql.func.count())
.select_from(stmt)
.options(with_loader_criteria(User, User.id != 8))
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM (SELECT users.id AS id, "
"users.name AS name FROM users WHERE users.id != :id_1) AS anon_1",
)
def test_query_outerjoin_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
q = (
s.query(User, Address)
.outerjoin(User.addresses)
.options(
with_loader_criteria(
Address,
~Address.email_address.like("ed@%"),
)
)
.order_by(User.id)
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM users LEFT OUTER JOIN addresses "
"ON users.id = addresses.user_id AND "
"addresses.email_address NOT LIKE :email_address_1 "
"ORDER BY users.id",
)
eq_(
q.all(),
[
(User(id=7), Address(id=1)),
(User(id=8), None), # three addresses not here
(User(id=9), Address(id=5)),
(User(id=10), None),
],
)
def test_caching_and_binds_lambda(self, mixin_fixture):
HasFoob, UserWFoob = mixin_fixture
statement = select(UserWFoob).filter(UserWFoob.id < 10)
def go(value):
return statement.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.name == value,
include_aliases=True,
)
)
s = Session(testing.db, future=True)
for i in range(10):
name = random.choice(["ed", "fred", "jack"])
stmt = go(name)
eq_(s.execute(stmt).scalars().all(), [UserWFoob(name=name)])
def test_unnamed_param_dont_fail(self, multi_mixin_fixture):
HasFoob, Order, Item = multi_mixin_fixture
def go(stmt, value):
return stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description == "order 3",
include_aliases=True,
)
)
with Session(testing.db) as sess:
for i in range(10):
name = random.choice(["order 1", "order 3", "order 5"])
statement = select(Order)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Order(description="order 3")],
)
def test_declared_attr_no_warning(self, declattr_mixin_fixture):
HasFoob, UserWFoob = declattr_mixin_fixture
statement = select(UserWFoob).filter(UserWFoob.id < 10)
def go(value):
return statement.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.name == value,
include_aliases=True,
)
)
s = Session(testing.db, future=True)
for i in range(10):
name = random.choice(["ed", "fred", "jack"])
stmt = go(name)
eq_(s.execute(stmt).scalars().all(), [UserWFoob(name=name)])
def test_caching_and_binds_lambda_more_mixins(self, multi_mixin_fixture):
# By including non-mapped mixin HasBat in the middle of the
# hierarchy, we test issue #5766
HasFoob, Order, Item = multi_mixin_fixture
def go(stmt, value):
return stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description == value,
include_aliases=True,
)
)
with Session(testing.db) as sess:
for i in range(10):
name = random.choice(["order 1", "order 3", "order 5"])
statement = select(Order)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Order(description=name)],
)
name = random.choice(["item 1", "item 3", "item 5"])
statement = select(Item)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Item(description=name)],
)
def test_never_for_refresh(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.get(User, 8)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
s.refresh(u1)
eq_(u1.name, "ed")
def test_never_for_unexpire(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.get(User, 8)
s.expire(u1)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
eq_(u1.name, "ed")
def test_never_for_undefer(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.execute(
select(User).options(defer(User.name)).filter(User.id == 8)
).scalar_one()
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
eq_(u1.name, "ed")
class TemporalFixtureTest(testing.fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
class HasTemporal(object):
"""Mixin that identifies a class as having a timestamp column"""
timestamp = Column(
DateTime, default=datetime.datetime.utcnow, nullable=False
)
cls.HasTemporal = HasTemporal
def temporal_range(range_lower, range_upper):
return with_loader_criteria(
HasTemporal,
lambda cls: cls.timestamp.between(range_lower, range_upper),
include_aliases=True,
)
cls.temporal_range = staticmethod(temporal_range)
class Parent(HasTemporal, cls.DeclarativeBasic):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
class Child(HasTemporal, cls.DeclarativeBasic):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
parent_id = Column(
Integer, ForeignKey("parent.id"), nullable=False
)
@classmethod
def insert_data(cls, connection):
Parent, Child = cls.classes("Parent", "Child")
sess = Session(connection)
c1, c2, c3, c4, c5 = [
Child(timestamp=datetime.datetime(2009, 10, 15, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 20, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 12, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00)),
]
p1 = Parent(
timestamp=datetime.datetime(2009, 10, 15, 12, 00, 00),
children=[c1, c2, c3],
)
p2 = Parent(
timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00),
children=[c4, c5],
)
sess.add_all([p1, p2])
sess.commit()
@testing.combinations((True,), (False,), argnames="use_caching")
@testing.combinations(
(None,),
(orm.lazyload,),
(orm.joinedload,),
(orm.subqueryload,),
(orm.selectinload,),
argnames="loader_strategy",
)
def test_same_relationship_load_different_range(
self, use_caching, loader_strategy
):
"""This is the first test that exercises lazy loading, which uses
a lambda select, which then needs to transform the select to have
different bound parameters if it's not cached (or generate a working
list of parameters if it is), which then calls into a
with_loader_crieria that itself has another lambda inside of it,
which means we have to traverse and replace that lambda's expression,
but we can't evaluate it until compile time, so the inner lambda
holds onto the "transform" function so it can run it as needed.
this makes use of a new feature in visitors that exports a
"run this traversal later" function.
All of these individual features, cloning lambdaelements,
running replacement traversals later, are very new and need a lot
of tests, most likely in test/sql/test_lambdas.py.
the test is from the "temporal_range" example which is the whole
use case this feature is designed for and it is a whopper.
"""
Parent, Child = self.classes("Parent", "Child")
temporal_range = self.temporal_range
if use_caching:
Parent.children.property.bake_queries = True
eng = testing.db
else:
Parent.children.property.bake_queries = False
eng = testing.db.execution_options(compiled_cache=None)
sess = Session(eng, future=True)
if loader_strategy:
loader_options = (loader_strategy(Parent.children),)
else:
loader_options = ()
is_joined = (
loader_strategy and loader_strategy.__name__ == "joinedload"
)
p1 = sess.execute(
select(Parent).filter(
Parent.timestamp == datetime.datetime(2009, 10, 15, 12, 00, 00)
)
).scalar()
c1, c2 = p1.children[0:2]
c2_id = c2.id
p2 = sess.execute(
select(Parent).filter(
Parent.timestamp == datetime.datetime(2009, 10, 17, 12, 00, 00)
)
).scalar()
c5 = p2.children[1]
result = sess.execute(
select(Parent)
.execution_options(populate_existing=True)
.options(
temporal_range(
datetime.datetime(2009, 10, 16, 12, 00, 00),
datetime.datetime(2009, 10, 18, 12, 00, 00),
),
*loader_options
)
)
if is_joined:
result = result.unique()
parents = result.scalars().all()
assert parents[0] == p2
assert parents[0].children == [c5]
result = sess.execute(
select(Parent)
.execution_options(populate_existing=True)
.join(Parent.children)
.filter(Child.id == c2_id)
.options(
temporal_range(
datetime.datetime(2009, 10, 15, 11, 00, 00),
datetime.datetime(2009, 10, 18, 12, 00, 00),
),
*loader_options
)
)
if is_joined:
result = result.unique()
parents = result.scalars().all()
assert parents[0] == p1
assert parents[0].children == [c1, c2]
class RelationshipCriteriaTest(_Fixtures, testing.AssertsCompiledSQL):
__dialect__ = "default"
def _user_minus_edwood(self, User, Address):
return [
User(
addresses=[
Address(email_address="jack@bean.com", id=1, user_id=7)
],
id=7,
name="jack",
),
User(
addresses=[
Address(
email_address="ed@bettyboop.com",
id=3,
user_id=8,
),
Address(email_address="ed@lala.com", id=4, user_id=8),
],
id=8,
name="ed",
),
User(
addresses=[
Address(email_address="fred@fred.com", id=5, user_id=9)
],
id=9,
name="fred",
),
User(addresses=[], id=10, name="chuck"),
]
def _user_minus_edlala(self, User, Address):
return [
User(
addresses=[
Address(email_address="jack@bean.com", id=1, user_id=7)
],
id=7,
name="jack",
),
User(
addresses=[
Address(email_address="ed@wood.com", id=2, user_id=8),
Address(
email_address="ed@bettyboop.com",
id=3,
user_id=8,
),
],
id=8,
name="ed",
),
User(
addresses=[
Address(email_address="fred@fred.com", id=5, user_id=9)
],
id=9,
name="fred",
),
User(addresses=[], id=10, name="chuck"),
]
def test_joinedload_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
def go(value):
stmt = (
select(User)
.options(
joinedload(
User.addresses.and_(Address.email_address != value)
),
)
.order_by(User.id)
)
result = s.execute(stmt)
return result
for value in "ed@wood.com", "ed@lala.com":
s.close()
with self.sql_execution_asserter() as asserter:
result = go(value)
eq_(
result.scalars().unique().all(),
self._user_minus_edwood(*user_address_fixture)
if value == "ed@wood.com"
else self._user_minus_edlala(*user_address_fixture),
)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"users LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1 "
"ORDER BY users.id, addresses_1.id",
[{"email_address_1": value}],
),
)
@testing.combinations(
lambda r: r.scalar(),
lambda r: r.scalar_one(),
lambda r: r.scalar_one_or_none(),
argnames="get",
)
def test_joinedload_scalar(self, user_address_fixture, get):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
stmt = (
select(User)
.options(joinedload(User.addresses))
.where(User.name == "jack")
)
r = s.execute(stmt).unique()
jack = get(r)
eq_(jack.name, "jack")
def test_selectinload_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
def go(value):
stmt = (
select(User)
.options(
selectinload(
User.addresses.and_(Address.email_address != value)
),
)
.order_by(User.id)
)
result = s.execute(stmt)
return result
for value in (
"ed@wood.com",
"ed@lala.com",
"ed@wood.com",
"ed@lala.com",
):
s.close()
with self.sql_execution_asserter() as asserter:
result = go(value)
eq_(
result.scalars().unique().all(),
self._user_minus_edwood(*user_address_fixture)
if value == "ed@wood.com"
else self._user_minus_edlala(*user_address_fixture),
)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id"
),
CompiledSQL(
"SELECT addresses.user_id AS addresses_user_id, "
"addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.user_id IN ([POSTCOMPILE_primary_keys]) "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[
{
"primary_keys": [7, 8, 9, 10],
"email_address_1": value,
}
],
),
)
@testing.combinations((True,), (False,), argnames="use_compiled_cache")
def test_selectinload_nested_criteria(
self, user_order_item_fixture, use_compiled_cache
):
User, Order, Item = user_order_item_fixture
if not use_compiled_cache:
s = Session(
testing.db.execution_options(compiled_cache=None), future=True
)
else:
s = Session(testing.db, future=True)
def go(order_description, item_description):
stmt = (
select(User)
.where(User.id == 7)
.options(
selectinload(
User.orders.and_(
Order.description == order_description
)
).joinedload(
Order.items.and_(Item.description == item_description)
),
)
)
return s.execute(stmt)
for order_description, item_description, oid, iid in (
("order 3", "item 3", 3, 3),
("order 3", "item 4", 3, 4),
("order 3", "item 4", 3, 4),
("order 5", "item 5", 5, 5),
("order 3", "item 3", 3, 3),
("order 5", "item 5", 5, 5),
):
s.close()
with self.sql_execution_asserter() as asserter:
result = go(order_description, item_description)
eq_(
result.scalars().unique().all(),
[User(id=7, orders=[Order(id=oid, items=[Item(id=iid)])])],
)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users "
"WHERE users.id = :id_1",
[{"id_1": 7}],
),
CompiledSQL(
"SELECT orders.user_id AS orders_user_id, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"items_1.id AS items_1_id, "
"items_1.description AS items_1_description "
"FROM orders LEFT OUTER JOIN "
"(order_items AS order_items_1 "
"JOIN items AS items_1 "
"ON items_1.id = order_items_1.item_id "
"AND items_1.description = :description_1) "
"ON orders.id = order_items_1.order_id "
"WHERE orders.user_id IN ([POSTCOMPILE_primary_keys]) "
"AND orders.description = :description_2 "
"ORDER BY orders.id, items_1.id",
[
{
"description_1": item_description,
"primary_keys": [7],
"description_2": order_description,
}
],
),
)
def test_lazyload_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
def go(value):
s.close()
stmt = (
select(User)
.options(
lazyload(
User.addresses.and_(Address.email_address != value)
),
)
.order_by(User.id)
)
result = s.execute(stmt)
return result
for value in "ed@wood.com", "ed@lala.com":
with self.sql_execution_asserter() as asserter:
result = go(value)
eq_(
result.scalars().unique().all(),
self._user_minus_edwood(*user_address_fixture)
if value == "ed@wood.com"
else self._user_minus_edlala(*user_address_fixture),
)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id"
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 7, "email_address_1": value}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 8, "email_address_1": value}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 9, "email_address_1": value}],
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 10, "email_address_1": value}],
),
)
def test_subqueryload_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
def go(value):
s.close()
stmt = (
select(User)
.options(
subqueryload(
User.addresses.and_(Address.email_address != value)
),
)
.order_by(User.id)
)
result = s.execute(stmt)
return result
for value in "ed@wood.com", "ed@lala.com":
with self.sql_execution_asserter() as asserter:
result = go(value)
eq_(
result.scalars().unique().all(),
self._user_minus_edwood(*user_address_fixture)
if value == "ed@wood.com"
else self._user_minus_edlala(*user_address_fixture),
)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id"
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, addresses.email_address "
"AS addresses_email_address, anon_1.users_id "
"AS anon_1_users_id FROM (SELECT users.id AS users_id "
"FROM users) AS anon_1 "
"JOIN addresses ON anon_1.users_id = "
"addresses.user_id AND "
"addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"email_address_1": value}],
),
)
def test_query_join_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
q = s.query(User).join(
User.addresses.and_(Address.email_address != "email")
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
def test_select_join_local_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User).join(
User.addresses.and_(Address.email_address != "email")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users JOIN addresses "
"ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
def test_select_joinm2m_local_criteria(self, order_item_fixture):
Order, Item = order_item_fixture
stmt = select(Order).join(
Order.items.and_(Item.description != "description")
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen "
"FROM orders JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"AND items.description != :description_1",
)
def test_select_joinm2m_aliased_local_criteria(self, order_item_fixture):
Order, Item = order_item_fixture
i1 = aliased(Item)
stmt = select(Order).join(
Order.items.of_type(i1).and_(i1.description != "description")
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen "
"FROM orders JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
"AND items_1.description != :description_1",
)
|
{
"content_hash": "57279fe2c9060eafe42a874ab6822643",
"timestamp": "",
"source": "github",
"line_count": 1449,
"max_line_length": 79,
"avg_line_length": 33.32919254658385,
"alnum_prop": 0.5088830910672133,
"repo_name": "monetate/sqlalchemy",
"id": "571d2723200a2e09a4c1a8ba9e85f6ca5e069a6f",
"size": "48294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/test_relationship_criteria.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
}
|
import warnings
from . import _common
__all__ = [ # noqa: F822
'central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.misc.common is deprecated and has no attribute "
f"{name}. Try looking in scipy.misc instead.")
warnings.warn(f"Please use `{name}` from the `scipy.misc` namespace, "
"the `scipy.misc.common` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_common, name)
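# A minimal sketch (illustration only, not part of scipy) of how the PEP 562
# module-level __getattr__/__dir__ shim above behaves:
#
#     from scipy.misc import common
#     common.ascent        # in __all__: warns DeprecationWarning, then
#                          # delegates to scipy.misc._common.ascent
#     common.not_a_name    # not in __all__: __getattr__ raises AttributeError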
|
{
"content_hash": "14474f7413e55ea27b833b2746ef35f6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 25.6,
"alnum_prop": 0.6015625,
"repo_name": "matthew-brett/scipy",
"id": "8e556e2963148a23a2e2b321b150b19b4d08832b",
"size": "797",
"binary": false,
"copies": "2",
"ref": "refs/heads/polished-meson-windows",
"path": "scipy/misc/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
# If elftools is not installed, maybe we're running from the root or examples
# dir of the source distribution
try:
import elftools
except ImportError:
sys.path.extend(['.', '..'])
from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
def process_file(filename):
print('Processing file:', filename)
with open(filename, 'rb') as f:
section_info_lowlevel(f)
f.seek(0)
section_info_highlevel(f)
def section_info_lowlevel(stream):
print('Low level API...')
# We'll still be using the ELFFile context object. It's just too
# convenient to give up, even in the low-level API demonstration :-)
elffile = ELFFile(stream)
# The e_shnum ELF header field says how many sections there are in a file
print(' %s sections' % elffile['e_shnum'])
# Try to find the symbol table
for i in range(elffile['e_shnum']):
section_offset = elffile['e_shoff'] + i * elffile['e_shentsize']
# Parse the section header using structs.Elf_Shdr
stream.seek(section_offset)
section_header = elffile.structs.Elf_Shdr.parse_stream(stream)
if section_header['sh_type'] == 'SHT_SYMTAB':
# Some details about the section. Note that the section name is a
# pointer to the object's string table, so it's only a number
# here. To get to the actual name one would need to parse the string
# table section and extract the name from there (or use the
# high-level API!)
print(' Section name: %s, type: %s' % (
section_header['sh_name'], section_header['sh_type']))
break
else:
print(' No symbol table found. Perhaps this ELF has been stripped?')
def section_info_highlevel(stream):
print('High level API...')
elffile = ELFFile(stream)
# Just use the public methods of ELFFile to get what we need
# Note that section names, like everything read from the file, are bytes
# objects.
print(' %s sections' % elffile.num_sections())
section = elffile.get_section_by_name(b'.symtab')
if not section:
print(' No symbol table found. Perhaps this ELF has been stripped?')
return
# A section type is in its header, but the name was decoded and placed in
# a public attribute.
# bytes2str is used to print the name of the section for consistency of
# output between Python 2 and 3. The section name is a bytes object.
print(' Section name: %s, type: %s' %(
bytes2str(section.name), section['sh_type']))
# But there's more... If this section is a symbol table section (which is
# the case in the sample ELF file that comes with the examples), we can
# get some more information about it.
if isinstance(section, SymbolTableSection):
num_symbols = section.num_symbols()
print(" It's a symbol section with %s symbols" % num_symbols)
print(" The name of the last symbol in the section is: %s" % (
bytes2str(section.get_symbol(num_symbols - 1).name)))
if __name__ == '__main__':
for filename in sys.argv[1:]:
process_file(filename)
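# Usage sketch (hypothetical invocation; the section counts, names and
# symbol numbers below are made up and depend on the binary examined):
#
#     $ python elf_low_high_api.py some_binary.elf
#     Processing file: some_binary.elf
#     Low level API...
#       29 sections
#       Section name: 1, type: SHT_SYMTAB
#     High level API...
#       29 sections
#       Section name: .symtab, type: SHT_SYMTAB
#       It's a symbol section with 123 symbols
#       The name of the last symbol in the section is: main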
|
{
"content_hash": "c1cc1babd35bee5c0d134f31d08db640",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 80,
"avg_line_length": 37.91954022988506,
"alnum_prop": 0.6523188845104577,
"repo_name": "windyuuy/opera",
"id": "05d75642ac88fbc51600909441c012a86537ec0b",
"size": "3733",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/pyelftools/examples/elf_low_high_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "51642"
},
{
"name": "Batchfile",
"bytes": "35942"
},
{
"name": "C",
"bytes": "4303018"
},
{
"name": "C#",
"bytes": "35203"
},
{
"name": "C++",
"bytes": "207333360"
},
{
"name": "CMake",
"bytes": "25089"
},
{
"name": "CSS",
"bytes": "681256"
},
{
"name": "Dart",
"bytes": "24294"
},
{
"name": "Emacs Lisp",
"bytes": "25534"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "10400943"
},
{
"name": "IDL",
"bytes": "836"
},
{
"name": "Java",
"bytes": "2821184"
},
{
"name": "JavaScript",
"bytes": "14563996"
},
{
"name": "Lua",
"bytes": "13749"
},
{
"name": "Makefile",
"bytes": "55521"
},
{
"name": "Objective-C",
"bytes": "1211523"
},
{
"name": "Objective-C++",
"bytes": "6221908"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "82949"
},
{
"name": "Protocol Buffer",
"bytes": "280464"
},
{
"name": "Python",
"bytes": "12627773"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "894814"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
}
|
import unittest
from ...compatibility import StringIO
from ...workbook import Workbook
class TestWriteSheet(unittest.TestCase):
"""
Test the Workbook _write_sheet() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_sheet1(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet('Sheet1', 1, 0)
exp = """<sheet name="Sheet1" sheetId="1" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet2(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet('Sheet1', 1, 1)
exp = """<sheet name="Sheet1" sheetId="1" state="hidden" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet3(self):
"""Test the _write_sheet() method"""
self.workbook._write_sheet('Bits & Bobs', 1, 0)
exp = """<sheet name="Bits & Bobs" sheetId="1" r:id="rId1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def tearDown(self):
self.workbook.fileclosed = 1
|
{
"content_hash": "2e26d296a28708ed156dd1d13c014608",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 25.416666666666668,
"alnum_prop": 0.580327868852459,
"repo_name": "jkyeung/XlsxWriter",
"id": "f4c06f159cf9449b3f654e1f9b2b3d4fd7922b52",
"size": "1393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/workbook/test_write_sheet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
'''Unit tests for grit.gather.rc'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import rc
from grit import util
class RcUnittest(unittest.TestCase):
part_we_want = '''IDC_KLONKACC ACCELERATORS
BEGIN
"?", IDM_ABOUT, ASCII, ALT
"/", IDM_ABOUT, ASCII, ALT
END'''
def testSectionFromFile(self):
buf = '''IDC_SOMETHINGELSE BINGO
BEGIN
BLA BLA
BLA BLA
END
%s
IDC_KLONK BINGOBONGO
BEGIN
HONGO KONGO
END
''' % self.part_we_want
f = StringIO(buf)
out = rc.Section(f, 'IDC_KLONKACC')
out.ReadSection()
self.failUnless(out.GetText() == self.part_we_want)
out = rc.Section(util.PathFromRoot(r'grit/testdata/klonk.rc'),
'IDC_KLONKACC',
encoding='utf-16')
out.ReadSection()
out_text = out.GetText().replace('\t', '')
out_text = out_text.replace(' ', '')
self.part_we_want = self.part_we_want.replace(' ', '')
self.failUnless(out_text.strip() == self.part_we_want.strip())
def testDialog(self):
dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
ICON IDI_KLONK,IDC_MYICON,14,9,20,20
LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
BS_AUTORADIOBUTTON,46,51,84,10
// try a line where the ID is on the continuation line
LTEXT "blablablabla blablabla blablablablablablablabla blablabla",
ID_SMURF, whatever...
END
'''), 'IDD_ABOUTBOX')
dlg.Parse()
self.failUnless(len(dlg.GetTextualIds()) == 7)
self.failUnless(len(dlg.GetCliques()) == 6)
self.failUnless(dlg.GetCliques()[1].GetMessage().GetRealContent() ==
'klonk Version "yibbee" 1.0')
transl = dlg.Translate('en')
self.failUnless(transl.strip() == dlg.GetText().strip())
def testAlternateSkeleton(self):
dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "Yipee skippy",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
dlg.Parse()
alt_dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "XXXXXXXXX"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "XXXXXXXXXXXXXXXXX",IDC_STATIC,110978,10,119,8,
SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
alt_dlg.Parse()
transl = dlg.Translate('en', skeleton_gatherer=alt_dlg)
self.failUnless(transl.count('040704') and
transl.count('110978'))
self.failUnless(transl.count('Yipee skippy'))
def testMenu(self):
menu = rc.Menu(StringIO('''IDC_KLONK MENU
BEGIN
POPUP "&File """
BEGIN
MENUITEM "E&xit", IDM_EXIT
MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
POPUP "gonk"
BEGIN
MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
END
MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
ID_FILE_THISISAVERYLONGMENUCAPTIONTOTRYTOSEEIFWECANMAKETHEIDGOTOACONTINUATIONLINE
END
POPUP "&Help"
BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END'''), 'IDC_KLONK')
menu.Parse()
self.failUnless(len(menu.GetTextualIds()) == 6)
self.failUnless(len(menu.GetCliques()) == 1)
self.failUnless(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) ==
9)
transl = menu.Translate('en')
self.failUnless(transl.strip() == menu.GetText().strip())
def testVersion(self):
version = rc.Version(StringIO('''
VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,0,1
PRODUCTVERSION 1,0,0,1
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904e4"
BEGIN
VALUE "CompanyName", "TODO: <Company name>"
VALUE "FileDescription", "TODO: <File description>"
VALUE "FileVersion", "1.0.0.1"
VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
VALUE "InternalName", "res_format_test.dll"
VALUE "OriginalFilename", "res_format_test.dll"
VALUE "ProductName", "TODO: <Product name>"
VALUE "ProductVersion", "1.0.0.1"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
'''.strip()), 'VS_VERSION_INFO')
version.Parse()
self.failUnless(len(version.GetTextualIds()) == 1)
self.failUnless(len(version.GetCliques()) == 4)
transl = version.Translate('en')
self.failUnless(transl.strip() == version.GetText().strip())
def testRegressionDialogBox(self):
dialog = rc.Dialog(StringIO('''
IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE DIALOGEX 0, 0, 205, 157
STYLE DS_SETFONT | DS_FIXEDSYS | WS_CHILD
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
EDITTEXT IDC_SIDEBAR_WEATHER_NEW_CITY,3,27,112,14,ES_AUTOHSCROLL
DEFPUSHBUTTON "Add Location",IDC_SIDEBAR_WEATHER_ADD,119,27,50,14
LISTBOX IDC_SIDEBAR_WEATHER_CURR_CITIES,3,48,127,89,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Move Up",IDC_SIDEBAR_WEATHER_MOVE_UP,134,104,50,14
PUSHBUTTON "Move Down",IDC_SIDEBAR_WEATHER_MOVE_DOWN,134,121,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_WEATHER_DELETE,134,48,50,14
LTEXT "To see current weather conditions and forecasts in the USA, enter the zip code (example: 94043) or city and state (example: Mountain View, CA).",
IDC_STATIC,3,0,199,25
CONTROL "Fahrenheit",IDC_SIDEBAR_WEATHER_FAHRENHEIT,"Button",
BS_AUTORADIOBUTTON | WS_GROUP | WS_TABSTOP,3,144,51,10
CONTROL "Celsius",IDC_SIDEBAR_WEATHER_CELSIUS,"Button",
BS_AUTORADIOBUTTON,57,144,38,10
END'''.strip()), 'IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE')
dialog.Parse()
self.failUnless(len(dialog.GetTextualIds()) == 10)
def testRegressionDialogBox2(self):
dialog = rc.Dialog(StringIO('''
IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE DIALOG DISCARDABLE 0, 0, 264, 220
STYLE WS_CHILD
FONT 8, "MS Shell Dlg"
BEGIN
GROUPBOX "Email Filters",IDC_STATIC,7,3,250,190
LTEXT "Click Add Filter to create the email filter.",IDC_STATIC,16,41,130,9
PUSHBUTTON "Add Filter...",IDC_SIDEBAR_EMAIL_ADD_FILTER,196,38,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_EMAIL_REMOVE,196,174,50,14
PUSHBUTTON "", IDC_SIDEBAR_EMAIL_HIDDEN, 200, 178, 5, 5, NOT WS_VISIBLE
LISTBOX IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
LTEXT "You can prevent certain emails from showing up in the sidebar with a filter.",
IDC_STATIC,16,18,234,18
END'''.strip()), 'IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE')
dialog.Parse()
self.failUnless('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds())
def testRegressionMenuId(self):
menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "HyperFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
menu.Parse()
self.failUnless(len(menu.GetTextualIds()) == 2)
def testRegressionNewlines(self):
menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\nFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
menu.Parse()
transl = menu.Translate('en')
# Shouldn't find \\n (the \n shouldn't be changed to \\n)
self.failUnless(transl.find('\\\\n') == -1)
def testRegressionTabs(self):
menu = rc.Menu(StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\tFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
menu.Parse()
transl = menu.Translate('en')
# Shouldn't find \\t (the \t shouldn't be changed to \\t)
self.failUnless(transl.find('\\\\t') == -1)
def testEscapeUnescape(self):
original = 'Hello "bingo"\n How\\are\\you\\n?'
escaped = rc.Section.Escape(original)
self.failUnless(escaped == 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?')
unescaped = rc.Section.UnEscape(escaped)
self.failUnless(unescaped == original)
def testRegressionPathsWithSlashN(self):
original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif'
unescaped = rc.Section.UnEscape(original)
self.failUnless(unescaped == '..\\..\\trs\\res\\nav_first.gif')
def testRegressionDialogItemsTextOnly(self):
dialog = rc.Dialog(StringIO('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_DISABLED | WS_CAPTION | WS_SYSMENU
CAPTION "Search"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
GROUPBOX "Select search buttons and options",-1,7,5,266,262
CONTROL "",IDC_OPTIONS,"SysTreeView32",TVS_DISABLEDRAGDROP |
WS_BORDER | WS_TABSTOP | 0x800,16,19,248,218
LTEXT "Use Google site:",-1,26,248,52,8
COMBOBOX IDC_GOOGLE_HOME,87,245,177,256,CBS_DROPDOWNLIST |
WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Restore Defaults...",IDC_RESET,187,272,86,14
END'''), 'IDD_OPTIONS_SEARCH')
dialog.Parse()
translateables = [c.GetMessage().GetRealContent()
for c in dialog.GetCliques()]
self.failUnless('Select search buttons and options' in translateables)
self.failUnless('Use Google site:' in translateables)
def testAccelerators(self):
acc = rc.Accelerators(StringIO('''\
IDR_ACCELERATOR1 ACCELERATORS
BEGIN
"^C", ID_ACCELERATOR32770, ASCII, NOINVERT
"^V", ID_ACCELERATOR32771, ASCII, NOINVERT
VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT
END
'''), 'IDR_ACCELERATOR1')
acc.Parse()
self.failUnless(len(acc.GetTextualIds()) == 4)
self.failUnless(len(acc.GetCliques()) == 0)
transl = acc.Translate('en')
self.failUnless(transl.strip() == acc.GetText().strip())
def testRegressionEmptyString(self):
dlg = rc.Dialog(StringIO('''\
IDD_CONFIRM_QUIT_GD_DLG DIALOGEX 0, 0, 267, 108
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_CAPTION
EXSTYLE WS_EX_TOPMOST
CAPTION "Google Desktop"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
DEFPUSHBUTTON "&Yes",IDYES,82,87,50,14
PUSHBUTTON "&No",IDNO,136,87,50,14
ICON 32514,IDC_STATIC,7,9,21,20
EDITTEXT IDC_TEXTBOX,34,7,231,60,ES_MULTILINE | ES_READONLY | NOT WS_BORDER
CONTROL "",
IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
WS_TABSTOP,33,70,231,10
END'''), 'IDD_CONFIRM_QUIT_GD_DLG')
dlg.Parse()
def Check():
self.failUnless(transl.count('IDC_ENABLE_GD_AUTOSTART'))
self.failUnless(transl.count('END'))
transl = dlg.Translate('de', pseudo_if_not_available=True,
fallback_to_english=True)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=True,
fallback_to_english=False)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=False,
fallback_to_english=True)
Check()
transl = dlg.Translate('de', pseudo_if_not_available=False,
fallback_to_english=False)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=True,
fallback_to_english=True)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=True,
fallback_to_english=False)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=False,
fallback_to_english=True)
Check()
transl = dlg.Translate('en', pseudo_if_not_available=False,
fallback_to_english=False)
Check()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c60c6e1dfc4dcb3231ebefd23892eea3",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 189,
"avg_line_length": 35.23160762942779,
"alnum_prop": 0.6211910286156226,
"repo_name": "scheib/chromium",
"id": "4f53b1269dba3bbf8a2059076c6614575f55c462",
"size": "13120",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/grit/grit/gather/rc_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import uuid
from copy import copy
from collections import defaultdict
from django.conf.urls import url
from django.db import connections
from django.utils.translation import ugettext_lazy as _, ungettext_lazy as __
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.forms import SQLSelectForm
from debug_toolbar.utils import render_stacktrace
from debug_toolbar.panels.sql.utils import reformat_sql, contrasting_color_generator
from debug_toolbar.panels.sql.tracking import wrap_cursor, unwrap_cursor
from debug_toolbar.panels.sql import views
def get_isolation_level_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT: _("Autocommit"),
psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED: _("Read uncommitted"),
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED: _("Read committed"),
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ: _("Repeatable read"),
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE: _("Serializable"),
}
else:
raise ValueError(vendor)
return choices.get(level)
def get_transaction_status_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.TRANSACTION_STATUS_IDLE: _("Idle"),
psycopg2.extensions.TRANSACTION_STATUS_ACTIVE: _("Active"),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS: _("In transaction"),
psycopg2.extensions.TRANSACTION_STATUS_INERROR: _("In error"),
psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN: _("Unknown"),
}
else:
raise ValueError(vendor)
return choices.get(level)
class SQLPanel(Panel):
"""
Panel that displays information about the SQL queries run while processing
the request.
"""
def __init__(self, *args, **kwargs):
super(SQLPanel, self).__init__(*args, **kwargs)
self._offset = dict((k, len(connections[k].queries)) for k in connections)
self._sql_time = 0
self._num_queries = 0
self._queries = []
self._databases = {}
self._transaction_status = {}
self._transaction_ids = {}
def get_transaction_id(self, alias):
if alias not in connections:
return
conn = connections[alias].connection
if not conn:
return
if conn.vendor == 'postgresql':
cur_status = conn.get_transaction_status()
else:
raise ValueError(conn.vendor)
last_status = self._transaction_status.get(alias)
self._transaction_status[alias] = cur_status
if not cur_status:
# No available state
return None
if cur_status != last_status:
if cur_status:
self._transaction_ids[alias] = uuid.uuid4().hex
else:
self._transaction_ids[alias] = None
return self._transaction_ids[alias]
def record(self, alias, **kwargs):
self._queries.append((alias, kwargs))
if alias not in self._databases:
self._databases[alias] = {
'time_spent': kwargs['duration'],
'num_queries': 1,
}
else:
self._databases[alias]['time_spent'] += kwargs['duration']
self._databases[alias]['num_queries'] += 1
self._sql_time += kwargs['duration']
self._num_queries += 1
# Implement the Panel API
nav_title = _("SQL")
@property
def nav_subtitle(self):
return __("%d query in %.2fms", "%d queries in %.2fms", self._num_queries
) % (self._num_queries, self._sql_time)
@property
def title(self):
count = len(self._databases)
return __(
'SQL queries from %(count)d connection', 'SQL queries from %(count)d connections', count
) % {
'count': count
}
template = 'debug_toolbar/panels/sql.html'
@classmethod
def get_urls(cls):
return [
url(r'^sql_select/$', views.sql_select, name='sql_select'),
url(r'^sql_explain/$', views.sql_explain, name='sql_explain'),
url(r'^sql_profile/$', views.sql_profile, name='sql_profile'),
]
def enable_instrumentation(self):
# This is thread-safe because database connections are thread-local.
for connection in connections.all():
wrap_cursor(connection, self)
def disable_instrumentation(self):
for connection in connections.all():
unwrap_cursor(connection)
def process_response(self, request, response):
colors = contrasting_color_generator()
trace_colors = defaultdict(lambda: next(colors))
query_duplicates = defaultdict(lambda: defaultdict(int))
if self._queries:
width_ratio_tally = 0
factor = int(256.0 / (len(self._databases) * 2.5))
for n, db in enumerate(self._databases.values()):
rgb = [0, 0, 0]
color = n % 3
rgb[color] = 256 - n / 3 * factor
nn = color
# XXX: pretty sure this is horrible after so many aliases
while rgb[color] < factor:
nc = min(256 - rgb[color], 256)
rgb[color] += nc
nn += 1
if nn > 2:
nn = 0
rgb[nn] = nc
db['rgb_color'] = rgb
trans_ids = {}
trans_id = None
i = 0
for alias, query in self._queries:
query_duplicates[alias][query["raw_sql"]] += 1
trans_id = query.get('trans_id')
last_trans_id = trans_ids.get(alias)
if trans_id != last_trans_id:
if last_trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
trans_ids[alias] = trans_id
if trans_id:
query['starts_trans'] = True
if trans_id:
query['in_trans'] = True
query['alias'] = alias
if 'iso_level' in query:
query['iso_level'] = get_isolation_level_display(
query['vendor'], query['iso_level']
)
if 'trans_status' in query:
query['trans_status'] = get_transaction_status_display(
query['vendor'], query['trans_status']
)
query['form'] = SQLSelectForm(auto_id=None, initial=copy(query))
if query['sql']:
query['sql'] = reformat_sql(query['sql'])
query['rgb_color'] = self._databases[alias]['rgb_color']
try:
query['width_ratio'] = (query['duration'] / self._sql_time) * 100
query['width_ratio_relative'
] = (100.0 * query['width_ratio'] / (100.0 - width_ratio_tally))
except ZeroDivisionError:
query['width_ratio'] = 0
query['width_ratio_relative'] = 0
query['start_offset'] = width_ratio_tally
query['end_offset'] = query['width_ratio'] + query['start_offset']
width_ratio_tally += query['width_ratio']
query['stacktrace'] = render_stacktrace(query['stacktrace'])
i += 1
query['trace_color'] = trace_colors[query['stacktrace']]
if trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
# Queries are duplicates only if there are at least 2 of them.
# Also, to hide queries, we need to give all the duplicate groups an id
query_duplicates = dict(
(
alias, dict(
(query, duplicate_count) for query, duplicate_count in queries.items()
if duplicate_count >= 2
)
) for alias, queries in query_duplicates.items()
)
for alias, query in self._queries:
try:
duplicates_count = query_duplicates[alias][query["raw_sql"]]
query["duplicate_count"] = duplicates_count
except KeyError:
pass
for alias, alias_info in self._databases.items():
try:
alias_info["duplicate_count"] = sum(e for e in query_duplicates[alias].values())
except KeyError:
pass
self.record_stats(
{
'databases': sorted(self._databases.items(), key=lambda x: -x[1]['time_spent']),
'queries': [q for a, q in self._queries],
'sql_time': self._sql_time,
}
)
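# Worked example (illustrative numbers) of the width-ratio bookkeeping in
# process_response() above: with three queries of 30ms, 50ms and 20ms
# (self._sql_time == 100), iterating in order gives
#
#   q1: width_ratio = 30.0, width_ratio_relative = 100*30/(100-0)  = 30.0
#   q2: width_ratio = 50.0, width_ratio_relative = 100*50/(100-30) ~ 71.4
#   q3: width_ratio = 20.0, width_ratio_relative = 100*20/(100-80) = 100.0
#
# so each query's relative width is its share of the timeline that remains,
# and start_offset/end_offset tile the rendered bar from 0 to 100.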
|
{
"content_hash": "af8833ca8dd4a077fff0f62ca5fc6a8e",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 100,
"avg_line_length": 36.93469387755102,
"alnum_prop": 0.5420488451762626,
"repo_name": "looker/sentry",
"id": "8eda4650c2fda2565745276ae6ad3aa1b19ef895",
"size": "9049",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/debug_toolbar/panels/sql/panel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
"""Provides device automations for Cover."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ABOVE,
CONF_BELOW,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.entity import get_supported_features
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
# mypy: disallow-any-generics
POSITION_CONDITION_TYPES = {"is_position", "is_tilt_position"}
STATE_CONDITION_TYPES = {"is_open", "is_closed", "is_opening", "is_closing"}
POSITION_CONDITION_SCHEMA = vol.All(
DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(POSITION_CONDITION_TYPES),
vol.Optional(CONF_ABOVE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(STATE_CONDITION_TYPES),
}
)
CONDITION_SCHEMA = vol.Any(POSITION_CONDITION_SCHEMA, STATE_CONDITION_SCHEMA)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device conditions for Cover devices."""
registry = await entity_registry.async_get_registry(hass)
conditions: list[dict[str, str]] = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
supported_features = get_supported_features(hass, entry.entity_id)
supports_open_close = supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE)
# Add conditions for each entity that belongs to this integration
base_condition = {
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
}
if supports_open_close:
conditions += [
{**base_condition, CONF_TYPE: cond} for cond in STATE_CONDITION_TYPES
]
if supported_features & SUPPORT_SET_POSITION:
conditions.append({**base_condition, CONF_TYPE: "is_position"})
if supported_features & SUPPORT_SET_TILT_POSITION:
conditions.append({**base_condition, CONF_TYPE: "is_tilt_position"})
return conditions
async def async_get_condition_capabilities(
hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
"""List condition capabilities."""
if config[CONF_TYPE] not in ["is_position", "is_tilt_position"]:
return {}
return {
"extra_fields": vol.Schema(
{
vol.Optional(CONF_ABOVE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW, default=100): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
)
}
@callback
def async_condition_from_config(
hass: HomeAssistant, config: ConfigType
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config[CONF_TYPE] in STATE_CONDITION_TYPES:
if config[CONF_TYPE] == "is_open":
state = STATE_OPEN
elif config[CONF_TYPE] == "is_closed":
state = STATE_CLOSED
elif config[CONF_TYPE] == "is_opening":
state = STATE_OPENING
elif config[CONF_TYPE] == "is_closing":
state = STATE_CLOSING
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
if config[CONF_TYPE] == "is_position":
position_attr = "current_position"
if config[CONF_TYPE] == "is_tilt_position":
position_attr = "current_tilt_position"
min_pos = config.get(CONF_ABOVE)
max_pos = config.get(CONF_BELOW)
@callback
def check_numeric_state(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Return whether the criteria are met."""
return condition.async_numeric_state(
hass, config[ATTR_ENTITY_ID], max_pos, min_pos, attribute=position_attr
)
return check_numeric_state
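# Example (hypothetical device and entity ids) of a config accepted by
# POSITION_CONDITION_SCHEMA above; at least one of CONF_ABOVE/CONF_BELOW is
# required, and both are constrained to the range 0..100.
EXAMPLE_POSITION_CONDITION = {
    CONF_CONDITION: "device",
    CONF_DEVICE_ID: "abc123",
    CONF_DOMAIN: DOMAIN,
    CONF_ENTITY_ID: "cover.garage_door",
    CONF_TYPE: "is_position",
    CONF_ABOVE: 20,
    CONF_BELOW: 80,
}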
|
{
"content_hash": "d4a48048967ee5a970b8f2af0dd07767",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 85,
"avg_line_length": 32.71518987341772,
"alnum_prop": 0.628554846198491,
"repo_name": "jawilson/home-assistant",
"id": "cca608187a27ab6ef485609ef6e2bb4e1289cc5d",
"size": "5169",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "homeassistant/components/cover/device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from .voxel_dir import task_dir, storage_dir, image_dir, image_sha_dir, get_image_sha_dir, log_dir
|
{
"content_hash": "2dc0e42fcbd0182413ed44108e989e6a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 98,
"avg_line_length": 98,
"alnum_prop": 0.7551020408163265,
"repo_name": "VisionSystemsInc/voxel_globe",
"id": "b4adc1a4e66c24e440460c768da7c254ce4a3a84",
"size": "98",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "voxel_globe/tools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10038"
},
{
"name": "HTML",
"bytes": "124988"
},
{
"name": "JavaScript",
"bytes": "296605"
},
{
"name": "Nginx",
"bytes": "2623"
},
{
"name": "Python",
"bytes": "377549"
},
{
"name": "Shell",
"bytes": "100713"
}
],
"symlink_target": ""
}
|
import abc
import collections
import uuid
class SessionBase(collections.UserDict):
def __init__(self, session_key=None):
super(SessionBase, self).__init__()
self.session_key = session_key
self.modified = False
def is_empty(self):
return not (self.session_key and self.data)
def __setitem__(self, key, value):
self.data[key] = value
self.modified = True
def __delitem__(self, key):
del self.data[key]
self.modified = True
def pop(self, key, default=None):
self.modified = self.modified or key in self.data
return self.data.pop(key, default)
def setdefault(self, key, value):
if key in self.data:
return self.data[key]
else:
self.modified = True
self.data[key] = value
return value
def update(self, dict_):
self.data.update(dict_)
self.modified = True
def clear(self):
self.data = {}
self.accessed = True
self.modified = True
def _get_or_create_session_key(self):
if not self.session_key or not self.exists(self.session_key):
self.session_key = self._new_session_key()
return self.session_key
def _new_session_key(self):
return uuid.uuid4().hex
@abc.abstractmethod
def create(self):
pass
@abc.abstractmethod
def exists(self, session_key=None):
pass
@abc.abstractmethod
def save(self):
pass
@abc.abstractmethod
def delete(self, session_key=None):
pass
def before_request_hook(request, view_func, app):
session_key = request.cookies.get(app.config['SESSION_COOKIE_NAME'])
request.session = app.session_class(session_key=session_key)
def after_request_hook(request, response, view_func, app):
session = request.session
if not session.modified:
return
config = app.config
cookie_name = config['SESSION_COOKIE_NAME']
if session.is_empty():
if cookie_name in request.cookies:
session.delete()
response.delete_cookie(cookie_name)
else:
session.save()
response.set_cookie(
cookie_name, value=session.session_key,
max_age=config['SESSION_COOKIE_MAX_AGE'],
expires=None, path=config['SESSION_COOKIE_PATH'],
domain=config['SESSION_COOKIE_DOMAIN'],
secure=config['SESSION_COOKIE_SECURE'],
httponly=config['SESSION_COOKIE_HTTPONLY']
)
return response
class MemorySession(SessionBase):
_sessions = {}
def __init__(self, session_key=None):
super(MemorySession, self).__init__(session_key=session_key)
self.load(session_key)
def load(self, session_key):
_sessions = self.__class__._sessions
if session_key not in _sessions:
self.create()
else:
self.data = _sessions[self.session_key]
def exists(self, key):
_sessions = self.__class__._sessions
return key in _sessions
def create(self):
self.modified = True
self.save(must_create=True)
def save(self, must_create=False):
session_key = self._get_or_create_session_key()
_sessions = self.__class__._sessions
if self.data or must_create:
if session_key in _sessions and not self.modified:
self.data = _sessions[self.session_key]
else:
_sessions[self.session_key] = self.data
else:
self.delete()
def delete(self, session_key=None):
_sessions = self.__class__._sessions
session_key = session_key or self.session_key
_sessions.pop(session_key, None)
self.modified = True
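# A minimal usage sketch (illustration only) of the MemorySession lifecycle
# defined above; run this module directly to exercise it.
if __name__ == '__main__':
    session = MemorySession()    # no key, so load() falls through to create()
    session['user_id'] = 42      # __setitem__ marks the session modified
    session.save()

    restored = MemorySession(session_key=session.session_key)
    assert restored['user_id'] == 42
    restored.delete()            # removes the shared class-level entry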
|
{
"content_hash": "f17f8ea4453a037d76354cd32173d623",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 72,
"avg_line_length": 27.772058823529413,
"alnum_prop": 0.5962404024357956,
"repo_name": "mozillazg/bustard",
"id": "1ebd7ee019cf79c890295ee8bc51ac580900fb13",
"size": "3801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bustard/sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19011"
},
{
"name": "Makefile",
"bytes": "112"
},
{
"name": "Python",
"bytes": "157593"
}
],
"symlink_target": ""
}
|
""""
Operators Adventure
Author: Ignacio Avas (iavas@sophilabs.com)
"""
import codecs
import io
import sys
import unittest
from story.adventures import AdventureVerificationError, BaseAdventure
from story.translation import gettext as _
import ast
class TestOutput(unittest.TestCase):
"""Variables Adventure test"""
def __init__(self, candidate_code, file_name='<inline>'):
"""Init the test"""
super(TestOutput, self).__init__()
self.candidate_code = candidate_code
self.file_name = file_name
def setUp(self):
self.__old_stdout = sys.stdout
sys.stdout = self.__mockstdout = io.StringIO()
def tearDown(self):
sys.stdout = self.__old_stdout
self.__mockstdout.close()
def runTest(self):
"""Makes a simple test of the output"""
body = ast.parse(self.candidate_code, self.file_name, 'exec')
code = compile(self.candidate_code, self.file_name, 'exec')
mult_instructions = [
node for node in ast.walk(body)
if isinstance(node, ast.Mult)
]
self.assertGreater(len(mult_instructions),
0,
"It should have at least one duplication"
)
exec(code)
self.assertMultiLineEqual('ka'*10+'\n',
self.__mockstdout.getvalue(),
"Should have printed ka 10 times")
class Adventure(BaseAdventure):
title = _('Operators')
@classmethod
def test(cls, sourcefile):
"""Test against the provided file"""
suite = unittest.TestSuite()
raw_program = codecs.open(sourcefile).read()
suite.addTest(TestOutput(raw_program, sourcefile))
result = unittest.TextTestRunner().run(suite)
if not result.wasSuccessful():
raise AdventureVerificationError()
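# For reference, one candidate solution the test above accepts is simply:
#
#     print('ka' * 10)
#
# It contains an ast.Mult node and writes 'ka' repeated ten times plus a
# trailing newline to stdout, satisfying both assertions in runTest.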
|
{
"content_hash": "5cff42c0c42ae1d9ad88b17924580f38",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 30.822580645161292,
"alnum_prop": 0.5944531658817374,
"repo_name": "sophilabs/py101",
"id": "615f307e98a418708956d48707aa87b89d148ae3",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py101/operators/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2957"
},
{
"name": "Python",
"bytes": "39468"
}
],
"symlink_target": ""
}
|
"""
Image cache class
Images in the cache folder are stored in a folder whose name is the image ID.
When an image is discovered to be no longer in use, a timestamp is added to
its folder.
The timestamp is itself a folder - this is because we can use the VMware APIs
for creating and deleting folders, which greatly simplifies things. The
timestamp encodes the time, on the compute node, at which the image was first
seen to be unused.
At each aging iteration we check if the image can be aged.
This is done by comparing the current nova compute time to the time embedded
in the timestamp. If the time exceeds the configured aging time then
the parent folder, that is the image ID folder, will be deleted.
That effectively ages the cached image.
If an image is used then the timestamps will be deleted.
When accessing a timestamp we make use of locking. This ensures that aging
will not delete an image during the spawn operation. When spawning,
the timestamp folder will be locked and the timestamps will be purged.
This ensures that an image is not deleted during the spawn.
"""
from oslo.config import cfg
from nova.i18n import _
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.virt import imagecache
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('remove_unused_original_minimum_age_seconds',
'nova.virt.imagecache')
TIMESTAMP_PREFIX = 'ts-'
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'
class ImageCacheManager(imagecache.ImageCacheManager):
def __init__(self, session, base_folder):
super(ImageCacheManager, self).__init__()
self._session = session
self._base_folder = base_folder
self._ds_browser = {}
def _folder_delete(self, ds_path, dc_ref):
try:
ds_util.file_delete(self._session, ds_path, dc_ref)
except (error_util.CannotDeleteFileException,
error_util.FileFaultException,
error_util.FileLockedException) as e:
# There may be more than one process or thread that tries
# to delete the file.
LOG.warning(_("Unable to delete %(file)s. Exception: %(ex)s"),
{'file': ds_path, 'ex': e})
except error_util.FileNotFoundException:
LOG.debug("File not found: %s", ds_path)
def timestamp_folder_get(self, ds_path, image_id):
"""Returns the timestamp folder."""
return ds_path.join(image_id)
def timestamp_cleanup(self, dc_ref, ds_browser, ds_path):
ts = self._get_timestamp(ds_browser, ds_path)
if ts:
ts_path = ds_path.join(ts)
LOG.debug("Timestamp path %s exists. Deleting!", ts_path)
# Image is used - no longer need timestamp folder
self._folder_delete(ts_path, dc_ref)
def _get_timestamp(self, ds_browser, ds_path):
files = ds_util.get_sub_folders(self._session, ds_browser, ds_path)
if files:
for file in files:
if file.startswith(TIMESTAMP_PREFIX):
return file
def _get_timestamp_filename(self):
return '%s%s' % (TIMESTAMP_PREFIX,
timeutils.strtime(fmt=TIMESTAMP_FORMAT))
def _get_datetime_from_filename(self, timestamp_filename):
ts = timestamp_filename.lstrip(TIMESTAMP_PREFIX)
return timeutils.parse_strtime(ts, fmt=TIMESTAMP_FORMAT)
def _get_ds_browser(self, ds_ref):
ds_browser = self._ds_browser.get(ds_ref.value)
if not ds_browser:
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(), ds_ref,
"Datastore", "browser")
self._ds_browser[ds_ref.value] = ds_browser
return ds_browser
def _list_datastore_images(self, ds_path, datastore):
"""Return a list of the images present in _base.
This method returns a dictionary with the following keys:
- unexplained_images
- originals
"""
ds_browser = self._get_ds_browser(datastore.ref)
originals = ds_util.get_sub_folders(self._session, ds_browser,
ds_path)
return {'unexplained_images': [],
'originals': originals}
def _age_cached_images(self, context, datastore, dc_info,
ds_path):
"""Ages cached images."""
age_seconds = CONF.remove_unused_original_minimum_age_seconds
unused_images = self.originals - self.used_images
ds_browser = self._get_ds_browser(datastore.ref)
for image in unused_images:
path = self.timestamp_folder_get(ds_path, image)
            # Lock to ensure that the spawn will not try to access an image
            # that is currently being deleted on the datastore.
with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
external=True):
ts = self._get_timestamp(ds_browser, path)
if not ts:
ts_path = path.join(self._get_timestamp_filename())
try:
ds_util.mkdir(self._session, ts_path, dc_info.ref)
except error_util.FileAlreadyExistsException:
LOG.debug("Timestamp already exists.")
LOG.info(_("Image %s is no longer used by this node. "
"Pending deletion!"), image)
else:
dt = self._get_datetime_from_filename(str(ts))
if timeutils.is_older_than(dt, age_seconds):
LOG.info(_("Image %s is no longer used. "
"Deleting!"), path)
# Image has aged - delete the image ID folder
self._folder_delete(path, dc_info.ref)
# If the image is used and the timestamp file exists then we delete
# the timestamp.
for image in self.used_images:
path = self.timestamp_folder_get(ds_path, image)
with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
external=True):
self.timestamp_cleanup(dc_info.ref, ds_browser,
path)
def update(self, context, instances, datastores_info):
"""The cache manager entry point.
This will invoke the cache manager. This will update the cache
according to the defined cache management scheme. The information
populated in the cached stats will be used for the cache management.
"""
# read running instances data
running = self._list_running_instances(context, instances)
self.used_images = set(running['used_images'].keys())
# perform the aging and image verification per datastore
for (datastore, dc_info) in datastores_info:
ds_path = datastore.build_path(self._base_folder)
images = self._list_datastore_images(ds_path, datastore)
self.originals = images['originals']
self._age_cached_images(context, datastore, dc_info, ds_path)
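# A self-contained sketch of the timestamp round trip described in the module
# docstring. timeutils.strtime/parse_strtime wrap strftime/strptime, so the
# stdlib datetime is used directly here; the threshold value is illustrative.
if __name__ == '__main__':
    from datetime import datetime, timedelta

    created = datetime(2014, 6, 1, 12, 0, 0)
    name = TIMESTAMP_PREFIX + created.strftime(TIMESTAMP_FORMAT)
    assert name == 'ts-2014-06-01-12-00-00'
    parsed = datetime.strptime(name[len(TIMESTAMP_PREFIX):], TIMESTAMP_FORMAT)
    # The image ID folder is deleted once the embedded time is older than
    # the configured aging threshold.
    now = datetime(2014, 6, 1, 14, 0, 0)
    assert now - parsed > timedelta(seconds=3600)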
|
{
"content_hash": "4675658574c0c66de23172249463a5ea",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 78,
"avg_line_length": 44.706586826347305,
"alnum_prop": 0.6158585587998928,
"repo_name": "srajag/nova",
"id": "5051c210851071ccaa5d8e0ebb95744a6e53ad40",
"size": "8075",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/imagecache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import happy.HappyProcessStart
from happy.Utils import *
if __name__ == "__main__":
options = happy.HappyProcessStart.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:qt:se:",
["help", "id=", "quiet", "tag=", "strace", "env="])
except getopt.GetoptError as err:
print(happy.HappyProcessStart.HappyProcessStart.__doc__)
print(hred(str(err)))
sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print(happy.HappyProcessStart.HappyProcessStart.__doc__)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-i", "--id"):
options["node_id"] = a
elif o in ("-t", "--tag"):
options["tag"] = a
elif o in ("-s", "--strace"):
options["strace"] = True
elif o in ("-e", "--env"):
options["env"] = a
else:
assert False, "unhandled option"
if len(args) > 2 and options["node_id"] is None and options["tag"] is None:
options["node_id"] = args[0]
options["tag"] = args[1]
options["command"] = " ".join(args[2:])
else:
options["command"] = " ".join(args[:])
if options["env"]:
dic = {}
# convert the string to a dictionary
for var in options["env"].split():
lh, rh = var.split("=")
dic[lh] = rh
options["env"] = dic
cmd = happy.HappyProcessStart.HappyProcessStart(options)
cmd.start()
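# Example invocations implied by the option handling above (node and command
# names are illustrative):
#
#     happy-process-start node01 ping-test ping 127.0.0.1
#     happy-process-start -i node01 -t ping-test -e "FOO=1 BAR=2" ping 127.0.0.1
#
# The first form fills node_id, tag, and command positionally; the second
# passes them as flags, so the remaining argument list becomes the command.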
|
{
"content_hash": "da047ddb071e3cb7dd7d9e9cbb2ffc78",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 28.583333333333332,
"alnum_prop": 0.5154518950437318,
"repo_name": "openweave/happy",
"id": "25fabb18f647281feb7c370816c48e6dc516a891",
"size": "2570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/happy-process-start.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "8386"
},
{
"name": "Python",
"bytes": "515221"
},
{
"name": "Shell",
"bytes": "8706"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.models import JunctionTree
from pgmpy.tests import help_functions as hf
class TestJunctionTreeCreation(unittest.TestCase):
def setUp(self):
self.graph = JunctionTree()
def test_add_single_node(self):
self.graph.add_node(('a', 'b'))
self.assertListEqual(self.graph.nodes(), [('a', 'b')])
def test_add_single_node_raises_error(self):
self.assertRaises(TypeError, self.graph.add_node, 'a')
def test_add_multiple_nodes(self):
self.graph.add_nodes_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(hf.recursive_sorted(self.graph.nodes()),
[['a', 'b'], ['b', 'c']])
def test_add_single_edge(self):
self.graph.add_edge(('a', 'b'), ('b', 'c'))
self.assertListEqual(hf.recursive_sorted(self.graph.nodes()),
[['a', 'b'], ['b', 'c']])
self.assertListEqual(sorted([node for edge in self.graph.edges()
for node in edge]),
[('a', 'b'), ('b', 'c')])
def test_add_single_edge_raises_error(self):
self.assertRaises(ValueError, self.graph.add_edge,
('a', 'b'), ('c', 'd'))
def test_add_cyclic_path_raises_error(self):
self.graph.add_edge(('a', 'b'), ('b', 'c'))
self.graph.add_edge(('b', 'c'), ('c', 'd'))
self.assertRaises(ValueError, self.graph.add_edge, ('c', 'd'), ('a', 'b'))
def tearDown(self):
del self.graph
class TestJunctionTreeMethods(unittest.TestCase):
def setUp(self):
self.factor1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
self.factor2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
self.factor3 = DiscreteFactor(['d', 'e'], [2, 2], np.random.rand(4))
self.factor4 = DiscreteFactor(['e', 'f'], [2, 2], np.random.rand(4))
self.factor5 = DiscreteFactor(['a', 'b', 'e'], [2, 2, 2], np.random.rand(8))
self.graph1 = JunctionTree()
self.graph1.add_edge(('a', 'b'), ('b', 'c'))
self.graph1.add_factors(self.factor1, self.factor2)
self.graph2 = JunctionTree()
self.graph2.add_nodes_from([('a', 'b'), ('b', 'c'), ('d', 'e')])
self.graph2.add_edge(('a', 'b'), ('b', 'c'))
self.graph2.add_factors(self.factor1, self.factor2, self.factor3)
self.graph3 = JunctionTree()
self.graph3.add_edges_from([(('a', 'b'), ('b', 'c')), (('d', 'e'), ('e', 'f'))])
self.graph3.add_factors(self.factor1, self.factor2, self.factor3, self.factor4)
self.graph4 = JunctionTree()
self.graph4.add_edges_from([(('a', 'b', 'e'), ('b', 'c')), (('a', 'b', 'e'), ('e', 'f')),
(('d', 'e'), ('e', 'f'))])
self.graph4.add_factors(self.factor5, self.factor2, self.factor3, self.factor4)
def test_check_model(self):
self.assertRaises(ValueError, self.graph2.check_model)
self.assertRaises(ValueError, self.graph3.check_model)
self.assertTrue(self.graph1.check_model())
self.assertTrue(self.graph4.check_model())
def tearDown(self):
del self.factor1
del self.factor2
del self.factor3
del self.factor4
del self.factor5
del self.graph1
del self.graph2
del self.graph3
del self.graph4
class TestJunctionTreeCopy(unittest.TestCase):
def setUp(self):
self.graph = JunctionTree()
def test_copy_with_nodes(self):
self.graph.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
self.graph.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
(('a', 'b', 'c'), ('a', 'c'))])
graph_copy = self.graph.copy()
self.graph.remove_edge(('a', 'b', 'c'), ('a', 'c'))
self.assertFalse(self.graph.has_edge(('a', 'b', 'c'), ('a', 'c')))
self.assertTrue(graph_copy.has_edge(('a', 'b', 'c'), ('a', 'c')))
self.graph.remove_node(('a', 'c'))
self.assertFalse(self.graph.has_node(('a', 'c')))
self.assertTrue(graph_copy.has_node(('a', 'c')))
self.graph.add_node(('c', 'd'))
self.assertTrue(self.graph.has_node(('c', 'd')))
self.assertFalse(graph_copy.has_node(('c', 'd')))
def test_copy_with_factors(self):
self.graph.add_edges_from([[('a', 'b'), ('b', 'c')]])
phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
graph_copy = self.graph.copy()
self.assertIsInstance(graph_copy, JunctionTree)
self.assertIsNot(self.graph, graph_copy)
self.assertEqual(hf.recursive_sorted(self.graph.nodes()),
hf.recursive_sorted(graph_copy.nodes()))
self.assertEqual(hf.recursive_sorted(self.graph.edges()),
hf.recursive_sorted(graph_copy.edges()))
self.assertTrue(graph_copy.check_model())
self.assertEqual(self.graph.get_factors(), graph_copy.get_factors())
self.graph.remove_factors(phi1, phi2)
self.assertTrue(phi1 not in self.graph.factors and phi2 not in self.graph.factors)
self.assertTrue(phi1 in graph_copy.factors and phi2 in graph_copy.factors)
self.graph.add_factors(phi1, phi2)
self.graph.factors[0] = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
self.assertNotEqual(self.graph.get_factors()[0], graph_copy.get_factors()[0])
self.assertNotEqual(self.graph.factors, graph_copy.factors)
def test_copy_with_factorchanges(self):
self.graph.add_edges_from([[('a', 'b'), ('b', 'c')]])
phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
graph_copy = self.graph.copy()
self.graph.factors[0].reduce([('a', 0)])
self.assertNotEqual(self.graph.factors[0].scope(), graph_copy.factors[0].scope())
self.assertNotEqual(self.graph, graph_copy)
self.graph.factors[1].marginalize(['b'])
self.assertNotEqual(self.graph.factors[1].scope(), graph_copy.factors[1].scope())
self.assertNotEqual(self.graph, graph_copy)
def tearDown(self):
del self.graph
|
{
"content_hash": "a7b9a241c410d00f2ad508eb362dd75c",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 97,
"avg_line_length": 43.24183006535948,
"alnum_prop": 0.5488210399032648,
"repo_name": "kris-singh/pgmpy",
"id": "bb8a4942f95209920f591226b3c531ca40669aff",
"size": "6616",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "pgmpy/tests/test_models/test_JunctionTree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293240"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
}
|
import json
from datetime import datetime
from time import sleep
import numpy as np
from flask import Flask, Response, make_response, request
from bokeh.models import CustomJS, ServerSentDataSource
from bokeh.plotting import figure, output_file, show
# Bokeh related code
adapter = CustomJS(code="""
const result = {x: [], y: []}
const pts = cb_data.response
for (let i=0; i<pts.length; i++) {
result.x.push(pts[i][0])
result.y.push(pts[i][1])
}
return result
""")
source = ServerSentDataSource(data_url='http://localhost:5050/data', max_size=100,
mode='append', adapter=adapter)
p = figure(plot_height=800, plot_width=800, background_fill_color="lightgrey",
title="Streaming via Server Sent Events", x_range=[-5,5], y_range=[-5,5])
p.circle('x', 'y', source=source)
# Flask related code
app = Flask(__name__)
def crossdomain(f):
def wrapped_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
h['Access-Control-Allow-Origin'] = '*'
h['Access-Control-Allow-Methods'] = "GET, OPTIONS, POST"
h['Access-Control-Max-Age'] = str(21600)
requested_headers = request.headers.get('Access-Control-Request-Headers')
if requested_headers:
h['Access-Control-Allow-Headers'] = requested_headers
return resp
return wrapped_function
@app.route('/data', methods=['GET', 'OPTIONS'])
@crossdomain
def stream():
def event_stream():
"""No global state used"""
while True:
t = datetime.now().timestamp()
v = np.sin(t*5) + 0.2*np.random.random() + 3
x = v*np.sin(t)
y = v*np.cos(t)
data = [[x, y]]
yield "data: "+json.dumps(data)+"\n\n"
sleep(0.1)
resp = Response(event_stream(), mimetype="text/event-stream")
resp.headers['Cache-Control'] = 'no-cache'
return resp
# show and run
output_file("plot.html", title='Bokeh Plot', mode='inline')
show(p)
app.run(port=5050)
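# Each server-sent event carries a JSON list of [x, y] pairs, e.g.
# "data: [[0.1, 2.9]]". The CustomJS adapter above reshapes one such payload
# into the columnar form the data source expects (sketch of one event):
#
#     cb_data.response == [[0.1, 2.9]]  ->  {x: [0.1], y: [2.9]}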
|
{
"content_hash": "7d8c81c99685f361b8b5b58bfadf09ae",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 29.542857142857144,
"alnum_prop": 0.6088007736943907,
"repo_name": "ericmjl/bokeh",
"id": "3768a0af43fb2e3d356053962d5cad4833df6b03",
"size": "2068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/howto/server_sent_events_source.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
"""Defines built-in ensemble methods and interfaces for custom ensembles."""
# TODO: Add more detailed documentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.ensemble.ensembler import Ensemble
from adanet.ensemble.ensembler import Ensembler
from adanet.ensemble.ensembler import TrainOpSpec
from adanet.ensemble.mean import MeanEnsemble
from adanet.ensemble.mean import MeanEnsembler
from adanet.ensemble.strategy import AllStrategy
from adanet.ensemble.strategy import Candidate
from adanet.ensemble.strategy import GrowStrategy
from adanet.ensemble.strategy import SoloStrategy
from adanet.ensemble.strategy import Strategy
from adanet.ensemble.weighted import ComplexityRegularized
from adanet.ensemble.weighted import ComplexityRegularizedEnsembler
from adanet.ensemble.weighted import MixtureWeightType
from adanet.ensemble.weighted import WeightedSubnetwork
__all__ = [
"Ensemble",
"Ensembler",
"TrainOpSpec",
"AllStrategy",
"Candidate",
"GrowStrategy",
"SoloStrategy",
"Strategy",
"ComplexityRegularized",
"ComplexityRegularizedEnsembler",
"MeanEnsemble",
"MeanEnsembler",
"MixtureWeightType",
"WeightedSubnetwork",
]
|
{
"content_hash": "a7f67ca2bd0cd6735ab726d637eb0f44",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.792259083728278,
"repo_name": "tensorflow/adanet",
"id": "cfb312cfea232b570e004df32781ae5fb12a3f57",
"size": "1869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adanet/ensemble/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1914501"
},
{
"name": "Python",
"bytes": "1047162"
},
{
"name": "Shell",
"bytes": "2927"
},
{
"name": "Starlark",
"bytes": "28690"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
VERSION = '0.1.0'
setup(
namespace_packages = ['tiddlywebplugins'],
name = 'tiddlywebplugins.mongodb',
version = VERSION,
description = 'A ',
    long_description=open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = 'Ben Paddock',
url = 'http://pypi.python.org/pypi/tiddlywebplugins.mongodb',
packages = find_packages(exclude=['test']),
author_email = 'pads@thisispads.me.uk',
platforms = 'Posix; MacOS X; Windows',
install_requires = ['tiddlyweb', 'pymongo'],
extras_require = {
'testing': ['pytest', 'mock', 'tiddlywebplugins.utils'],
'coverage': ['pytest-cov', 'python-coveralls'],
'style': ['pep8']
},
zip_safe = False,
)
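# The extras_require groups above map to pip "extras"; for example (a sketch):
#
#     pip install 'tiddlywebplugins.mongodb[testing]'
#     pip install 'tiddlywebplugins.mongodb[coverage,style]'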
|
{
"content_hash": "6b51f9770c2a589924244c6fe52714e5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 29.958333333333332,
"alnum_prop": 0.6773296244784422,
"repo_name": "pads/tiddlywebplugins.mongodb",
"id": "67315f73a7008f4fd01ccddace022d8e07556150",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19810"
}
],
"symlink_target": ""
}
|
from haystack import indexes
from icekit.utils.search import AbstractLayoutIndex
from . import models
class WorkIndex(AbstractLayoutIndex, indexes.Indexable):
def get_model(self):
return models.WorkBase
class CreatorIndex(AbstractLayoutIndex, indexes.Indexable):
def get_model(self):
return models.CreatorBase
|
{
"content_hash": "15d2eedb5c95b95eb389e4fe657dbe75",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 30.545454545454547,
"alnum_prop": 0.7767857142857143,
"repo_name": "ic-labs/glamkit-collections",
"id": "3396f3aec215c3b6f9e89862866a650e4a5a3cf9",
"size": "336",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "glamkit_collections/contrib/work_creator/search_indexes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "561"
},
{
"name": "Python",
"bytes": "171835"
}
],
"symlink_target": ""
}
|
import json
import logging
import random
import string
from django.http import (
HttpResponseForbidden, HttpResponseNotFound, JsonResponse,
)
from django.views.generic import TemplateView, View
from pretix.base.models import Event, Order, OrderPosition
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.helpers.urls import build_absolute_uri
logger = logging.getLogger('pretix.plugins.pretixdroid')
class ConfigView(EventPermissionRequiredMixin, TemplateView):
template_name = 'pretixplugins/pretixdroid/configuration.html'
permission = 'can_change_orders'
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
key = self.request.event.settings.get('pretixdroid_key')
if not key or 'flush_key' in self.request.GET:
key = ''.join(
random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in
range(32))
self.request.event.settings.set('pretixdroid_key', key)
ctx['qrdata'] = json.dumps({
'version': 1,
'url': build_absolute_uri('plugins:pretixdroid:api', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug
}),
'key': key
})
return ctx
class ApiView(View):
def get(self, request, **kwargs):
try:
event = Event.objects.current.get(
slug=self.kwargs['event'],
organizer__slug=self.kwargs['organizer']
)
except Event.DoesNotExist:
return HttpResponseNotFound('Unknown event')
if (not event.settings.get('pretixdroid_key')
or event.settings.get('pretixdroid_key') != request.GET.get('key', '')):
return HttpResponseForbidden('Invalid key')
ops = OrderPosition.objects.current.filter(
order__event=event, order__status=Order.STATUS_PAID,
).select_related('item', 'variation')
data = [
{
'id': op.identity,
'item': str(op.item),
'variation': str(op.variation) if op.variation else None,
'attendee_name': op.attendee_name
}
for op in ops
]
return JsonResponse({'data': data, 'version': 1})
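# Shape of a successful ApiView response (values are illustrative; `id` comes
# from OrderPosition.identity):
#
#     {
#         "version": 1,
#         "data": [
#             {"id": "abc123", "item": "Standard ticket",
#              "variation": null, "attendee_name": "Ada"}
#         ]
#     }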
|
{
"content_hash": "5003c62ed378f0a4c10899f81f85199c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 118,
"avg_line_length": 35.23529411764706,
"alnum_prop": 0.6093489148580968,
"repo_name": "awg24/pretix",
"id": "5df600c50efea46830f7b200fe667f8c440e3303",
"size": "2396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pretix/plugins/pretixdroid/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43664"
},
{
"name": "HTML",
"bytes": "167660"
},
{
"name": "JavaScript",
"bytes": "24712"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "643853"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
}
|
from sys import version_info
if version_info.major == 2:
from Tkinter import * # noqa
else:
from tkinter import * # noqa
class CollapsiblePane(LabelFrame):
def __init__(self, master, **kwargs):
self.master = master
self.width = kwargs.get('width', 0)
self.height = kwargs.get('height', 0)
self.text = kwargs.get('text', '')
self.visible = kwargs.get('visible', True)
self.collapsible = kwargs.get('collapsible', False)
self.collapsed = kwargs.get('collapsed', False)
LabelFrame.__init__(
self,
self.master,
text=self.text,
borderwidth=0,
highlightthickness=0,
takefocus=0,
relief=FLAT,
)
self.holder = Frame(
self,
borderwidth=0,
highlightthickness=0,
takefocus=0,
relief=FLAT,
)
self.bind('<1>', self._toggle)
self.config(**kwargs)
def config(self, **kwargs):
if 'visible' in kwargs:
self.visible = kwargs['visible']
if self.visible:
kwargs['borderwidth'] = 2
kwargs['relief'] = GROOVE
else:
kwargs['borderwidth'] = 0
kwargs['relief'] = FLAT
if 'collapsed' in kwargs:
self.collapsed = kwargs['collapsed']
if 'collapsible' in kwargs:
self.collapsible = kwargs['collapsible']
if 'text' in kwargs:
self.text = kwargs['text']
for k, v in kwargs.items():
if k in [
'font', 'text', 'width', 'height', 'relief', 'borderwidth'
]:
Frame.config(self, **{k: v})
if not self.collapsible or not self.collapsed:
self.holder.config(height=None)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.holder.grid(row=0, column=0, sticky=N + S + E + W)
else:
self.holder.config(height=20)
self.holder.grid_remove()
def _toggle(self, event=None):
if self.collapsible:
self.config(collapsed=not self.collapsed)
else:
self.config(collapsed=False)
if __name__ == "__main__":
tk = Tk()
cp = CollapsiblePane(
tk,
text="Collapsible",
visible=True,
collapsible=True,
)
cp.pack(side=TOP, fill=BOTH, expand=1, padx=5, pady=5)
lbl1 = Label(cp.holder, text="This is a text message.")
lbl1.pack(side=TOP)
tk.mainloop()
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
|
{
"content_hash": "a16f6d1656aa73057ac75bf3efaf08f1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 74,
"avg_line_length": 30.078651685393258,
"alnum_prop": 0.5304445274561076,
"repo_name": "revarbat/belfrywidgets",
"id": "82e6d7011d4e95e214ed5fad9497a16d301384e8",
"size": "2677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "belfrywidgets/collapsiblepane.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "35381"
}
],
"symlink_target": ""
}
|
import json
import couchbase
from couchbase._pyport import basestring
from couchbase.views.iterator import AlreadyQueriedError
from couchbase.exceptions import CouchbaseError, NotSupportedError
class N1QLError(CouchbaseError):
@property
def n1ql_errcode(self):
return self.objextra['code']
CONSISTENCY_REQUEST = 'request_plus'
"""
For use with :attr:`~.N1QLQuery.consistency`, will ensure that query
results always reflect the latest data in the server
"""
CONSISTENCY_NONE = 'none'
"""
For use with :attr:`~.N1QLQuery.consistency`, will allow cached
values to be returned. This will improve performance but may not
reflect the latest data in the server.
"""
class MissingTokenError(CouchbaseError):
pass
class MutationState(object):
"""
.. warning::
The API and implementation of this class are subject to change.
This class acts as a container for one or more mutations. It may
then be used with the :meth:`~.N1QLQuery.consistent_with` method to
indicate that a given query should be bounded by the contained
mutations.
Using `consistent_with` is similar to setting
:attr:`~.N1QLQuery.consistency` to :data:`CONSISTENCY_REQUEST`,
but is more optimal as the query will use cached data, *except*
when the given mutation(s) are concerned. This option is useful
for use patterns when an application has just performed a mutation,
and wishes to perform a query in which the newly-performed mutation
should reflect on the query results.
.. note::
This feature requires Couchbase Server 4.5 or greater,
and also requires that `fetch_mutation_tokens=true`
be specified in the connection string when creating
a :class:`~couchbase.bucket.Bucket`
.. code-block:: python
cb = Bucket('couchbase://localhost/default?fetch_mutation_tokens=true')
rvs = cb.upsert_multi({
'foo': {'type': 'user', 'value': 'a foo value'},
'bar': {'type': 'user', 'value': 'a bar value'}
})
nq = N1QLQuery('SELECT type, value FROM default WHERE type="user"')
        ms = MutationState()
        ms.add_results(*rvs.values())
        nq.consistent_with(ms)
for row in cb.n1ql_query(nq):
# ...
"""
def __init__(self):
self._sv = {}
def _add_scanvec(self, mutinfo):
"""
Internal method used to specify a scan vector.
:param mutinfo: A tuple in the form of
`(vbucket id, vbucket uuid, mutation sequence)`
"""
vb, uuid, seq, bktname = mutinfo
self._sv.setdefault(bktname, {})[vb] = (seq, str(uuid))
def encode(self):
"""
Encodes this state object to a string. This string may be passed
to the :meth:`decode` at a later time. The returned object is safe
for sending over the network.
:return: A serialized string representing the state
"""
return couchbase._to_json(self._sv)
@classmethod
def decode(cls, s):
"""
Create a :class:`MutationState` from the encoded string
:param s: The encoded string
:return: A new MutationState restored from the string
"""
d = couchbase._from_json(s)
o = MutationState()
o._sv = d
        # TODO: Validate
        return o
def add_results(self, *rvs, **kwargs):
"""
Changes the state to reflect the mutation which yielded the given
result.
In order to use the result, the `fetch_mutation_tokens` option must
have been specified in the connection string, _and_ the result
must have been successful.
:param rvs: One or more :class:`~.OperationResult` which have been
returned from mutations
:param quiet: Suppress errors if one of the results does not
contain a convertible state.
:return: `True` if the result was valid and added, `False` if not
            added (and `quiet` was specified)
:raise: :exc:`~.MissingTokenError` if `result` does not contain
a valid token
"""
if not rvs:
raise MissingTokenError.pyexc(message='No results passed')
for rv in rvs:
mi = rv._mutinfo
if not mi:
if kwargs.get('quiet'):
return False
raise MissingTokenError.pyexc(
message='Result does not contain token')
self._add_scanvec(mi)
return True
def add_all(self, bucket, quiet=False):
"""
Ensures the query result is consistent with all prior
mutations performed by a given bucket.
Using this function is equivalent to keeping track of all
mutations performed by the given bucket, and passing them to
:meth:`~add_result`
:param bucket: A :class:`~couchbase.bucket.Bucket` object
used for the mutations
:param quiet: If the bucket contains no valid mutations, this
option suppresses throwing exceptions.
:return: `True` if at least one mutation was added, `False` if none
were added (and `quiet` was specified)
:raise: :exc:`~.MissingTokenError` if no mutations were added and
`quiet` was not specified
"""
added = False
for mt in bucket._mutinfo():
added = True
self._add_scanvec(mt)
if not added and not quiet:
raise MissingTokenError('Bucket object contains no tokens!')
return added
def __repr__(self):
return repr(self._sv)
def __nonzero__(self):
return bool(self._sv)
__bool__ = __nonzero__
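# Sketch of shipping consistency state between processes with the
# encode()/decode() pair above (assumes `rvs` holds successful mutation
# results, as in the class docstring):
#
#     ms = MutationState()
#     ms.add_results(*rvs.values())
#     token = ms.encode()                # JSON string, safe to send on the wire
#     ms2 = MutationState.decode(token)  # restore on the receiving side
#     query.consistent_with(ms2)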
class N1QLQuery(object):
def __init__(self, query, *args, **kwargs):
"""
Create an N1QL Query object. This may be passed as the
`params` argument to :class:`N1QLRequest`.
:param query: The query string to execute
:param args: Positional placeholder arguments. These satisfy
the placeholder values for positional placeholders in the
query string, such as ``$1``, ``$2`` and so on.
:param kwargs: Named placeholder arguments. These satisfy
named placeholders in the query string, such as
``$name``, ``$email`` and so on. For the placeholder
values, omit the leading sigil (``$``).
Use positional parameters::
q = N1QLQuery('SELECT * FROM `travel-sample` '
'WHERE type=$1 AND id=$2',
'airline', 0)
for row in cb.n1ql_query(q):
print 'Got', row
Use named parameters::
q = N1QLQuery('SELECT * FROM `travel-sample` '
'WHERE type=$type AND id=$id',
type='airline', id=0)
for row in cb.n1ql_query(q):
print 'Got', row
When using placeholders, ensure that the placeholder value is
the *unserialized* (i.e. native) Python value, not the JSON
serialized value. For example the query
``SELECT * FROM products WHERE tags IN ["sale", "clearance"]``
can be rewritten using placeholders:
Correct::
N1QLQuery('SELECT * FROM products WHERE tags IN $1',
['sale', 'clearance'])
Incorrect::
N1QLQuery('SELECT * FROM products WHERE tags IN $1',
"[\\"sale\\",\\"clearance\\"]")
        The second form is wrong because placeholder values are serialized to
        JSON internally, so a pre-serialized string would be double-encoded.
"""
self._adhoc = True
self._body = {'statement': query}
if args:
self._add_pos_args(*args)
if kwargs:
self._set_named_args(**kwargs)
def _set_named_args(self, **kv):
"""
Set a named parameter in the query. The named field must
exist in the query itself.
:param kv: Key-Value pairs representing values within the
query. These values should be stripped of their leading
`$` identifier.
"""
for k in kv:
self._body['${0}'.format(k)] = kv[k]
return self
def _add_pos_args(self, *args):
"""
Set values for *positional* placeholders (``$1,$2,...``)
:param args: Values to be used
"""
arg_array = self._body.setdefault('args', [])
arg_array.extend(args)
def set_option(self, name, value):
"""
Set a raw option in the query. This option is encoded
as part of the query parameters without any client-side
verification. Use this for settings not directly exposed
by the Python client.
:param name: The name of the option
:param value: The value of the option
"""
self._body[name] = value
@property
def statement(self):
return self._body['statement']
@property
def consistency(self):
"""
Sets the consistency level.
:see: :data:`CONSISTENCY_NONE`, :data:`CONSISTENCY_REQUEST`
"""
return self._body.get('scan_consistency', CONSISTENCY_NONE)
@consistency.setter
def consistency(self, value):
self._body['scan_consistency'] = value
def consistent_with(self, state):
"""
Indicate that the query should be consistent with one or more
mutations.
:param state: The state of the mutations it should be consistent
with.
"""
if self.consistency not in (CONSISTENCY_NONE, 'at_plus'):
raise TypeError(
'consistent_with not valid with other consistency options')
if not state:
raise TypeError('Passed empty or invalid state', state)
self.consistency = 'at_plus'
self._body['scan_vectors'] = state._sv
# TODO: I really wish Sphinx were able to automatically
# document instance vars
@property
def adhoc(self):
"""
A non-`adhoc` query can be internally optimized so that repeated
executions of the same query can be quicker. If this query is issued
repeatedly in your application, then you should set this property to
`False`.
Note that this optimization involves an up-front "preparation"
cost, and should only be used for queries that are issued multiple
times.
"""
return self._adhoc
@adhoc.setter
def adhoc(self, arg):
self._adhoc = arg
@property
def timeout(self):
"""
Optional per-query timeout. If set, this will limit the amount
of time in which the query can be executed and waited for.
.. note::
The effective timeout for the query will be either this property
or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout`
property, whichever is *lower*.
.. seealso:: couchbase.bucket.Bucket.n1ql_timeout
"""
value = self._body.get('timeout', '0s')
value = value[:-1]
return float(value)
@timeout.setter
def timeout(self, value):
if not value:
self._body.pop('timeout', 0)
else:
value = float(value)
self._body['timeout'] = '{0}s'.format(value)
@property
def encoded(self):
"""
Get an encoded representation of the query.
This is used internally by the client, and can be useful
to debug queries.
"""
return json.dumps(self._body)
def __repr__(self):
return ('<{cls} stmt={stmt} at {oid}>'.format(
cls=self.__class__.__name__,
stmt=repr(self._body),
oid=id(self)))
class N1QLRequest(object):
def __init__(self, params, parent, row_factory=lambda x: x):
"""
Object representing the execution of the request on the
server.
.. warning::
You should typically not call this constructor by
yourself, rather use the :meth:`~.Bucket.n1ql_query`
method (or one of its async derivatives).
:param params: An :class:`N1QLQuery` object.
:param parent: The parent :class:`~.couchbase.bucket.Bucket` object
:param row_factory: Callable which accepts the raw dictionary
of each row, and can wrap them in a customized class.
The default is simply to return the dictionary itself.
To actually receive results of the query, iterate over this
object.
"""
if isinstance(params, basestring):
params = N1QLQuery(params)
self._params = params
self._parent = parent
self.row_factory = row_factory
self.errors = []
self._mres = None
self._do_iter = True
self.__raw = False
self.__meta_received = False
def _start(self):
if self._mres:
return
self._mres = self._parent._n1ql_query(self._params.encoded,
not self._params.adhoc)
self.__raw = self._mres[None]
@property
def raw(self):
return self.__raw
@property
def meta(self):
"""
Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata
"""
if not self.__meta_received:
raise RuntimeError(
                'This property is only valid once all rows are received!')
if isinstance(self.raw.value, dict):
return self.raw.value
return {}
def _clear(self):
del self._parent
del self._mres
def _handle_meta(self, value):
self.__meta_received = True
if not isinstance(value, dict):
return
if 'errors' in value:
for err in value['errors']:
raise N1QLError.pyexc('N1QL Execution failed', err)
def _process_payload(self, rows):
if rows:
return [self.row_factory(row) for row in rows]
elif self.raw.done:
self._handle_meta(self.raw.value)
self._do_iter = False
return []
else:
# We can only get here if another concurrent query broke out the
# event loop before we did.
return []
def execute(self):
"""
Execute the statement and raise an exception on failure.
This method is useful for statements which modify data or
indexes, where the application does not need to extract any
data, but merely determine success or failure.
"""
for _ in self:
pass
return self
def get_single_result(self):
"""
Execute the statement and return its single result.
This should only be used on statements which are intended to
return only a single result.
:return: The single result, as encapsulated by the
`row_factory`
"""
for r in self:
return r
def __iter__(self):
if not self._do_iter:
raise AlreadyQueriedError()
self._start()
while self._do_iter:
raw_rows = self.raw.fetch(self._mres)
for row in self._process_payload(raw_rows):
yield row
|
{
"content_hash": "e337717a502e91c1716d1b45da4d362b",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 79,
"avg_line_length": 31.796747967479675,
"alnum_prop": 0.5865507542827921,
"repo_name": "mnunberg/couchbase-python-client",
"id": "6f2895646c3674fea270bd1e05c242934fdefe06",
"size": "16249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchbase/n1ql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "261401"
},
{
"name": "Python",
"bytes": "428174"
}
],
"symlink_target": ""
}
|
from django.conf import settings
import hashlib
import base64
from typing import Optional
def initial_password(email: str) -> Optional[str]:
"""Given an email address, returns the initial password for that account, as
created by populate_db."""
if settings.INITIAL_PASSWORD_SALT is not None:
encoded_key = (settings.INITIAL_PASSWORD_SALT + email).encode("utf-8")
digest = hashlib.sha256(encoded_key).digest()
return base64.b64encode(digest)[:16].decode('utf-8')
else:
# None as a password for a user tells Django to set an unusable password
return None
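# A worked example of the derivation above (salt and email are illustrative):
if __name__ == '__main__':
    salt, email = 'salt', 'user@example.com'
    digest = hashlib.sha256((salt + email).encode('utf-8')).digest()
    # First 16 base64 characters of the SHA-256 digest; deterministic for a
    # given salt and email, so populate_db can recreate it on demand.
    print(base64.b64encode(digest)[:16].decode('utf-8'))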
|
{
"content_hash": "72ff289188c3f62a7ec183a8cb2c26d8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 32.473684210526315,
"alnum_prop": 0.6936790923824959,
"repo_name": "tommyip/zulip",
"id": "4e2d5360f90fb3b355e3b3e6797baa7300c99305",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/initial_password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import re
from builtins import dict, int, str, super
from collections import namedtuple
import IPython.display as DISP
import matplotlib.pyplot as plt
import nbwavedrom
from future import standard_library
from tabulate import tabulate
from .trace import *
standard_library.install_aliases()
class PeekerBase(object):
    peekers = dict()  # Global dict of all Peekers, keyed by name.
USE_JUPYTER = False
USE_WAVEDROM = False
unit_time = None # Time interval for a single tick-mark span.
def __new__(cls, *args, **kwargs):
# Keep PeekerBase from being instantiated.
if cls is PeekerBase:
raise TypeError("PeekerBase class may not be instantiated")
return object.__new__(cls)
def __init__(self, signal, name, **kwargs):
# Create storage for a signal trace.
self.trace = Trace()
# Configure the Peeker and its Trace instance.
self.config(**kwargs)
# Assign a unique name to this peeker.
self.name_dup = False # Start off assuming the name has no duplicates.
index = 0 # Starting index for disambiguating duplicates.
nm = "{name}[{index}]".format(**locals()) # Create name with bracketed index.
# Search through the peeker names for a match.
while nm in self.peekers:
# A match was found, so mark the matching names as duplicates.
self.peekers[nm].name_dup = True
self.name_dup = True
# Go to the next index and see if that name is taken.
index += 1
nm = "{name}[{index}]".format(**locals())
self.trace.name = nm # Assign the unique name.
# Keep a reference to the signal so we can get info about it later, if needed.
self.signal = signal
# Add this peeker to the global list.
self.peekers[self.trace.name] = self
@classmethod
def config_defaults(cls, **kwargs):
"""Setup options and shortcut functions."""
# Configure Trace defaults.
Trace.config_defaults(**kwargs)
global clear_traces, show_traces, show_waveforms, show_text_table, show_html_table, export_dataframe
cls.USE_WAVEDROM = kwargs.pop("use_wavedrom", cls.USE_WAVEDROM)
if cls.USE_WAVEDROM:
cls.show_waveforms = cls.to_wavedrom
cls.show_traces = traces_to_wavedrom
else:
cls.show_waveforms = cls.to_matplotlib
cls.show_traces = traces_to_matplotlib
# Create an intermediary function to call cls.show_waveforms and assign it to show_waveforms.
# Then if cls.show_waveforms is changed, any calls to show_waveforms will call the changed
# function. Directly assigning cls.show_waveforms to show_waveforms would mean any external
# code that calls show_waveforms() would always call the initially-assigned function even if
# cls.show_waveforms got a different assignment later.
def shw_wvfrms(*args, **kwargs):
return cls.show_waveforms(*args, **kwargs)
show_waveforms = shw_wvfrms
def shw_trcs(*args, **kwargs):
return cls.show_traces(*args, **kwargs)
show_traces = shw_trcs
# These class methods don't change as the options are altered, so just assign them
# to shortcuts without creating intermediary functions like above.
clear_traces = cls.clear_traces
export_dataframe = cls.to_dataframe
show_text_table = cls.to_text_table
show_html_table = cls.to_html_table
cls.USE_JUPYTER = kwargs.pop("use_jupyter", cls.USE_JUPYTER)
# Remaining keyword args.
for k, v in kwargs.items():
setattr(cls, k, copy(v))
def config(self, **kwargs):
"""
Set configuration for a particular Peeker.
"""
# Configure trace instance.
self.trace.config(**kwargs)
# Remaining keyword args.
for k, v in kwargs.items():
if isinstance(v, dict):
setattr(self, k, copy(getattr(self, k, {})))
getattr(self, k).update(v)
else:
setattr(self, k, copy(v))
@classmethod
def clear(cls):
"""Clear the global list of Peekers."""
cls.peekers = dict()
cls.unit_time = None
@classmethod
def clear_traces(cls):
"""Clear waveform samples from the global list of Peekers."""
for p in cls.peekers.values():
p.trace.clear()
cls.unit_time = None
@classmethod
def start_time(cls):
"""Return the time of the first signal transition captured by the peekers."""
        return min((p.trace.start_time() for p in cls.peekers.values()))
@classmethod
def stop_time(cls):
"""Return the time of the last signal transition captured by the peekers."""
        return max((p.trace.stop_time() for p in cls.peekers.values()))
@classmethod
def _clean_names(cls):
"""
Remove indices from non-repeated peeker names that don't need them.
When created, all peekers get an index appended to their name to
disambiguate any repeated names. If the name isn't actually repeated,
then the index is removed.
"""
        index_re = r"\[\d+\]$"
for name, peeker in list(cls.peekers.items()):
if not peeker.name_dup:
# Base name is not repeated, so remove any index.
new_name = re.sub(index_re, "", name)
if new_name != name:
# Index got removed so name changed. Therefore,
# remove the original entry and replace with
# the renamed Peeker.
cls.peekers.pop(name)
peeker.trace.name = new_name
cls.peekers[new_name] = peeker
@classmethod
def to_dataframe(cls, *names, **kwargs):
"""
Convert traces stored in peekers into a Pandas DataFrame of times and trace values.
Args:
*names: A list of strings containing the names for the Peekers that
will be processed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
A DataFrame with the columns for the named traces and time as the index.
"""
cls._clean_names()
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the traces for the Peekers matching the names.
traces = [getattr(cls.peekers.get(name), "trace", None) for name in names]
return traces_to_dataframe(*traces, **kwargs)
@classmethod
def to_table_data(cls, *names, **kwargs):
"""
Convert traces stored in peekers into a list of times and trace values.
Args:
*names: A list of strings containing the names for the Peekers that
will be processed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
List of lists containing the time and the value of each trace at that time.
"""
cls._clean_names()
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the traces for the Peekers matching the names.
traces = [getattr(cls.peekers.get(name), "trace", None) for name in names]
return traces_to_table_data(*traces, **kwargs)
@classmethod
def to_table(cls, *names, **kwargs):
format = kwargs.pop("format", "simple")
table_data, headers = cls.to_table_data(*names, **kwargs)
return tabulate(tabular_data=table_data, headers=headers, tablefmt=format)
@classmethod
def to_text_table(cls, *names, **kwargs):
if "format" not in kwargs:
kwargs["format"] = "simple"
print(cls.to_table(*names, **kwargs))
@classmethod
def to_html_table(cls, *names, **kwargs):
kwargs["format"] = "html"
tbl_html = cls.to_table(*names, **kwargs)
# Generate the HTML from the JSON.
DISP.display_html(DISP.HTML(tbl_html))
@classmethod
def get(cls, name):
"""Return the Peeker having the given name."""
cls._clean_names()
return cls.peekers.get(name)
@classmethod
def get_traces(cls):
"""Return a list of all the traces in the available Peekers."""
traces = [getattr(p, "trace", None) for p in cls.peekers.values()]
return [trc for trc in traces if trc is not None]
@classmethod
def to_matplotlib(cls, *names, **kwargs):
"""
Convert waveforms stored in peekers into a matplotlib plot.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
title_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
caption: String containing the title placed across the bottom of the display.
caption_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
grid_fmt (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
time_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
width: The width of the waveform display in inches.
height: The height of the waveform display in inches.
Returns:
Figure and axes created by matplotlib.pyplot.subplots.
"""
cls._clean_names()
if cls.unit_time is None:
cls.unit_time = calc_unit_time(*cls.get_traces())
Trace.unit_time = cls.unit_time
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the Peekers matching the names.
peekers = [cls.get(name) for name in names]
traces = [getattr(p, "trace", None) for p in peekers]
return traces_to_matplotlib(*traces, **kwargs)
@classmethod
def to_wavejson(cls, *names, **kwargs):
"""
Convert waveforms stored in peekers into a WaveJSON data structure.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
caption: String containing the title placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
Returns:
A dictionary with the JSON data for the waveforms.
"""
cls._clean_names()
if cls.unit_time is None:
cls.unit_time = calc_unit_time(*cls.get_traces())
Trace.unit_time = cls.unit_time
if names:
# Go through the provided names and split any containing spaces
# into individual names.
names = [nm for name in names for nm in name.split()]
else:
# If no names provided, use all the peekers.
names = _sort_names(cls.peekers.keys())
# Collect all the Peekers matching the names.
peekers = [cls.peekers.get(name) for name in names]
traces = [getattr(p, "trace", None) for p in peekers]
return traces_to_wavejson(*traces, **kwargs)
@classmethod
def to_wavedrom(cls, *names, **kwargs):
"""
Display waveforms stored in peekers in Jupyter notebook.
Args:
*names: A list of strings containing the names for the Peekers that
will be displayed. A string may contain multiple,
space-separated names.
        Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
caption: String containing the title placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
width: The width of the waveform display in pixels.
skin: Selects the set of graphic elements used to create waveforms.
Returns:
Nothing.
"""
# Handle keyword args explicitly for Python 2 compatibility.
width = kwargs.get("width")
skin = kwargs.get("skin", "default")
if cls.USE_JUPYTER:
# Used with older Jupyter notebooks.
wavejson_to_wavedrom(
cls.to_wavejson(*names, **kwargs), width=width, skin=skin
)
else:
# Supports the new Jupyter Lab.
return nbwavedrom.draw(cls.to_wavejson(*names, **kwargs))
def delay(self, delta):
"""Return the trace data shifted in time by delta units."""
return self.trace.delay(delta)
def binarize(self):
"""Return trace of sample values set to 1 (if true) or 0 (if false)."""
return self.trace.binarize()
def __eq__(self, pkr):
return self.trace == pkr
def __ne__(self, pkr):
return self.trace != pkr
def __le__(self, pkr):
return self.trace <= pkr
def __ge__(self, pkr):
return self.trace >= pkr
def __lt__(self, pkr):
return self.trace < pkr
def __gt__(self, pkr):
return self.trace > pkr
def __add__(self, pkr):
return self.trace + pkr
def __sub__(self, pkr):
return self.trace - pkr
def __mul__(self, pkr):
return self.trace * pkr
def __floordiv__(self, pkr):
return self.trace // pkr
def __truediv__(self, pkr):
return self.trace / pkr
def __mod__(self, pkr):
return self.trace % pkr
def __lshift__(self, pkr):
return self.trace << pkr
def __rshift__(self, pkr):
return self.trace >> pkr
def __and__(self, pkr):
return self.trace & pkr
def __or__(self, pkr):
return self.trace | pkr
def __xor__(self, pkr):
return self.trace ^ pkr
def __pow__(self, pkr):
return self.trace ** pkr
def __pos__(self):
return +self.trace
def __neg__(self):
return -self.trace
def __not__(self):
return not self.trace
def __inv__(self):
return ~self.trace
def __abs__(self):
return abs(self.trace)
def trig_times(self):
"""Return list of times trace value is true (non-zero)."""
return self.trace.trig_times()
def _sort_names(names):
"""
Sort peeker names by index and alphabetically.
For example, the peeker names would be sorted as a[0], b[0], a[1], b[1], ...
"""
def index_key(lbl):
"""Index sorting."""
m = re.match(".*\[(\d+)\]$", lbl) # Get the bracketed index.
if m:
return int(m.group(1)) # Return the index as an integer.
return -1 # No index found so it comes before everything else.
def name_key(lbl):
"""Name sorting."""
m = re.match("^([^\[]+)", lbl) # Get name preceding bracketed index.
if m:
return m.group(1) # Return name.
return "" # No name found.
srt_names = sorted(names, key=name_key)
srt_names = sorted(srt_names, key=index_key)
return srt_names
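# Example of the interleaved ordering _sort_names produces (a sketch):
#
#     _sort_names(['b[1]', 'a[0]', 'b[0]', 'a[1]', 'clk'])
#     # -> ['clk', 'a[0]', 'b[0]', 'a[1]', 'b[1]']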
|
{
"content_hash": "aa4b31994f26d95219024dc86a121e70",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 122,
"avg_line_length": 35.65261044176707,
"alnum_prop": 0.59892987890735,
"repo_name": "xesscorp/myhdlpeek",
"id": "32bb8780dcc72817350f75df4178790f4a456e25",
"size": "17932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myhdlpeek/peekerbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "595"
},
{
"name": "Makefile",
"bytes": "1710"
},
{
"name": "Python",
"bytes": "43195"
}
],
"symlink_target": ""
}
|
import pytest
from github3 import GitHubError
from github3.orgs import Team
from .helper import UnitHelper, UnitIteratorHelper, create_url_helper
url_for = create_url_helper('https://api.github.com/teams/10')
class TestTeam(UnitHelper):
described_class = Team
example_data = {
'url': 'https://api.github.com/teams/10',
'name': 'Owners',
'id': 10,
'permission': 'admin',
'members_count': 3,
'repos_count': 10,
'organization': {
'login': 'github',
'id': 1,
'url': 'https://api.github.com/orgs/github',
'avatar_url': 'https://github.com/images/error/octocat_happy.gif'
}
}
def test_add_member(self):
"""Show that one can add a member to an organization team."""
self.instance.add_member('user')
self.session.put.assert_called_once_with(url_for('members/user'))
def test_add_repository(self):
"""Show that one can add a repository to an organization team."""
self.instance.add_repository('name-of-repo')
self.session.put.assert_called_once_with(url_for('repos/name-of-repo'))
def test_delete(self):
"""Show that a user can delete an organization team."""
self.instance.delete()
self.session.delete.assert_called_once_with(url_for())
def test_edit(self):
"""Show that a user can edit a team."""
self.instance.edit('name', 'admin')
self.patch_called_with(url_for(),
data={'name': 'name', 'permission': 'admin'})
def test_has_repository(self):
"""Show that a user can check if a team has access to a repository."""
self.instance.has_repository('org/repo')
self.session.get.assert_called_once_with(url_for('repos/org/repo'))
def test_is_member(self):
"""Show that a user can check if another user is a team member."""
self.instance.is_member('username')
self.session.get.assert_called_once_with(url_for('members/username'))
def test_remove_member(self):
"""Show that a user can check if another user is a team member."""
self.instance.remove_member('username')
self.session.delete.assert_called_once_with(
url_for('members/username')
)
def test_remove_repository(self):
"""Show that a user can remove a repository from a team."""
self.instance.remove_repository('repo')
self.session.delete.assert_called_once_with(url_for('/repos/repo'))
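# For reference: url_for composes paths onto the team URL defined above, so
# (assuming the helper joins segments with '/')
#   url_for('members/user') == 'https://api.github.com/teams/10/members/user'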
class TestTeamRequiresAuth(UnitHelper):
described_class = Team
example_data = {
'url': 'https://api.github.com/teams/10',
'name': 'Owners',
'id': 10,
'permission': 'admin',
'members_count': 3,
'repos_count': 10,
'organization': {
'login': 'github',
'id': 1,
'url': 'https://api.github.com/orgs/github',
'avatar_url': 'https://github.com/images/error/octocat_happy.gif'
}
}
def after_setup(self):
"""Set up for test cases in TestTeamRequiresAuth."""
self.session.has_auth.return_value = False
def test_add_member_requires_auth(self):
"""Show that adding a repo to a team requires authentication."""
with pytest.raises(GitHubError):
self.instance.add_member('user')
def test_add_repository_requires_auth(self):
"""Show that adding a repo to a team requires authentication."""
with pytest.raises(GitHubError):
self.instance.add_repository('repo')
def test_delete_requires_auth(self):
"""Show that deleteing a team requires authentication."""
with pytest.raises(GitHubError):
self.instance.delete()
def test_edit_requires_auth(self):
"""Show that editing a team requires authentication."""
with pytest.raises(GitHubError):
self.instance.edit('name')
def test_has_repository_requires_auth(self):
"""Show that checking a team's access to a repo needs auth."""
with pytest.raises(GitHubError):
self.instance.has_repository('org/repo')
def test_is_member_requires_auth(self):
"""Show that checking a user's team membership requires auth."""
with pytest.raises(GitHubError):
self.instance.is_member('user')
def test_remove_member_requires_auth(self):
"""Show that removing a team member requires authentication."""
with pytest.raises(GitHubError):
self.instance.remove_member('user')
def test_remove_repository_requires_auth(self):
"""Show that removing a repo from a team requires authentication."""
with pytest.raises(GitHubError):
self.instance.remove_repository('repo')
class TestTeamIterator(UnitIteratorHelper):
described_class = Team
example_data = {
'url': url_for()
}
def test_members(self):
"""Show that one can iterate over all members of a Team."""
i = self.instance.members()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('members'),
params={'per_page': 100},
headers={}
)
def test_members_roles(self):
"""Show that one can iterate of all maintainers of a Team."""
i = self.instance.members(role='maintainer')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('members'),
params={'per_page': 100, 'role': 'maintainer'},
headers={'Accept': 'application/vnd.github.ironman-preview+json'}
)
def test_members_excludes_fake_roles(self):
"""Show that one cannot pass a bogus role to the API."""
i = self.instance.members(role='bogus-role')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('members'),
params={'per_page': 100},
headers={}
)
def test_members_requires_auth(self):
"""Show that one needs to authenticate to get team members."""
self.session.has_auth.return_value = False
with pytest.raises(GitHubError):
self.instance.members()
def test_repositories(self):
"""Show that one can iterate over an organization's repositories."""
i = self.instance.repositories()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('repos'),
params={'per_page': 100},
headers={'Accept': 'application/vnd.github.ironman-preview+json'}
)
|
{
"content_hash": "8b16abb3bdef5b5450881028275ca42f",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 33.59090909090909,
"alnum_prop": 0.6039693279206134,
"repo_name": "h4ck3rm1k3/github3.py",
"id": "d4f6d027e36269c167076aed6a5c302d84168c41",
"size": "6651",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/unit/test_orgs_team.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "386"
},
{
"name": "Python",
"bytes": "682251"
}
],
"symlink_target": ""
}
|
from openstack.tests.unit import base
from openstack.network.v2 import service_provider
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'service_type': 'L3_ROUTER_NAT',
'name': '4',
'default': False,
}
class TestServiceProvider(base.TestCase):
def test_basic(self):
sot = service_provider.ServiceProvider()
self.assertEqual('service_providers', sot.resources_key)
self.assertEqual('/service-providers', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = service_provider.ServiceProvider(**EXAMPLE)
self.assertEqual(EXAMPLE['service_type'], sot.service_type)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['default'], sot.is_default)
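# Since only allow_list is enabled above, the resource is read-only in
# practice; a consumer would reach it through the network proxy, e.g.
# (hypothetical connection object):
#   for sp in conn.network.service_providers():
#       print(sp.service_type, sp.is_default)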
|
{
"content_hash": "c9d6b83895a9554b55aade39d8970414",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 31.1,
"alnum_prop": 0.6784565916398714,
"repo_name": "stackforge/python-openstacksdk",
"id": "07cdce7278011971593ac43d66da915cfd025b14",
"size": "1479",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/network/v2/test_service_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1138292"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
import time
from annotypes import Anno, add_call_types
from malcolm.core import PartRegistrar
from malcolm.modules import builtin
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
with Anno("The demand value to move our counter motor to"):
ADemand = float
with Anno("The amount of time to get to the demand position"):
ADuration = float
# How long between ticks of the "motor" position while moving
UPDATE_TICK = 0.1
# We will set these attributes on the child block, so don't save them
@builtin.util.no_save("counter")
class CounterMovePart(builtin.parts.ChildPart):
"""Provides control of a `counter_block` within a `ManagerController`"""
def __init__(self, name: APartName, mri: AMri) -> None:
super().__init__(name, mri, stateful=False, initial_visibility=True)
def setup(self, registrar: PartRegistrar) -> None:
super().setup(registrar)
# Method
registrar.add_method_model(self.move, self.name + "Move", needs_context=True)
@add_call_types
def move(
self, context: builtin.hooks.AContext, demand: ADemand, duration: ADuration = 0
) -> None:
"""Move the counter to the demand value, taking duration seconds like
a motor would do"""
start = time.time()
child = context.block_view(self.mri)
distance = demand - child.counter.value
remaining = duration
# "Move" the motor, ticking at UPDATE_TICK rate
while remaining > 0:
child.counter.put_value(demand - distance * remaining / duration)
context.sleep(min(remaining, UPDATE_TICK))
remaining = start + duration - time.time()
# Final move to make sure we end up at the right place
child.counter.put_value(demand)
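# Worked example (illustrative numbers): with the counter at 0, demand=10 and
# duration=1.0, distance is 10; at remaining=0.5 the loop puts
# 10 - 10 * 0.5 / 1.0 = 5.0, so the counter ramps linearly toward the demand.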
|
{
"content_hash": "b622f0a9edd4d79b6af1dd0a3004d2d0",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 36.84,
"alnum_prop": 0.6769815418023887,
"repo_name": "dls-controls/pymalcolm",
"id": "d68845c7dd41a643b7f3cc7d857ec7ccb51441f2",
"size": "1842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malcolm/modules/demo/parts/countermovepart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "549"
},
{
"name": "Python",
"bytes": "1583458"
},
{
"name": "Shell",
"bytes": "580"
}
],
"symlink_target": ""
}
|
import json
from string import Template
from textwrap import dedent
import subprocess
class heat_inventory:
# output keys
hadoop_master_public_key = "hadoop_master_public_ip"
hadoop_master_private_key = "hadoop_master_private_ip"
hadoop_datanode_public_key = "nodes_public_ips"
hadoop_datanode_private_key = "nodes_private_ips"
# template values
ansible_ssh_user = "debian"
ansible_ssh_private_key_file = "~/.ssh/hadoop.pem"
# templates
host_entry = Template('$ipaddress ansible_connection=ssh ansible_ssh_user=$ssh_user ansible_ssh_private_key_file=$private_key_file')
hosts_output = Template("""[hadoop-master]
$master_host
[hadoop-data]
$data_hosts
[hadoop-master:vars]
nodesfile=nodes-pro
[hadoop-data:vars]
nodesfile=nodes-pro""")
node_entry = Template(""" - hostname: $hostname
ip: $ipaddress""")
nodes_section = Template("""---
nodes:
$nodes
""")
nodes_sshkeyscan = Template('ssh-keyscan -t rsa $ipaddress >> ~/.ssh/known_hosts')
def __init__(self):
self.load_heat_output()
def load_heat_output(self):
self.heat_output = json.loads(subprocess.Popen("heat output-show hadoop-stack --all", shell=True, stdout=subprocess.PIPE).stdout.read())
def get_master_public_ip(self):
for output_item in self.heat_output:
if self.hadoop_master_public_key == output_item['output_key']:
return output_item['output_value']
def get_master_private_ip(self):
for output_item in self.heat_output:
if self.hadoop_master_private_key == output_item['output_key']:
return output_item['output_value']
def get_datanode_public_ips(self):
for output_item in self.heat_output:
if self.hadoop_datanode_public_key == output_item['output_key']:
return output_item['output_value']
def get_datanode_private_ips(self):
for output_item in self.heat_output:
if self.hadoop_datanode_private_key == output_item['output_key']:
return output_item['output_value']
# Ansible hosts file
def get_host_entry(self, ipaddress):
return self.host_entry.substitute(ipaddress=ipaddress, ssh_user=self.ansible_ssh_user, private_key_file=self.ansible_ssh_private_key_file)
def get_datanode_host_entries(self):
datanode_hosts = []
for datanode_host in self.get_datanode_public_ips():
datanode_hosts.append(self.get_host_entry(datanode_host[0]))
return "\n".join(datanode_hosts)
def get_hosts_output(self):
master_host = self.get_host_entry(self.get_master_public_ip())
datanode_hosts = self.get_datanode_host_entries()
return dedent(self.hosts_output.substitute(master_host=master_host, data_hosts=datanode_hosts))
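    # Example rendered inventory (illustrative IPs):
    #   [hadoop-master]
    #   203.0.113.10 ansible_connection=ssh ansible_ssh_user=debian ansible_ssh_private_key_file=~/.ssh/hadoop.pem
    #   [hadoop-data]
    #   203.0.113.11 ansible_connection=ssh ansible_ssh_user=debian ansible_ssh_private_key_file=~/.ssh/hadoop.pem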
# Ansible group_vars nodes
def get_node_entry(self, hostname, ipaddress):
return self.node_entry.substitute(hostname=hostname, ipaddress=ipaddress)
def get_nodes_entries(self):
nodes = []
nodes.append(self.get_node_entry('hadoop-master', self.get_master_private_ip()))
for node in self.get_datanode_private_ips():
nodes.append(self.get_node_entry(node[1], node[0]))
return "\n".join(nodes)
def get_nodes_output(self):
return self.nodes_section.substitute(nodes=self.get_nodes_entries())
def get_node_keyscan_script(self):
nodes = []
nodes.append(self.nodes_sshkeyscan.substitute(ipaddress=self.get_master_public_ip()))
for node in self.get_datanode_public_ips():
nodes.append(self.nodes_sshkeyscan.substitute(ipaddress=node[0]))
return "\n".join(nodes)
def main():
heat_inv = heat_inventory()
## print "hadoop master public IP: " + heat_inv.get_master_public_ip()
## print "hadoop master private IP: " + heat_inv.get_master_private_ip()
## print "hadoop datanode private IP: " + ', '.join(heat_inv.get_datanode_private_ips())
## print "hadoop datanode public IP: " + ', '.join(heat_inv.get_datanode_public_ips())
    with open('hosts-pro', 'w') as inventory_file, \
            open('nodes-pro', 'w') as nodes_file:
        inventory_file.write(heat_inv.get_hosts_output())
        nodes_file.write(heat_inv.get_nodes_output())
    with open('scan-node-keys.sh', 'w') as keyscan_script_file:
        keyscan_script_file.write(heat_inv.get_node_keyscan_script())
if __name__ == '__main__':
main()
|
{
"content_hash": "ac34326f3bc5542372173ae02fd95a50",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 151,
"avg_line_length": 37.68333333333333,
"alnum_prop": 0.6572313135780629,
"repo_name": "dwatrous/hadoop-multi-server-ansible",
"id": "e19e637fe57e4ae184d1851eabcfbe3b7690b00e",
"size": "4831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat-inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4831"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
}
|
from flask import Flask
app = Flask(__name__)
# Register routes by importing the view modules (see the sketch below).
import views, main
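# For reference (hypothetical sketch of the pattern): views.py would contain
#   from app import app
#   @app.route('/')
#   def index(): ...
# Importing it down here, after `app` exists, lets views import `app` back
# without hitting a circular-import error at load time.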
|
{
"content_hash": "442a4287de897d60dd7773b3f4d55b40",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 23,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.7195121951219512,
"repo_name": "seerjk/reboot06",
"id": "cd542d03e907d7497cf8b381e5a401dd9e25dd15",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shares_project/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208290"
},
{
"name": "HTML",
"bytes": "167578"
},
{
"name": "Python",
"bytes": "175876"
},
{
"name": "VimL",
"bytes": "9586"
}
],
"symlink_target": ""
}
|
"""
This module is intended to provide compilation support for rst. The intention
is to keep the required libraries all in one place to provide a deployable,
python 2.6 compatible, rst compiler.
"""
import docutils.core
import sys, os.path
def toHtml(text):
return docutils.core.publish_parts(source=text, writer_name='html')['html_body']
if __name__=="__main__":
if len(sys.argv) == 2:
if os.path.isfile(sys.argv[1]):
fp = open(sys.argv[1], 'r')
print toHtml(fp.read())
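# Example invocation (illustrative): python rst.py README.rst > README.html
# writes the docutils 'html_body' fragment for the given file to stdout.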
|
{
"content_hash": "8ebd6fa98d7a4450f8f14fb7b8922708",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 84,
"avg_line_length": 30.294117647058822,
"alnum_prop": 0.6601941747572816,
"repo_name": "fretboardfreak/code",
"id": "fbbebe27d3a32f8248f7eef9946f0f8d213a8301",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abandoned/cgiWebsite/rst.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "649"
},
{
"name": "C++",
"bytes": "1599"
},
{
"name": "CSS",
"bytes": "83144"
},
{
"name": "HTML",
"bytes": "11830"
},
{
"name": "Java",
"bytes": "379"
},
{
"name": "JavaScript",
"bytes": "19508"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "PHP",
"bytes": "3691"
},
{
"name": "Perl",
"bytes": "1063"
},
{
"name": "Python",
"bytes": "273951"
},
{
"name": "Shell",
"bytes": "81945"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import tempfile
INFER_IMPORT_DIR = \
os.path.dirname(os.path.realpath(__file__)) + '/sdk-module-lists'
INFER_IMPORT_PATH = INFER_IMPORT_DIR + '/infer-imports.py'
def printerr(message):
print(message, file=sys.stderr)
def fatal_error(message):
printerr(message)
sys.exit(1)
def escapeCmdArg(arg):
if '"' in arg or ' ' in arg:
return '"%s"' % arg.replace('"', '\\"')
else:
return arg
def check_call(cmd, cwd=None, env=os.environ, verbose=False, output=None):
if verbose:
print(' '.join([escapeCmdArg(arg) for arg in cmd]))
return subprocess.check_call(cmd, cwd=cwd, env=env,
stderr=None, stdout=output)
def check_output(cmd, verbose=False):
if verbose:
print(' '.join([escapeCmdArg(arg) for arg in cmd]))
return subprocess.check_output(cmd).strip()
def get_sdk_path(platform):
return check_output(['xcrun', '-sdk', platform, '-show-sdk-path'])
def prepare_module_list(platform, file, verbose, module_filter_flags,
include_fixed_modules):
cmd = [INFER_IMPORT_PATH, '-s', get_sdk_path(platform)]
cmd.extend(module_filter_flags)
if verbose:
cmd.extend(['--v'])
check_call(cmd, output=file)
# The fixed modules are all objc frameworks.
if not include_fixed_modules:
return
with open(INFER_IMPORT_DIR + '/fixed-modules-common.txt', 'r') as extra:
file.write(extra.read())
with open(INFER_IMPORT_DIR + '/fixed-modules-' + platform + '.txt',
'r') as extra:
file.write(extra.read())
def get_api_digester_path(tool_path):
if tool_path:
return tool_path
return check_output(['xcrun', '--find', 'swift-api-digester'])
def create_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
class DumpConfig:
def __init__(self, tool_path, platform):
target_map = {
'iphoneos': 'arm64-apple-ios13.0',
'macosx': 'x86_64-apple-macosx10.15',
'appletvos': 'arm64-apple-tvos13.0',
'watchos': 'armv7k-apple-watchos6.0',
}
self.tool_path = get_api_digester_path(tool_path)
self.platform = platform
self.target = target_map[platform]
self.sdk = get_sdk_path(platform)
self.frameworks = [
self.sdk + '/System/Library/Frameworks/',
os.path.realpath(self.sdk + '/../../Library/Frameworks/')]
def run(self, output, module, swift_ver, opts, verbose,
module_filter_flags, include_fixed_modules, separate_by_module):
cmd = [self.tool_path, '-sdk', self.sdk, '-target',
self.target, '-dump-sdk', '-module-cache-path',
'/tmp/ModuleCache', '-swift-version',
swift_ver, '-abort-on-module-fail']
for path in self.frameworks:
cmd.extend(['-iframework', path])
cmd.extend(['-' + o for o in opts])
if verbose:
cmd.extend(['-v'])
if module:
cmd.extend(['-module', module])
cmd.extend(['-o', output])
check_call(cmd, verbose=verbose)
else:
with tempfile.NamedTemporaryFile() as tmp:
prepare_module_list(self.platform, tmp, verbose,
module_filter_flags, include_fixed_modules)
if separate_by_module:
tmp.seek(0)
create_directory(output)
for module in [name.strip() for name in tmp.readlines()]:
dir_path = os.path.realpath(output + '/' + module)
file_path = os.path.realpath(dir_path + '/' +
self.platform + '.json')
create_directory(dir_path)
current_cmd = list(cmd)
current_cmd.extend(['-module', module])
current_cmd.extend(['-o', file_path])
check_call(current_cmd, verbose=verbose)
else:
cmd.extend(['-o', output])
cmd.extend(['-module-list-file', tmp.name])
check_call(cmd, verbose=verbose)
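# Illustrative output layout when separate_by_module is set (hypothetical
# module names): <output>/Foundation/iphoneos.json, <output>/UIKit/iphoneos.json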
class DiagnoseConfig:
def __init__(self, tool_path):
self.tool_path = get_api_digester_path(tool_path)
def run(self, opts, before, after, output, verbose):
cmd = [self.tool_path, '-diagnose-sdk', '-input-paths', before,
'-input-paths', after, '-print-module']
if output:
cmd.extend(['-o', output])
cmd.extend(['-' + o for o in opts])
if verbose:
cmd.extend(['-v'])
check_call(cmd, verbose=verbose)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
A convenient wrapper for swift-api-digester.
''')
basic_group = parser.add_argument_group('Basic')
basic_group.add_argument('--tool-path', default=None, help='''
the path to a swift-api-digester; if not specified, the script will
use the one from the toolchain
''')
basic_group.add_argument('--action', default='', help='''
the action to perform for swift-api-digester
''')
basic_group.add_argument('--target', default=None, help='''
one of macosx, iphoneos, appletvos, and watchos
''')
basic_group.add_argument('--output', default=None, help='''
the output file of the module baseline should end with .json
''')
basic_group.add_argument('--swift-version', default='5', help='''
Swift version to use; default is 5
''')
basic_group.add_argument('--module', default=None, help='''
name of the module/framework to generate baseline, e.g. Foundation
''')
    basic_group.add_argument('--module-filter', default='', help='''
        filter for which modules to dump; empty for the entire SDK, or one of
        swift-frameworks-only / swift-overlay-only
        ''')
basic_group.add_argument('--opts', nargs='+', default=[], help='''
additional flags to pass to swift-api-digester
''')
basic_group.add_argument('--v',
action='store_true',
help='Process verbosely')
    basic_group.add_argument('--dump-before',
                             default=None,
                             help='''
        Path to the json file generated before change
        ''')
    basic_group.add_argument('--dump-after',
                             default=None,
                             help='''
        Path to the json file generated after change
        ''')
basic_group.add_argument('--separate-by-module',
action='store_true',
help='When importing entire SDK, dump content '
                                  'separately by module names')
args = parser.parse_args(sys.argv[1:])
if args.action == 'dump':
if not args.target:
fatal_error("Need to specify --target")
if not args.output:
fatal_error("Need to specify --output")
if args.module_filter == '':
module_filter_flags = []
include_fixed_modules = True
elif args.module_filter == 'swift-frameworks-only':
module_filter_flags = ['--swift-frameworks-only']
include_fixed_modules = False
elif args.module_filter == 'swift-overlay-only':
module_filter_flags = ['--swift-overlay-only']
include_fixed_modules = False
else:
fatal_error("cannot recognize --module-filter")
runner = DumpConfig(tool_path=args.tool_path, platform=args.target)
runner.run(output=args.output, module=args.module,
swift_ver=args.swift_version, opts=args.opts,
verbose=args.v,
module_filter_flags=module_filter_flags,
include_fixed_modules=include_fixed_modules,
separate_by_module=args.separate_by_module)
elif args.action == 'diagnose':
if not args.dump_before:
fatal_error("Need to specify --dump-before")
if not args.dump_after:
fatal_error("Need to specify --dump-after")
runner = DiagnoseConfig(tool_path=args.tool_path)
runner.run(opts=args.opts, before=args.dump_before,
after=args.dump_after, output=args.output, verbose=args.v)
else:
fatal_error('Cannot recognize action: ' + args.action)
if __name__ == '__main__':
main()
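# Typical invocations (illustrative paths):
#   swift-api-checker.py --action dump --target macosx --output /tmp/macosx.json
#   swift-api-checker.py --action diagnose --dump-before old.json --dump-after new.json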
|
{
"content_hash": "16c5ea1e422763d4554b00824e0c1169",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 35.717213114754095,
"alnum_prop": 0.5567412507171543,
"repo_name": "shahmishal/swift",
"id": "4b6e1fb0b195edce0fd4990ccbc70420b057aabd",
"size": "8738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/api_checker/swift-api-checker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12337"
},
{
"name": "C",
"bytes": "228905"
},
{
"name": "C++",
"bytes": "33424345"
},
{
"name": "CMake",
"bytes": "534257"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57265"
},
{
"name": "LLVM",
"bytes": "70517"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "420091"
},
{
"name": "Objective-C++",
"bytes": "248108"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1564446"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "229212"
},
{
"name": "Swift",
"bytes": "29078702"
},
{
"name": "Vim script",
"bytes": "16701"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
}
|