"""Defines `{Additive,Multiplicative}SwapRegretOptimizer`s.
These optimizers minimize a `ConstrainedMinimizationProblem` by using a
swap-regret minimizing algorithm (either SGD or multiplicative weights) to learn
what weights should be associated with the objective function and constraints.
These algorithms do *not* use Lagrange multipliers, but the idea is similar.
The main differences between the formulation used here, and the standard
Lagrangian formulation, are that (i) the objective function is weighted, in
addition to the constraints, and (ii) we learn a matrix of weights, instead of a
vector.
For the purposes of constrained optimization, at least in theory,
external-regret minimization suffices if the `ConstrainedMinimizationProblem`
we're optimizing doesn't have any `proxy_constraints`, while swap-regret
minimization should be used if `proxy_constraints` are present.
For more specifics, please refer to:
> Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
> Constrained Optimization".
> [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)
The formulation used by both of the SwapRegretOptimizers can be found in
Definition 2, and is discussed in Section 4. The
`MultiplicativeSwapRegretOptimizer` is most similar to Algorithm 2 in Section 4,
with the difference being that it uses `tf.train.Optimizer`s, instead of SGD,
for the "inner" updates. The `AdditiveSwapRegretOptimizer` differs further in
that it performs additive (instead of multiplicative) updates of the stochastic
matrix.
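Example usage (a sketch only, not from the paper; assumes TensorFlow 1.x graph
mode, `import tensorflow as tf`, and a user-provided
`ConstrainedMinimizationProblem` instance named `problem`):

```python
optimizer = AdditiveSwapRegretOptimizer(
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1))
train_op = optimizer.minimize_constrained(problem)
with tf.Session() as session:
  session.run(tf.global_variables_initializer())
  for _ in range(1000):
    session.run(train_op)
```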
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
import six
from tensorflow.contrib.constrained_optimization.python import constrained_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as train_optimizer
def _maximal_eigenvector_power_method(matrix,
epsilon=1e-6,
maximum_iterations=100):
"""Returns the maximal right-eigenvector of `matrix` using the power method.
Args:
matrix: 2D Tensor, the matrix of which we will find the maximal
right-eigenvector.
epsilon: positive float, if two iterations of the power method differ (in
L2 norm) by no more than epsilon, we will terminate.
maximum_iterations: positive int, if we perform this many iterations, we
will terminate.
Returns:
The maximal right-eigenvector of `matrix`.
Raises:
ValueError: If the epsilon or maximum_iterations parameters violate their
bounds.
"""
if epsilon <= 0.0:
raise ValueError("epsilon must be strictly positive")
if maximum_iterations <= 0:
raise ValueError("maximum_iterations must be strictly positive")
def while_loop_condition(iteration, eigenvector, old_eigenvector):
"""Returns false if the while loop should terminate."""
not_done = (iteration < maximum_iterations)
not_converged = (standard_ops.norm(eigenvector - old_eigenvector) > epsilon)
return standard_ops.logical_and(not_done, not_converged)
def while_loop_body(iteration, eigenvector, old_eigenvector):
"""Performs one iteration of the power method."""
del old_eigenvector # Needed by the condition, but not the body.
iteration += 1
# We need to use tf.matmul() and tf.expand_dims(), instead of
# tf.tensordot(), since the former will infer the shape of the result, while
# the latter will not (tf.while_loop() needs the shapes).
new_eigenvector = standard_ops.matmul(
matrix, standard_ops.expand_dims(eigenvector, 1))[:, 0]
new_eigenvector /= standard_ops.norm(new_eigenvector)
return (iteration, new_eigenvector, eigenvector)
iteration = standard_ops.constant(0)
eigenvector = standard_ops.ones_like(matrix[:, 0])
eigenvector /= standard_ops.norm(eigenvector)
# We actually want a do-while loop, so we explicitly call while_loop_body()
# once before tf.while_loop().
iteration, eigenvector, old_eigenvector = while_loop_body(
iteration, eigenvector, eigenvector)
iteration, eigenvector, old_eigenvector = control_flow_ops.while_loop(
while_loop_condition,
while_loop_body,
loop_vars=(iteration, eigenvector, old_eigenvector),
name="power_method")
return eigenvector
def _project_stochastic_matrix_wrt_euclidean_norm(matrix):
"""Projects its argument onto the set of left-stochastic matrices.
This algorithm is O(n^3) at worst, where `matrix` is n*n. It can be done in
O(n^2 * log(n)) time by sorting each column (and maybe better with a different
algorithm), but the algorithm implemented here is easier to implement in
TensorFlow.
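As a hand-worked illustration of one column: projecting [2.0, -0.3] first
shifts both entries by (1 - 1.7) / 2 = -0.35, giving [1.65, -0.65]; the
negative entry is then zeroed out and marked inactive, and the remaining
active entry is shifted by (1 - 1.65) / 1 = -0.65, so the projected column is
[1.0, 0.0].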
Args:
matrix: 2d square tensor, the matrix to project.
Returns:
The 2d square tensor that results from projecting `matrix` onto the set of
left-stochastic matrices w.r.t. the Euclidean norm applied column-wise
(i.e. the Frobenius norm).
Raises:
ValueError: if the `matrix` tensor does not have a fully-known shape, or is
not two-dimensional and square.
"""
matrix_shape = matrix.get_shape()
if matrix_shape is None:
raise ValueError("matrix must have known shape")
if matrix_shape.ndims != 2:
raise ValueError(
"matrix must be two dimensional (instead is %d-dimensional)" %
matrix_shape.ndims)
if matrix_shape[0] != matrix_shape[1]:
raise ValueError("matrix must be square (instead has shape (%d,%d))" %
(matrix_shape[0], matrix_shape[1]))
dimension = matrix_shape[0].value
if dimension is None:
raise ValueError("matrix must have fully-known shape")
def while_loop_condition(iteration, matrix, inactive, old_inactive):
"""Returns false if the while loop should terminate."""
del matrix # Needed by the body, but not the condition.
not_done = (iteration < dimension)
not_converged = standard_ops.reduce_any(
standard_ops.not_equal(inactive, old_inactive))
return standard_ops.logical_and(not_done, not_converged)
def while_loop_body(iteration, matrix, inactive, old_inactive):
"""Performs one iteration of the projection."""
del old_inactive # Needed by the condition, but not the body.
iteration += 1
scale = (1.0 - standard_ops.reduce_sum(
matrix, axis=0, keepdims=True)) / standard_ops.maximum(
1.0, standard_ops.reduce_sum(inactive, axis=0, keepdims=True))
matrix += scale * inactive
new_inactive = standard_ops.to_float(matrix > 0)
matrix *= new_inactive
return (iteration, matrix, new_inactive, inactive)
iteration = standard_ops.constant(0)
inactive = standard_ops.ones_like(matrix)
# We actually want a do-while loop, so we explicitly call while_loop_body()
# once before tf.while_loop().
iteration, matrix, inactive, old_inactive = while_loop_body(
iteration, matrix, inactive, inactive)
iteration, matrix, inactive, old_inactive = control_flow_ops.while_loop(
while_loop_condition,
while_loop_body,
loop_vars=(iteration, matrix, inactive, old_inactive),
name="euclidean_projection")
return matrix
def _project_log_stochastic_matrix_wrt_kl_divergence(log_matrix):
"""Projects its argument onto the set of log-left-stochastic matrices.
Args:
log_matrix: 2d square tensor, the element-wise logarithm of the matrix to
project.
Returns:
The 2d square tensor that results from projecting exp(`log_matrix`) onto the set
of left-stochastic matrices w.r.t. the KL-divergence applied column-wise.
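(Equivalently, this is a column-wise log-softmax: for example, a column with
log-entries [0.0, 0.0] becomes [log(0.5), log(0.5)], so the exponentiated
column is [0.5, 0.5] and sums to one.)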
"""
# For numerical reasons, make sure that the largest matrix element is zero
# before exponentiating.
log_matrix -= standard_ops.reduce_max(log_matrix, axis=0, keepdims=True)
log_matrix -= standard_ops.log(
standard_ops.reduce_sum(
standard_ops.exp(log_matrix), axis=0, keepdims=True))
return log_matrix
@six.add_metaclass(abc.ABCMeta)
class _SwapRegretOptimizer(constrained_optimizer.ConstrainedOptimizer):
"""Base class representing a `_SwapRegretOptimizer`.
This class contains most of the logic for performing constrained optimization,
minimizing swap regret for the constraints player. What it *doesn't* do is
keep track of the internal state (the stochastic matrix). Instead, the state
is accessed via the _initial_state(), _stochastic_matrix(),
_constraint_grad_and_var() and _projection_op() methods.
The reason for this is that we want to make it easy to implement different
representations of the internal state. For example, for additive updates, it's
most natural to store the stochastic matrix directly, whereas for
multiplicative updates, it's most natural to store its element-wise logarithm.
For more specifics, please refer to:
> Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
> Constrained Optimization".
> [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)
The formulation used by `_SwapRegretOptimizer`s can be found in Definition 2,
and is discussed in Section 4. Such optimizers are most similar to Algorithm
2 in Section 4. Most notably, the internal state is a left-stochastic matrix
of shape (m+1,m+1), where m is the number of constraints.
"""
def __init__(self, optimizer, constraint_optimizer=None):
"""Constructs a new `_SwapRegretOptimizer`.
The difference between `optimizer` and `constraint_optimizer` (if the latter
is provided) is that the former is used for learning the model parameters,
while the latter is used for the update to the constraint/objective weight
matrix (the analogue of Lagrange multipliers). If no `constraint_optimizer`
is provided, then `optimizer` is used for both.
Args:
optimizer: tf.train.Optimizer, used to optimize the objective and
proxy_constraints portion of ConstrainedMinimizationProblem. If
constraint_optimizer is not provided, this will also be used to optimize
the Lagrange multiplier analogues.
constraint_optimizer: optional tf.train.Optimizer, used to optimize the
Lagrange multiplier analogues.
Returns:
A new `_SwapRegretOptimizer`.
"""
super(_SwapRegretOptimizer, self).__init__(optimizer=optimizer)
self._constraint_optimizer = constraint_optimizer
@property
def constraint_optimizer(self):
"""Returns the `tf.train.Optimizer` used for the matrix."""
return self._constraint_optimizer
@abc.abstractmethod
def _initial_state(self, num_constraints):
pass
@abc.abstractmethod
def _stochastic_matrix(self, state):
pass
def _distribution(self, state):
distribution = _maximal_eigenvector_power_method(
self._stochastic_matrix(state))
distribution = standard_ops.abs(distribution)
distribution /= standard_ops.reduce_sum(distribution)
return distribution
@abc.abstractmethod
def _constraint_grad_and_var(self, state, gradient):
pass
@abc.abstractmethod
def _projection_op(self, state, name=None):
pass
def minimize_constrained(self,
minimization_problem,
global_step=None,
var_list=None,
gate_gradients=train_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Returns an `Op` for minimizing the constrained problem.
The `optimizer` constructor parameter will be used to update the model
parameters, while the constraint/objective weight matrix (the analogue of
Lagrange multipliers) will be updated using `constraint_optimizer` (if
provided) or `optimizer` (if not). Whether the matrix updates are additive
or multiplicative depends on the derived class.
Args:
minimization_problem: ConstrainedMinimizationProblem, the problem to
optimize.
global_step: as in `tf.train.Optimizer`'s `minimize` method.
var_list: as in `tf.train.Optimizer`'s `minimize` method.
gate_gradients: as in `tf.train.Optimizer`'s `minimize` method.
aggregation_method: as in `tf.train.Optimizer`'s `minimize` method.
colocate_gradients_with_ops: as in `tf.train.Optimizer`'s `minimize`
method.
name: as in `tf.train.Optimizer`'s `minimize` method.
grad_loss: as in `tf.train.Optimizer`'s `minimize` method.
Returns:
TensorFlow Op.
"""
objective = minimization_problem.objective
constraints = minimization_problem.constraints
proxy_constraints = minimization_problem.proxy_constraints
if proxy_constraints is None:
proxy_constraints = constraints
# Flatten both constraints tensors to 1d.
num_constraints = minimization_problem.num_constraints
constraints = standard_ops.reshape(constraints, shape=(num_constraints,))
proxy_constraints = standard_ops.reshape(
proxy_constraints, shape=(num_constraints,))
# We use a lambda to initialize the state so that, if this function call is
# inside the scope of a tf.control_dependencies() block, the dependencies
# will not be applied to the initializer.
state = standard_ops.Variable(
lambda: self._initial_state(num_constraints),
trainable=False,
name="swap_regret_optimizer_state")
zero_and_constraints = standard_ops.concat(
(standard_ops.zeros((1,)), constraints), axis=0)
objective_and_proxy_constraints = standard_ops.concat(
(standard_ops.expand_dims(objective, 0), proxy_constraints), axis=0)
distribution = self._distribution(state)
loss = standard_ops.tensordot(distribution, objective_and_proxy_constraints,
1)
matrix_gradient = standard_ops.matmul(
standard_ops.expand_dims(zero_and_constraints, 1),
standard_ops.expand_dims(distribution, 0))
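# Here `distribution` is the normalized maximal eigenvector of the stochastic
# matrix: its first entry weights the objective and the remaining entries
# weight the proxy constraints in `loss`. `matrix_gradient` is the outer
# product of (0, constraints) with `distribution`, and is used below as the
# update direction for the stochastic matrix; note that it uses the true
# constraints, whereas `loss` (and hence the model-parameter update) uses the
# proxy constraints.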
update_ops = []
if self.constraint_optimizer is None:
# If we don't have a separate constraint_optimizer, then we use
# self._optimizer for both the update of the model parameters, and that of
# the internal state.
grads_and_vars = self.optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
grads_and_vars.append(
self._constraint_grad_and_var(state, matrix_gradient))
update_ops.append(
self.optimizer.apply_gradients(grads_and_vars, name="update"))
else:
# If we have a separate constraint_optimizer, then we use self._optimizer
# for the update of the model parameters, and self._constraint_optimizer
# for that of the internal state.
grads_and_vars = self.optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
matrix_grads_and_vars = [
self._constraint_grad_and_var(state, matrix_gradient)
]
gradients = [
gradient for gradient, _ in grads_and_vars + matrix_grads_and_vars
if gradient is not None
]
with ops.control_dependencies(gradients):
update_ops.append(
self.optimizer.apply_gradients(grads_and_vars, name="update"))
update_ops.append(
self.constraint_optimizer.apply_gradients(
matrix_grads_and_vars, name="optimizer_state_update"))
with ops.control_dependencies(update_ops):
if global_step is None:
# If we don't have a global step, just project, and we're done.
return self._projection_op(state, name=name)
else:
# If we have a global step, then we need to increment it in addition to
# projecting.
projection_op = self._projection_op(state, name="project")
with ops.colocate_with(global_step):
global_step_op = state_ops.assign_add(
global_step, 1, name="global_step_increment")
return control_flow_ops.group(projection_op, global_step_op, name=name)
class AdditiveSwapRegretOptimizer(_SwapRegretOptimizer):
"""A `ConstrainedOptimizer` based on swap-regret minimization.
This `ConstrainedOptimizer` uses the given `tf.train.Optimizer`s to jointly
minimize over the model parameters, and maximize over constraint/objective
weight matrix (the analogue of Lagrange multipliers), with the latter
maximization using additive updates and an algorithm that minimizes swap
regret.
For more specifics, please refer to:
> Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
> Constrained Optimization".
> [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)
The formulation used by this optimizer can be found in Definition 2, and is
discussed in Section 4. It is most similar to Algorithm 2 in Section 4, with
the differences being that it uses `tf.train.Optimizer`s, instead of SGD, for
the "inner" updates, and performs additive (instead of multiplicative) updates
of the stochastic matrix.
"""
def __init__(self, optimizer, constraint_optimizer=None):
"""Constructs a new `AdditiveSwapRegretOptimizer`.
Args:
optimizer: tf.train.Optimizer, used to optimize the objective and
proxy_constraints portion of ConstrainedMinimizationProblem. If
constraint_optimizer is not provided, this will also be used to optimize
the Lagrange multiplier analogues.
constraint_optimizer: optional tf.train.Optimizer, used to optimize the
Lagrange multiplier analogues.
Returns:
A new `AdditiveSwapRegretOptimizer`.
"""
# TODO(acotter): add a parameter determining the initial values of the
# matrix elements (like initial_multiplier_radius in
# MultiplicativeSwapRegretOptimizer).
super(AdditiveSwapRegretOptimizer, self).__init__(
optimizer=optimizer, constraint_optimizer=constraint_optimizer)
def _initial_state(self, num_constraints):
# For an AdditiveSwapRegretOptimizer, the internal state is a tensor of
# shape (m+1,m+1), where m is the number of constraints, representing a
# left-stochastic matrix.
dimension = num_constraints + 1
# Initialize by putting all weight on the objective, and none on the
# constraints.
return standard_ops.concat(
(standard_ops.ones(
(1, dimension)), standard_ops.zeros((dimension - 1, dimension))),
axis=0)
def _stochastic_matrix(self, state):
return state
def _constraint_grad_and_var(self, state, gradient):
# TODO(acotter): tf.colocate_with(), if colocate_gradients_with_ops is True?
return (-gradient, state)
def _projection_op(self, state, name=None):
with ops.colocate_with(state):
return state_ops.assign(
state,
_project_stochastic_matrix_wrt_euclidean_norm(state),
name=name)
class MultiplicativeSwapRegretOptimizer(_SwapRegretOptimizer):
"""A `ConstrainedOptimizer` based on swap-regret minimization.
This `ConstrainedOptimizer` uses the given `tf.train.Optimizer`s to jointly
minimize over the model parameters, and maximize over constraint/objective
weight matrix (the analogue of Lagrange multipliers), with the latter
maximization using multiplicative updates and an algorithm that minimizes swap
regret.
For more specifics, please refer to:
> Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
> Constrained Optimization".
> [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)
The formulation used by this optimizer can be found in Definition 2, and is
discussed in Section 4. It is most similar to Algorithm 2 in Section 4, with
the difference being that it uses `tf.train.Optimizer`s, instead of SGD, for
the "inner" updates.
"""
def __init__(self,
optimizer,
constraint_optimizer=None,
minimum_multiplier_radius=1e-3,
initial_multiplier_radius=None):
"""Constructs a new `MultiplicativeSwapRegretOptimizer`.
Args:
optimizer: tf.train.Optimizer, used to optimize the objective and
proxy_constraints portion of ConstrainedMinimizationProblem. If
constraint_optimizer is not provided, this will also be used to optimize
the Lagrange multiplier analogues.
constraint_optimizer: optional tf.train.Optimizer, used to optimize the
Lagrange multiplier analogues.
minimum_multiplier_radius: float, each element of the matrix will be lower
bounded by `minimum_multiplier_radius` divided by one plus the number of
constraints.
initial_multiplier_radius: float, the initial value of each element of the
matrix associated with a constraint (i.e. excluding those elements
associated with the objective) will be `initial_multiplier_radius`
divided by one plus the number of constraints. Defaults to the value of
`minimum_multiplier_radius`.
Returns:
A new `MultiplicativeSwapRegretOptimizer`.
Raises:
ValueError: If the two radius parameters are inconsistent.
"""
super(MultiplicativeSwapRegretOptimizer, self).__init__(
optimizer=optimizer, constraint_optimizer=constraint_optimizer)
if (minimum_multiplier_radius <= 0.0) or (minimum_multiplier_radius >= 1.0):
raise ValueError("minimum_multiplier_radius must be in the range (0,1)")
if initial_multiplier_radius is None:
initial_multiplier_radius = minimum_multiplier_radius
elif (initial_multiplier_radius <
minimum_multiplier_radius) or (initial_multiplier_radius > 1.0):
raise ValueError("initial_multiplier_radius must be in the range "
"[minimum_multiplier_radius,1]")
self._minimum_multiplier_radius = minimum_multiplier_radius
self._initial_multiplier_radius = initial_multiplier_radius
def _initial_state(self, num_constraints):
# For a MultiplicativeSwapRegretOptimizer, the internal state is a tensor of
# shape (m+1,m+1), where m is the number of constraints, representing the
# element-wise logarithm of a left-stochastic matrix.
dimension = num_constraints + 1
# Initialize by putting as much weight as possible on the objective, and as
# little as possible on the constraints.
log_initial_one = math.log(1.0 - (self._initial_multiplier_radius *
(dimension - 1) / (dimension)))
log_initial_zero = math.log(self._initial_multiplier_radius / dimension)
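# For example (illustrative numbers only): with two constraints,
# dimension = 3, so with initial_multiplier_radius = 1e-3 the top-row entries
# are exp(log_initial_one) = 1 - 1e-3 * 2 / 3 ~= 0.99933 and all other entries
# are exp(log_initial_zero) = 1e-3 / 3 ~= 0.00033, so every column sums to one.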
return standard_ops.concat(
(standard_ops.constant(
log_initial_one, dtype=dtypes.float32, shape=(1, dimension)),
standard_ops.constant(
log_initial_zero,
dtype=dtypes.float32,
shape=(dimension - 1, dimension))),
axis=0)
def _stochastic_matrix(self, state):
return standard_ops.exp(state)
def _constraint_grad_and_var(self, state, gradient):
# TODO(acotter): tf.colocate_with(), if colocate_gradients_with_ops is True?
return (-gradient, state)
def _projection_op(self, state, name=None):
with ops.colocate_with(state):
# Gets the dimension of the state (num_constraints + 1)--all of these
# assertions are of things that should be impossible, since the state
# passed into this method will have the same shape as that returned by
# _initial_state().
state_shape = state.get_shape()
assert state_shape is not None
assert state_shape.ndims == 2
assert state_shape[0] == state_shape[1]
dimension = state_shape[0].value
assert dimension is not None
minimum_log_multiplier = standard_ops.log(
self._minimum_multiplier_radius / standard_ops.to_float(dimension))
return state_ops.assign(
state,
standard_ops.maximum(
_project_log_stochastic_matrix_wrt_kl_divergence(state),
minimum_log_multiplier),
name=name)
|
{
"content_hash": "23e9be95255ddc6c1313410a49001e17",
"timestamp": "",
"source": "github",
"line_count": 581,
"max_line_length": 84,
"avg_line_length": 42.28743545611015,
"alnum_prop": 0.7031625218771623,
"repo_name": "manipopopo/tensorflow",
"id": "ff846b191a34e3f3b4aa35671ca22b96b963db80",
"size": "25258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "324704"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46405377"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "830061"
},
{
"name": "Jupyter Notebook",
"bytes": "2632416"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52525"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99271"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39882449"
},
{
"name": "Ruby",
"bytes": "551"
},
{
"name": "Shell",
"bytes": "447049"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""Python protoc plugin for Envoy APIs."""
import sys
from collections import namedtuple
from tools.api_proto_plugin import traverse
from google.protobuf.compiler import plugin_pb2
OutputDescriptor = namedtuple(
'OutputDescriptor',
[
# Output files are generated alongside their corresponding input .proto,
# with the output_suffix appended.
'output_suffix',
# The visitor factory is a function to create a visitor.Visitor defining
# the business logic of the plugin for the specific output descriptor.
'visitor_factory',
# FileDescriptorProto transformer; this is applied to the input
# before any output generation.
'xform',
# Supply --//tools/api_proto_plugin CLI args as a parameters dictionary
# to visitor_factory constructor and xform function?
'want_params',
])
def direct_output_descriptor(output_suffix, visitor, want_params=False):
return OutputDescriptor(
output_suffix, visitor, (lambda x, _: x) if want_params else lambda x: x, want_params)
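# A minimal usage sketch (the output suffix and visitor class here are
# hypothetical; a real visitor implements the tools.api_proto_plugin visitor
# interface):
#
#   from tools.api_proto_plugin import plugin
#   from my_project import rst_visitor  # hypothetical module
#
#   def main():
#     plugin.plugin(
#         [plugin.direct_output_descriptor('.rst', rst_visitor.RstVisitor)])
#
#   if __name__ == '__main__':
#     main()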
# TODO(phlax): make this into a class
def plugin(output_descriptors, traverser=None):
"""Protoc plugin entry point.
This defines protoc plugin and manages the stdin -> stdout flow. An
api_proto_plugin is defined by the provided visitor.
See
http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/
for further details on protoc plugin basics.
Args:
output_descriptors: a list of OutputDescriptors.
traverser: optional callable used to traverse the (transformed)
FileDescriptorProto with the visitor; defaults to traverse.traverse_file.
"""
traverser = traverser or traverse.traverse_file
request = plugin_pb2.CodeGeneratorRequest()
request.ParseFromString(sys.stdin.buffer.read())
response = plugin_pb2.CodeGeneratorResponse()
# We use request.file_to_generate rather than request.proto_file here since we
# are invoked inside a Bazel aspect: each node in the DAG will be visited once
# by the aspect, and we only want to generate docs for the current node.
for file_to_generate in request.file_to_generate:
# Find the FileDescriptorProto for the file we actually are generating.
file_proto = [pf for pf in request.proto_file if pf.name == file_to_generate][0]
for od in output_descriptors:
f = response.file.add()
f.name = f"{file_proto.name}{od.output_suffix}"
if request.HasField("parameter") and od.want_params:
params = dict(param.split('=') for param in request.parameter.split(','))
xformed_proto = od.xform(file_proto, params)
visitor_factory = od.visitor_factory(params)
else:
xformed_proto = od.xform(file_proto)
visitor_factory = od.visitor_factory()
f.content = traverser(xformed_proto, visitor_factory)
sys.stdout.buffer.write(response.SerializeToString())
|
{
"content_hash": "02c4be1cea34b64b8d71fbe31a1f16b0",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 94,
"avg_line_length": 40.74647887323944,
"alnum_prop": 0.6768060836501901,
"repo_name": "envoyproxy/envoy",
"id": "2ae9dfb44fc928b3d67d1a73c351f8b051fc5385",
"size": "2893",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/api_proto_plugin/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "439"
},
{
"name": "C",
"bytes": "54172"
},
{
"name": "C++",
"bytes": "36279350"
},
{
"name": "CSS",
"bytes": "884"
},
{
"name": "Dockerfile",
"bytes": "891"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Go",
"bytes": "558"
},
{
"name": "HTML",
"bytes": "582"
},
{
"name": "Java",
"bytes": "1309139"
},
{
"name": "JavaScript",
"bytes": "76"
},
{
"name": "Jinja",
"bytes": "46306"
},
{
"name": "Kotlin",
"bytes": "311319"
},
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "NASL",
"bytes": "327095"
},
{
"name": "Objective-C",
"bytes": "95941"
},
{
"name": "PureBasic",
"bytes": "472"
},
{
"name": "Python",
"bytes": "630897"
},
{
"name": "Ruby",
"bytes": "47"
},
{
"name": "Rust",
"bytes": "38041"
},
{
"name": "Shell",
"bytes": "194810"
},
{
"name": "Smarty",
"bytes": "3528"
},
{
"name": "Starlark",
"bytes": "2229814"
},
{
"name": "Swift",
"bytes": "307285"
},
{
"name": "Thrift",
"bytes": "748"
}
],
"symlink_target": ""
}
|
class SWIGInterfaceParser(object):
def __init__(self):
self.ignored = []
def parse(self, interface_path):
pass
|
{
"content_hash": "36a4d872fc4c9311f85606a287c486e0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 36,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.5955882352941176,
"repo_name": "rokups/Urho3D",
"id": "0e8273e3aa2a515999d3e98e80f26b8fd2c0e5cd",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/Tools/BindTool/swig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "14842"
},
{
"name": "C#",
"bytes": "643494"
},
{
"name": "C++",
"bytes": "7624116"
},
{
"name": "CMake",
"bytes": "285041"
},
{
"name": "GLSL",
"bytes": "151027"
},
{
"name": "HLSL",
"bytes": "175276"
},
{
"name": "HTML",
"bytes": "23451"
},
{
"name": "Java",
"bytes": "89032"
},
{
"name": "MAXScript",
"bytes": "94704"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Objective-C",
"bytes": "6549"
},
{
"name": "Python",
"bytes": "31194"
},
{
"name": "Shell",
"bytes": "27209"
}
],
"symlink_target": ""
}
|
import abc
import six
import eventlet
__all__ = [
'Sensor',
'PollingSensor'
]
@six.add_metaclass(abc.ABCMeta)
class BaseSensor(object):
"""
Base Sensor class - not to be instantiated directly.
"""
def __init__(self, sensor_service, config=None):
self._sensor_service = sensor_service
self._config = config or {}
@abc.abstractmethod
def setup(self):
"""
Run the sensor initialization / setup code (if any).
"""
pass
@abc.abstractmethod
def run(self):
"""
Run the sensor.
"""
pass
@abc.abstractmethod
def cleanup(self):
"""
Run the sensor cleanup code (if any).
"""
pass
@abc.abstractmethod
def add_trigger(self, trigger):
"""
Runs when trigger is created
"""
pass
@abc.abstractmethod
def update_trigger(self, trigger):
"""
Runs when trigger is updated
"""
pass
@abc.abstractmethod
def remove_trigger(self, trigger):
"""
Runs when trigger is deleted
"""
pass
class Sensor(BaseSensor):
"""
Base class to be inherited from by the passive sensors.
"""
@abc.abstractmethod
def run(self):
pass
class PollingSensor(BaseSensor):
"""
Base class to be inherited from by the active sensors.
Active sensors periodically poll a 3rd party system for new information.
"""
def __init__(self, sensor_service, config, poll_interval=5):
super(PollingSensor, self).__init__(sensor_service=sensor_service, config=config)
self._poll_interval = poll_interval
@abc.abstractmethod
def poll(self):
"""
Poll 3rd party system for new information.
"""
pass
def run(self):
while True:
self.poll()
eventlet.sleep(self._poll_interval)
def get_poll_interval(self):
"""
Retrieve current poll interval.
:return: Current poll interval.
:rtype: ``float``
"""
return self._poll_interval
def set_poll_interval(self, poll_interval):
"""
Set the poll interval.
:param poll_interval: Poll interval to use.
:type poll_interval: ``float``
"""
self._poll_interval = poll_interval
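# A minimal usage sketch (hypothetical sensor; trigger dispatching via the
# sensor service is omitted for brevity):
#
#   class MyPollingSensor(PollingSensor):
#       def setup(self):
#           self._counter = 0
#
#       def poll(self):
#           # Query the external system here and dispatch triggers through
#           # self._sensor_service as needed.
#           self._counter += 1
#
#       def cleanup(self):
#           pass
#
#       def add_trigger(self, trigger):
#           pass
#
#       def update_trigger(self, trigger):
#           pass
#
#       def remove_trigger(self, trigger):
#           pass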
|
{
"content_hash": "b96bb228a0307b68a5f75dec5a9af7e4",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 89,
"avg_line_length": 20.894736842105264,
"alnum_prop": 0.5646515533165407,
"repo_name": "jtopjian/st2",
"id": "9c801f5856ad2b19d2113333e80abefa20bf9f6a",
"size": "2382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2reactor/st2reactor/sensor/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "19687"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "1823869"
},
{
"name": "Shell",
"bytes": "7150"
}
],
"symlink_target": ""
}
|
import datetime
from itertools import chain
import re
from py4j.protocol import Py4JJavaError
from pyspark.sql import Row, Window
from pyspark.sql.functions import udf, input_file_name, col, percentile_approx, \
lit, assert_true, sum_distinct, sumDistinct, shiftleft, shiftLeft, shiftRight, \
shiftright, shiftrightunsigned, shiftRightUnsigned
from pyspark.testing.sqlutils import ReusedSQLTestCase
class FunctionsTests(ReusedSQLTestCase):
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
with self.tempView("temp"):
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(100)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 35)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot("a", u"b")).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot("a", 2)).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot(df.a, 2)).collect())
def test_inverse_trig_functions(self):
from pyspark.sql import functions
funs = [
(functions.acosh, "ACOSH"),
(functions.asinh, "ASINH"),
(functions.atanh, "ATANH"),
]
cols = ["a", functions.col("a")]
for f, alias in funs:
for c in cols:
self.assertIn(f"{alias}(a)", repr(f(c)))
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql import functions
from pyspark.sql.functions import col, lit
string_functions = [
"upper", "lower", "ascii",
"base64", "unbase64",
"ltrim", "rtrim", "trim"
]
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegex(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
for name in string_functions:
self.assertEqual(
df.select(getattr(functions, name)("name")).first()[0],
df.select(getattr(functions, name)(col("name"))).first()[0])
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, "1").alias('b')).collect()
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(TypeError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(TypeError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(TypeError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_sorting_functions_with_column(self):
from pyspark.sql import functions
from pyspark.sql.column import Column
funs = [
functions.asc_nulls_first, functions.asc_nulls_last,
functions.desc_nulls_first, functions.desc_nulls_last
]
exprs = [col("x"), "x"]
for fun in funs:
for expr in exprs:
res = fun(expr)
self.assertIsInstance(res, Column)
self.assertIn(
f"""'x {fun.__name__.replace("_", " ").upper()}'""",
str(res)
)
for expr in exprs:
res = functions.asc(expr)
self.assertIsInstance(res, Column)
self.assertIn(
"""'x ASC NULLS FIRST'""",
str(res)
)
for expr in exprs:
res = functions.desc(expr)
self.assertIsInstance(res, Column)
self.assertIn(
"""'x DESC NULLS LAST'""",
str(res)
)
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEqual(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEqual(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEqual(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEqual(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_input_file_name_reset_for_rdd(self):
rdd = self.sc.textFile('python/test_support/hello/hello.txt').map(lambda x: {'data': x})
df = self.spark.createDataFrame(rdd, "data STRING")
df.select(input_file_name().alias('file')).collect()
non_file_df = self.spark.range(100).select(input_file_name())
results = non_file_df.collect()
self.assertTrue(len(results) == 100)
# [SPARK-24605]: if everything was properly reset after the last job, this should return
# empty string rather than the file read in the last job.
for result in results:
self.assertEqual(result[0], '')
def test_slice(self):
from pyspark.sql.functions import lit, size, slice
df = self.spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
self.assertEqual(
df.select(slice(df.x, 2, 2).alias("sliced")).collect(),
df.select(slice(df.x, lit(2), lit(2)).alias("sliced")).collect(),
)
self.assertEqual(
df.select(slice(df.x, size(df.x) - 1, lit(1)).alias("sliced")).collect(),
[Row(sliced=[2]), Row(sliced=[4])]
)
self.assertEqual(
df.select(slice(df.x, lit(1), size(df.x) - 1).alias("sliced")).collect(),
[Row(sliced=[1, 2]), Row(sliced=[4])]
)
def test_array_repeat(self):
from pyspark.sql.functions import array_repeat, lit
df = self.spark.range(1)
self.assertEqual(
df.select(array_repeat("id", 3)).toDF("val").collect(),
df.select(array_repeat("id", lit(3))).toDF("val").collect(),
)
def test_input_file_name_udf(self):
df = self.spark.read.text('python/test_support/hello/hello.txt')
df = df.select(udf(lambda x: x)("value"), input_file_name().alias('file'))
file_name = df.collect()[0].file
self.assertTrue("python/test_support/hello/hello.txt" in file_name)
def test_overlay(self):
from pyspark.sql.functions import col, lit, overlay
from itertools import chain
import re
actual = list(chain.from_iterable([
re.findall("(overlay\\(.*\\))", str(x)) for x in [
overlay(col("foo"), col("bar"), 1),
overlay("x", "y", 3),
overlay(col("x"), col("y"), 1, 3),
overlay("x", "y", 2, 5),
overlay("x", "y", lit(11)),
overlay("x", "y", lit(2), lit(5)),
]
]))
expected = [
"overlay(foo, bar, 1, -1)",
"overlay(x, y, 3, -1)",
"overlay(x, y, 1, 3)",
"overlay(x, y, 2, 5)",
"overlay(x, y, 11, -1)",
"overlay(x, y, 2, 5)",
]
self.assertListEqual(actual, expected)
def test_percentile_approx(self):
actual = list(chain.from_iterable([
re.findall("(percentile_approx\\(.*\\))", str(x)) for x in [
percentile_approx(col("foo"), lit(0.5)),
percentile_approx(col("bar"), 0.25, 42),
percentile_approx(col("bar"), [0.25, 0.5, 0.75]),
percentile_approx(col("foo"), (0.05, 0.95), 100),
percentile_approx("foo", 0.5),
percentile_approx("bar", [0.1, 0.9], lit(10)),
]
]))
expected = [
"percentile_approx(foo, 0.5, 10000)",
"percentile_approx(bar, 0.25, 42)",
"percentile_approx(bar, array(0.25, 0.5, 0.75), 10000)",
"percentile_approx(foo, array(0.05, 0.95), 100)",
"percentile_approx(foo, 0.5, 10000)",
"percentile_approx(bar, array(0.1, 0.9), 10)"
]
self.assertListEqual(actual, expected)
def test_nth_value(self):
from pyspark.sql import Window
from pyspark.sql.functions import nth_value
df = self.spark.createDataFrame([
("a", 0, None),
("a", 1, "x"),
("a", 2, "y"),
("a", 3, "z"),
("a", 4, None),
("b", 1, None),
("b", 2, None)], schema=("key", "order", "value"))
w = Window.partitionBy("key").orderBy("order")
rs = df.select(
df.key,
df.order,
nth_value("value", 2).over(w),
nth_value("value", 2, False).over(w),
nth_value("value", 2, True).over(w)).collect()
expected = [
("a", 0, None, None, None),
("a", 1, "x", "x", None),
("a", 2, "x", "x", "y"),
("a", 3, "x", "x", "y"),
("a", 4, "x", "x", "y"),
("b", 1, None, None, None),
("b", 2, None, None, None)
]
for r, ex in zip(sorted(rs), sorted(expected)):
self.assertEqual(tuple(r), ex[:len(r)])
def test_higher_order_function_failures(self):
from pyspark.sql.functions import col, transform
# Should fail with varargs
with self.assertRaises(ValueError):
transform(col("foo"), lambda *x: lit(1))
# Should fail with kwargs
with self.assertRaises(ValueError):
transform(col("foo"), lambda **x: lit(1))
# Should fail with nullary function
with self.assertRaises(ValueError):
transform(col("foo"), lambda: lit(1))
# Should fail with quaternary function
with self.assertRaises(ValueError):
transform(col("foo"), lambda x1, x2, x3, x4: lit(1))
# Should fail if function doesn't return Column
with self.assertRaises(ValueError):
transform(col("foo"), lambda x: 1)
def test_nested_higher_order_function(self):
# SPARK-35382: lambda vars must be resolved properly in nested higher order functions
from pyspark.sql.functions import flatten, struct, transform
df = self.spark.sql("SELECT array(1, 2, 3) as numbers, array('a', 'b', 'c') as letters")
actual = df.select(flatten(
transform(
"numbers",
lambda number: transform(
"letters",
lambda letter: struct(number.alias("n"), letter.alias("l"))
)
)
)).first()[0]
expected = [(1, "a"), (1, "b"), (1, "c"),
(2, "a"), (2, "b"), (2, "c"),
(3, "a"), (3, "b"), (3, "c")]
self.assertEquals(actual, expected)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEqual(date(2017, 1, 22), parse_result['to_date(dateCol)'])
def test_assert_true(self):
from pyspark.sql.functions import assert_true
df = self.spark.range(3)
self.assertEqual(
df.select(assert_true(df.id < 3)).toDF("val").collect(),
[Row(val=None), Row(val=None), Row(val=None)],
)
with self.assertRaises(Py4JJavaError) as cm:
df.select(assert_true(df.id < 2, 'too big')).toDF("val").collect()
self.assertIn("java.lang.RuntimeException", str(cm.exception))
self.assertIn("too big", str(cm.exception))
with self.assertRaises(Py4JJavaError) as cm:
df.select(assert_true(df.id < 2, df.id * 1e6)).toDF("val").collect()
self.assertIn("java.lang.RuntimeException", str(cm.exception))
self.assertIn("2000000", str(cm.exception))
with self.assertRaises(TypeError) as cm:
df.select(assert_true(df.id < 2, 5))
self.assertEqual(
"errMsg should be a Column or a str, got <class 'int'>",
str(cm.exception)
)
def test_raise_error(self):
from pyspark.sql.functions import raise_error
df = self.spark.createDataFrame([Row(id="foobar")])
with self.assertRaises(Py4JJavaError) as cm:
df.select(raise_error(df.id)).collect()
self.assertIn("java.lang.RuntimeException", str(cm.exception))
self.assertIn("foobar", str(cm.exception))
with self.assertRaises(Py4JJavaError) as cm:
df.select(raise_error("barfoo")).collect()
self.assertIn("java.lang.RuntimeException", str(cm.exception))
self.assertIn("barfoo", str(cm.exception))
with self.assertRaises(TypeError) as cm:
df.select(raise_error(None))
self.assertEqual(
"errMsg should be a Column or a str, got <class 'NoneType'>",
str(cm.exception)
)
def test_sum_distinct(self):
self.spark.range(10).select(
assert_true(sum_distinct(col("id")) == sumDistinct(col("id")))).collect()
def test_shiftleft(self):
self.spark.range(10).select(
assert_true(shiftLeft(col("id"), 2) == shiftleft(col("id"), 2))).collect()
def test_shiftright(self):
self.spark.range(10).select(
assert_true(shiftRight(col("id"), 2) == shiftright(col("id"), 2))).collect()
def test_shiftrightunsigned(self):
self.spark.range(10).select(
assert_true(
shiftRightUnsigned(col("id"), 2) == shiftrightunsigned(col("id"), 2))).collect()
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_functions import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
{
"content_hash": "556761705dab17537ba3319c5b87e1b3",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 99,
"avg_line_length": 40.42688330871492,
"alnum_prop": 0.5407212539734736,
"repo_name": "milliman/spark",
"id": "082d61b732429e3f56e4b2e8e173fb33473e016e",
"size": "28154",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests/test_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "52464"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "24622"
},
{
"name": "Dockerfile",
"bytes": "9429"
},
{
"name": "HTML",
"bytes": "41560"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4316296"
},
{
"name": "JavaScript",
"bytes": "221431"
},
{
"name": "Jupyter Notebook",
"bytes": "4310524"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352905"
},
{
"name": "PowerShell",
"bytes": "3882"
},
{
"name": "Python",
"bytes": "7191174"
},
{
"name": "R",
"bytes": "1265563"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "27389"
},
{
"name": "Scala",
"bytes": "39048900"
},
{
"name": "Shell",
"bytes": "229968"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
}
|
import redis
# r_server = redis.Redis("211.155.92.154")
r_server = redis.Redis("121.40.202.173")
r_server.delete("esribbs:static")
with open("init-redis/data/esribbs.txt") as sites:
urls = sites.read().splitlines()
for url in urls:
r_server.sadd("esribbs:static", url)
r_server.delete("esribbs:start_urls")
urls = r_server.smembers("esribbs:static")
for url in urls:
r_server.lpush("esribbs:start_urls", url)
|
{
"content_hash": "227b9ee269dace06646a09c82df9648c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 50,
"avg_line_length": 28.8,
"alnum_prop": 0.6875,
"repo_name": "NoahKow/ziwuquan",
"id": "a70ee26814707abbb9d89c3df5ff4415ec9b1273",
"size": "451",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "crawler/init-redis/esri.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "105171"
},
{
"name": "Shell",
"bytes": "482"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from preprocessing import crop
MEAN = tf.constant([123.68, 116.78, 103.94], dtype=tf.float32) # IMAGENET
def preprocess_image(image, output_height, output_width, is_training=False):
# Crop
img_crop = crop.preprocess_image(image, output_height, output_width, is_training)
# Subtract the imagenet mean (mean over all imagenet images)
imgnet_mean = tf.reshape(MEAN, [1, 1, 3])
img_cast = tf.cast(img_crop, dtype=tf.float32)
img_standardized = tf.subtract(img_cast, imgnet_mean)
return img_standardized
|
{
"content_hash": "97a8397cbf53dddaefd69e433776a5ca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 37.53333333333333,
"alnum_prop": 0.7069271758436945,
"repo_name": "pjaehrling/finetuneAlexVGG",
"id": "0bba09656675dffa7760dff51cc21e2628d22a93",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocessing/imagenet/crop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1106751"
},
{
"name": "Python",
"bytes": "133828"
}
],
"symlink_target": ""
}
|
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusions/exclusions.
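For example, a dialect's suite can re-open a requirement that is closed by
default (a sketch; import paths as in SQLAlchemy's testing package)::

    from sqlalchemy.testing import exclusions
    from sqlalchemy.testing.requirements import SuiteRequirements

    class Requirements(SuiteRequirements):

        @property
        def window_functions(self):
            return exclusions.open()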
"""
from . import exclusions, config
class Requirements(object):
def __init__(self, config):
self.config = config
@property
def db(self):
return config.db
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a SELECT."""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda: self.config.db.dialect.supports_empty_insert or \
self.config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda: self.config.db.dialect.implicit_returning,
"'returning' not supported by database"
)
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda: not self.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda: not self.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda: self.config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda: self.config.db.dialect.supports_sequences and \
self.config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_reflection(self):
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at all."""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
def _has_mysql_on_windows(self):
return False
def _has_mysql_fully_case_sensitive(self):
return False
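# Illustrative sketch (not part of the original module): an external dialect
# test suite would subclass SuiteRequirements and override properties to
# describe what its backend supports. The capabilities below are assumptions
# for a hypothetical backend, not statements about any real dialect.
class ExampleDialectRequirements(SuiteRequirements):
    @property
    def window_functions(self):
        # the hypothetical backend implements window functions
        return exclusions.open()
    @property
    def returning(self):
        # the hypothetical backend has no RETURNING support
        return exclusions.closed()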
|
{
"content_hash": "a913db7e7818185c4cba66de2e8175c6",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 89,
"avg_line_length": 27.87133182844244,
"alnum_prop": 0.6341621446505223,
"repo_name": "davidvon/pipa-pay-server",
"id": "f89d469eaf1ccb4290fdde2cbef7220d81927a03",
"size": "12347",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "site-packages/sqlalchemy/testing/requirements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "HTML",
"bytes": "73437"
},
{
"name": "JavaScript",
"bytes": "34960"
},
{
"name": "Nginx",
"bytes": "3074"
},
{
"name": "Python",
"bytes": "13078022"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.FortranCommon
Stuff for processing Fortran, common to all fortran dialects.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/FortranCommon.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import re
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
def isfortran(env, source):
"""Return 1 if any of code in source has fortran files in it, 0
otherwise."""
try:
fsuffixes = env['FORTRANSUFFIXES']
except KeyError:
# If no FORTRANSUFFIXES, no fortran tool, so there is no need to look
# for fortran sources.
return 0
if not source:
# Source might be None for unusual cases like SConf.
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext in fsuffixes:
return 1
return 0
def _fortranEmitter(target, source, env):
node = source[0].rfile()
if not node.exists() and not node.is_derived():
print "Could not locate " + str(node.name)
return ([], [])
mod_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
cre = re.compile(mod_regex,re.M)
# Retrieve all USE'd module names
modules = cre.findall(node.get_text_contents())
    # Remove duplicate items from the list
modules = SCons.Util.unique(modules)
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX', target=target, source=source)
moddir = env.subst('$FORTRANMODDIR', target=target, source=source)
modules = [x.lower() + suffix for x in modules]
for m in modules:
target.append(env.fs.File(m, moddir))
return (target, source)
def FortranEmitter(target, source, env):
target, source = _fortranEmitter(target, source, env)
return SCons.Defaults.StaticObjectEmitter(target, source, env)
def ShFortranEmitter(target, source, env):
target, source = _fortranEmitter(target, source, env)
return SCons.Defaults.SharedObjectEmitter(target, source, env)
def ComputeFortranSuffixes(suffixes, ppsuffixes):
"""suffixes are fortran source files, and ppsuffixes the ones to be
pre-processed. Both should be sequences, not strings."""
assert len(suffixes) > 0
s = suffixes[0]
sup = s.upper()
upper_suffixes = [_.upper() for _ in suffixes]
if SCons.Util.case_sensitive_suffixes(s, sup):
ppsuffixes.extend(upper_suffixes)
else:
suffixes.extend(upper_suffixes)
def CreateDialectActions(dialect):
"""Create dialect specific actions."""
CompAction = SCons.Action.Action('$%sCOM ' % dialect, '$%sCOMSTR' % dialect)
CompPPAction = SCons.Action.Action('$%sPPCOM ' % dialect, '$%sPPCOMSTR' % dialect)
ShCompAction = SCons.Action.Action('$SH%sCOM ' % dialect, '$SH%sCOMSTR' % dialect)
ShCompPPAction = SCons.Action.Action('$SH%sPPCOM ' % dialect, '$SH%sPPCOMSTR' % dialect)
return CompAction, CompPPAction, ShCompAction, ShCompPPAction
def DialectAddToEnv(env, dialect, suffixes, ppsuffixes, support_module = 0):
"""Add dialect specific construction variables."""
ComputeFortranSuffixes(suffixes, ppsuffixes)
fscan = SCons.Scanner.Fortran.FortranScan("%sPATH" % dialect)
for suffix in suffixes + ppsuffixes:
SCons.Tool.SourceFileScanner.add_scanner(suffix, fscan)
env.AppendUnique(FORTRANSUFFIXES = suffixes + ppsuffixes)
compaction, compppaction, shcompaction, shcompppaction = \
CreateDialectActions(dialect)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in suffixes:
static_obj.add_action(suffix, compaction)
shared_obj.add_action(suffix, shcompaction)
static_obj.add_emitter(suffix, FortranEmitter)
shared_obj.add_emitter(suffix, ShFortranEmitter)
for suffix in ppsuffixes:
static_obj.add_action(suffix, compppaction)
shared_obj.add_action(suffix, shcompppaction)
static_obj.add_emitter(suffix, FortranEmitter)
shared_obj.add_emitter(suffix, ShFortranEmitter)
if '%sFLAGS' % dialect not in env:
env['%sFLAGS' % dialect] = SCons.Util.CLVar('')
if 'SH%sFLAGS' % dialect not in env:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)
# If a tool does not define fortran prefix/suffix for include path, use C ones
if 'INC%sPREFIX' % dialect not in env:
env['INC%sPREFIX' % dialect] = '$INCPREFIX'
if 'INC%sSUFFIX' % dialect not in env:
env['INC%sSUFFIX' % dialect] = '$INCSUFFIX'
env['_%sINCFLAGS' % dialect] = '$( ${_concat(INC%sPREFIX, %sPATH, INC%sSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' % (dialect, dialect, dialect)
if support_module == 1:
env['%sCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['%sPPCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['SH%sCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
else:
env['%sCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['%sPPCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['SH%sCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
def add_fortran_to_env(env):
"""Add Builders and construction variables for Fortran to an Environment."""
try:
FortranSuffixes = env['FORTRANFILESUFFIXES']
except KeyError:
FortranSuffixes = ['.f', '.for', '.ftn']
#print "Adding %s to fortran suffixes" % FortranSuffixes
try:
FortranPPSuffixes = env['FORTRANPPFILESUFFIXES']
except KeyError:
FortranPPSuffixes = ['.fpp', '.FPP']
DialectAddToEnv(env, "FORTRAN", FortranSuffixes,
FortranPPSuffixes, support_module = 1)
env['FORTRANMODPREFIX'] = '' # like $LIBPREFIX
env['FORTRANMODSUFFIX'] = '.mod' # like $LIBSUFFIX
env['FORTRANMODDIR'] = '' # where the compiler should place .mod files
env['FORTRANMODDIRPREFIX'] = '' # some prefix to $FORTRANMODDIR - similar to $INCPREFIX
env['FORTRANMODDIRSUFFIX'] = '' # some suffix to $FORTRANMODDIR - similar to $INCSUFFIX
env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
def add_f77_to_env(env):
"""Add Builders and construction variables for f77 to an Environment."""
try:
F77Suffixes = env['F77FILESUFFIXES']
except KeyError:
F77Suffixes = ['.f77']
#print "Adding %s to f77 suffixes" % F77Suffixes
try:
F77PPSuffixes = env['F77PPFILESUFFIXES']
except KeyError:
F77PPSuffixes = []
DialectAddToEnv(env, "F77", F77Suffixes, F77PPSuffixes)
def add_f90_to_env(env):
"""Add Builders and construction variables for f90 to an Environment."""
try:
F90Suffixes = env['F90FILESUFFIXES']
except KeyError:
F90Suffixes = ['.f90']
#print "Adding %s to f90 suffixes" % F90Suffixes
try:
F90PPSuffixes = env['F90PPFILESUFFIXES']
except KeyError:
F90PPSuffixes = []
DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes,
support_module = 1)
def add_f95_to_env(env):
"""Add Builders and construction variables for f95 to an Environment."""
try:
F95Suffixes = env['F95FILESUFFIXES']
except KeyError:
F95Suffixes = ['.f95']
#print "Adding %s to f95 suffixes" % F95Suffixes
try:
F95PPSuffixes = env['F95PPFILESUFFIXES']
except KeyError:
F95PPSuffixes = []
DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
support_module = 1)
def add_f03_to_env(env):
"""Add Builders and construction variables for f03 to an Environment."""
try:
F03Suffixes = env['F03FILESUFFIXES']
except KeyError:
F03Suffixes = ['.f03']
#print "Adding %s to f95 suffixes" % F95Suffixes
try:
F03PPSuffixes = env['F03PPFILESUFFIXES']
except KeyError:
F03PPSuffixes = []
DialectAddToEnv(env, "F03", F03Suffixes, F03PPSuffixes,
support_module = 1)
def add_f08_to_env(env):
"""Add Builders and construction variables for f08 to an Environment."""
try:
F08Suffixes = env['F08FILESUFFIXES']
except KeyError:
F08Suffixes = ['.f08']
try:
F08PPSuffixes = env['F08PPFILESUFFIXES']
except KeyError:
F08PPSuffixes = []
DialectAddToEnv(env, "F08", F08Suffixes, F08PPSuffixes,
support_module = 1)
def add_all_to_env(env):
"""Add builders and construction variables for all supported fortran
dialects."""
add_fortran_to_env(env)
add_f77_to_env(env)
add_f90_to_env(env)
add_f95_to_env(env)
add_f03_to_env(env)
add_f08_to_env(env)
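def example_generate(env):
    """Illustrative sketch, not part of SCons: a Fortran tool module would
    typically call add_all_to_env() from its generate() hook and then point
    the dialect variables at a concrete compiler. The compiler name and the
    -fPIC flag below are assumptions, not values taken from this file."""
    add_all_to_env(env)
    env['FORTRAN'] = 'gfortran'
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -fPIC')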
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "f5171373a9510892f56869f627149e7a",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 163,
"avg_line_length": 38.376344086021504,
"alnum_prop": 0.6636779676846922,
"repo_name": "pzajda/eloquence",
"id": "cf6486880096109cb13e13f4741bbd98d5926d84",
"size": "10707",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scons-local-2.5.0/SCons/Tool/FortranCommon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1927564"
},
{
"name": "Smarty",
"bytes": "226"
}
],
"symlink_target": ""
}
|
import hmac
from hashlib import sha256
from flask import current_app
from freight.testutils import TestCase
class WebhooksViewTest(TestCase):
def make_path(self, hook, action, app, env="production", digest=None):
key = f"{hook}/{action}/{app}/{env}"
if digest is None:
api_key = current_app.config["API_KEY"]
digest = hmac.new(
api_key.encode("utf8"), key.encode("utf8"), sha256
).hexdigest()
return f"/webhooks/{key}/{digest}/"
def test_bad_digest(self):
resp = self.client.post(
self.make_path("github", "deploy", "test", digest="xxxx")
)
assert resp.status_code == 403, resp.data
def test_invalid_hook(self):
resp = self.client.post(self.make_path("xxx", "deploy", "test"))
assert resp.status_code == 404, resp.data
def test_invalid_app(self):
resp = self.client.post(self.make_path("github", "deploy", "test"))
assert resp.status_code == 404, resp.data
|
{
"content_hash": "101685cc8c33bfe886183ce092831733",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 33.03225806451613,
"alnum_prop": 0.6015625,
"repo_name": "getsentry/freight",
"id": "9d74fad39ae76ddee3dac86d72398d8bffe7c6ec",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/web/test_webhooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3875"
},
{
"name": "HTML",
"bytes": "243"
},
{
"name": "JavaScript",
"bytes": "63952"
},
{
"name": "Less",
"bytes": "15455"
},
{
"name": "Makefile",
"bytes": "749"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "274562"
},
{
"name": "Ruby",
"bytes": "4941"
},
{
"name": "Shell",
"bytes": "864"
}
],
"symlink_target": ""
}
|
from enum import Enum
from typing import Callable
from cryptography.hazmat.primitives import hashes
from .exceptions import InvalidInput
class SignatureConstructionMethod(Enum):
"""
An enumeration of signature construction methods supported by SignXML, used to specify the method when signing.
See the list of signature types under `XML Signature Syntax and Processing Version 2.0, Definitions
<http://www.w3.org/TR/xmldsig-core2/#sec-Definitions>`_.
"""
enveloped = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
"""
The signature is over the XML content that contains the signature as an element. The content provides the root
XML document element. This is the most common XML signature type in modern applications.
"""
enveloping = "enveloping-signature"
"""
The signature is over content found within an Object element of the signature itself. The Object (or its
content) is identified via a Reference (via a URI fragment identifier or transform).
"""
detached = "detached-signature"
"""
The signature is over content external to the Signature element, and can be identified via a URI or
transform. Consequently, the signature is "detached" from the content it signs. This definition typically applies to
separate data objects, but it also includes the instance where the Signature and data object reside within the same
XML document but are sibling elements.
"""
class FragmentLookupMixin:
@classmethod
def from_fragment(cls, fragment):
for i in cls: # type: ignore
if i.value.endswith("#" + fragment):
return i
else:
raise InvalidInput(f"Unrecognized {cls.__name__} identifier fragment: {fragment}")
class InvalidInputErrorMixin:
@classmethod
def _missing_(cls, value):
raise InvalidInput(f"Unrecognized {cls.__name__}: {value}")
class DigestAlgorithm(FragmentLookupMixin, InvalidInputErrorMixin, Enum):
"""
An enumeration of digest algorithms supported by SignXML. See `RFC 9231
<https://www.rfc-editor.org/rfc/rfc9231.html>`_ and the `Algorithm Identifiers and Implementation Requirements
<http://www.w3.org/TR/xmldsig-core1/#sec-AlgID>`_ section of the XML Signature 1.1 standard for details.
"""
SHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
SHA224 = "http://www.w3.org/2001/04/xmldsig-more#sha224"
SHA384 = "http://www.w3.org/2001/04/xmldsig-more#sha384"
SHA256 = "http://www.w3.org/2001/04/xmlenc#sha256"
SHA512 = "http://www.w3.org/2001/04/xmlenc#sha512"
SHA3_224 = "http://www.w3.org/2007/05/xmldsig-more#sha3-224"
SHA3_256 = "http://www.w3.org/2007/05/xmldsig-more#sha3-256"
SHA3_384 = "http://www.w3.org/2007/05/xmldsig-more#sha3-384"
SHA3_512 = "http://www.w3.org/2007/05/xmldsig-more#sha3-512"
@property
def implementation(self) -> Callable:
"""
The cryptography callable that implements the specified algorithm.
"""
return digest_algorithm_implementations[self]
# TODO: check if padding errors are fixed by using padding=MGF1
class SignatureMethod(FragmentLookupMixin, InvalidInputErrorMixin, Enum):
"""
An enumeration of signature methods (also referred to as signature algorithms) supported by SignXML. See `RFC 9231
<https://www.rfc-editor.org/rfc/rfc9231.html>`_ and the `Algorithm Identifiers and Implementation Requirements
<http://www.w3.org/TR/xmldsig-core1/#sec-AlgID>`_ section of the XML Signature 1.1 standard for details.
"""
DSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#dsa-sha1"
HMAC_SHA1 = "http://www.w3.org/2000/09/xmldsig#hmac-sha1"
RSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
ECDSA_SHA1 = "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha1"
ECDSA_SHA224 = "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha224"
ECDSA_SHA256 = "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256"
ECDSA_SHA384 = "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha384"
ECDSA_SHA512 = "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512"
HMAC_SHA224 = "http://www.w3.org/2001/04/xmldsig-more#hmac-sha224"
HMAC_SHA256 = "http://www.w3.org/2001/04/xmldsig-more#hmac-sha256"
HMAC_SHA384 = "http://www.w3.org/2001/04/xmldsig-more#hmac-sha384"
HMAC_SHA512 = "http://www.w3.org/2001/04/xmldsig-more#hmac-sha512"
RSA_SHA224 = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha224"
RSA_SHA256 = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
RSA_SHA384 = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha384"
RSA_SHA512 = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512"
RSA_PSS = "http://www.w3.org/2007/05/xmldsig-more#rsa-pss"
DSA_SHA256 = "http://www.w3.org/2009/xmldsig11#dsa-sha256"
ECDSA_SHA3_224 = "http://www.w3.org/2021/04/xmldsig-more#ecdsa-sha3-224"
ECDSA_SHA3_256 = "http://www.w3.org/2021/04/xmldsig-more#ecdsa-sha3-256"
ECDSA_SHA3_384 = "http://www.w3.org/2021/04/xmldsig-more#ecdsa-sha3-384"
ECDSA_SHA3_512 = "http://www.w3.org/2021/04/xmldsig-more#ecdsa-sha3-512"
EDDSA_ED25519 = "http://www.w3.org/2021/04/xmldsig-more#eddsa-ed25519"
EDDSA_ED448 = "http://www.w3.org/2021/04/xmldsig-more#eddsa-ed448"
class CanonicalizationMethod(InvalidInputErrorMixin, Enum):
"""
An enumeration of XML canonicalization methods (also referred to as canonicalization algorithms) supported by
SignXML. See `RFC 9231 <https://www.rfc-editor.org/rfc/rfc9231.html>`_ and the `Algorithm Identifiers and
Implementation Requirements <http://www.w3.org/TR/xmldsig-core1/#sec-AlgID>`_ section of the XML Signature 1.1
standard for details.
"""
CANONICAL_XML_1_0 = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
CANONICAL_XML_1_0_WITH_COMMENTS = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments"
CANONICAL_XML_1_1 = "http://www.w3.org/2006/12/xmlc14n11#"
CANONICAL_XML_1_1_DEPRECATED_URI = "http://www.w3.org/2006/12/xml-c14n11"
CANONICAL_XML_1_1_WITH_COMMENTS = "http://www.w3.org/2006/12/xmlc14n11#WithComments"
EXCLUSIVE_XML_CANONICALIZATION_1_0 = "http://www.w3.org/2001/10/xml-exc-c14n#"
EXCLUSIVE_XML_CANONICALIZATION_1_0_WITH_COMMENTS = "http://www.w3.org/2001/10/xml-exc-c14n#WithComments"
digest_algorithm_implementations = {
DigestAlgorithm.SHA1: hashes.SHA1,
DigestAlgorithm.SHA224: hashes.SHA224,
DigestAlgorithm.SHA384: hashes.SHA384,
DigestAlgorithm.SHA256: hashes.SHA256,
DigestAlgorithm.SHA512: hashes.SHA512,
DigestAlgorithm.SHA3_224: hashes.SHA3_224,
DigestAlgorithm.SHA3_256: hashes.SHA3_256,
DigestAlgorithm.SHA3_384: hashes.SHA3_384,
DigestAlgorithm.SHA3_512: hashes.SHA3_512,
SignatureMethod.DSA_SHA1: hashes.SHA1,
SignatureMethod.HMAC_SHA1: hashes.SHA1,
SignatureMethod.RSA_SHA1: hashes.SHA1,
SignatureMethod.ECDSA_SHA1: hashes.SHA1,
SignatureMethod.ECDSA_SHA224: hashes.SHA224,
SignatureMethod.ECDSA_SHA256: hashes.SHA256,
SignatureMethod.ECDSA_SHA384: hashes.SHA384,
SignatureMethod.ECDSA_SHA512: hashes.SHA512,
SignatureMethod.HMAC_SHA224: hashes.SHA224,
SignatureMethod.HMAC_SHA256: hashes.SHA256,
SignatureMethod.HMAC_SHA384: hashes.SHA384,
SignatureMethod.HMAC_SHA512: hashes.SHA512,
SignatureMethod.RSA_SHA224: hashes.SHA224,
SignatureMethod.RSA_SHA256: hashes.SHA256,
SignatureMethod.RSA_SHA384: hashes.SHA384,
SignatureMethod.RSA_SHA512: hashes.SHA512,
SignatureMethod.DSA_SHA256: hashes.SHA256,
SignatureMethod.ECDSA_SHA3_224: hashes.SHA3_224,
SignatureMethod.ECDSA_SHA3_256: hashes.SHA3_256,
SignatureMethod.ECDSA_SHA3_384: hashes.SHA3_384,
SignatureMethod.ECDSA_SHA3_512: hashes.SHA3_512,
SignatureMethod.EDDSA_ED25519: hashes.SHA512,
SignatureMethod.EDDSA_ED448: hashes.SHAKE256,
}
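# Illustrative usage sketch (not part of the original module): resolve a
# digest algorithm from a URI fragment and hash a placeholder payload with
# its cryptography implementation.
#   algorithm = DigestAlgorithm.from_fragment("sha256")   # -> DigestAlgorithm.SHA256
#   hasher = hashes.Hash(algorithm.implementation())
#   hasher.update(b"placeholder payload")
#   digest = hasher.finalize()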
|
{
"content_hash": "c9494fd7bcca3916bf9403a4c4a64ab8",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 120,
"avg_line_length": 48.4320987654321,
"alnum_prop": 0.7165434616365026,
"repo_name": "XML-Security/signxml",
"id": "24fe6ca3028e5618a33f6eb47497d65874eb3549",
"size": "7846",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "signxml/algorithms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2319"
},
{
"name": "Makefile",
"bytes": "3710"
},
{
"name": "Python",
"bytes": "147482"
},
{
"name": "XSLT",
"bytes": "819"
}
],
"symlink_target": ""
}
|
import sys
from _lsprof import Profiler, profiler_entry
__all__ = ['profile', 'Stats']
def profile(f, *args, **kwds):
"""XXX docstring"""
p = Profiler()
p.enable(subcalls=True, builtins=True)
try:
f(*args, **kwds)
finally:
p.disable()
return Stats(p.getstats())
class Stats(object):
"""XXX docstring"""
def __init__(self, data):
self.data = data
def sort(self, crit="inlinetime"):
"""XXX docstring"""
if crit not in profiler_entry.__dict__:
raise ValueError("Can't sort by %s" % crit)
self.data.sort(key=lambda x: getattr(x, crit), reverse=True)
for e in self.data:
if e.calls:
e.calls.sort(key=lambda x: getattr(x, crit), reverse=True)
def pprint(self, top=None, file=None, limit=None, climit=None):
"""XXX docstring"""
if file is None:
file = sys.stdout
d = self.data
if top is not None:
d = d[:top]
cols = "% 12s %12s %11.4f %11.4f %s\n"
hcols = "% 12s %12s %12s %12s %s\n"
file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
"Inline(ms)", "module:lineno(function)"))
count = 0
for e in d:
file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
e.inlinetime, label(e.code)))
count += 1
if limit is not None and count == limit:
return
ccount = 0
if e.calls:
for se in e.calls:
file.write(cols % ("+%s" % se.callcount, se.reccallcount,
se.totaltime, se.inlinetime,
"+%s" % label(se.code)))
count += 1
ccount += 1
if limit is not None and count == limit:
return
if climit is not None and ccount == climit:
break
def freeze(self):
"""Replace all references to code objects with string
descriptions; this makes it possible to pickle the instance."""
# this code is probably rather ickier than it needs to be!
for i in range(len(self.data)):
e = self.data[i]
if not isinstance(e.code, str):
self.data[i] = type(e)((label(e.code),) + e[1:])
if e.calls:
for j in range(len(e.calls)):
se = e.calls[j]
if not isinstance(se.code, str):
e.calls[j] = type(se)((label(se.code),) + se[1:])
_fn2mod = {}
def label(code):
if isinstance(code, str):
return code
try:
mname = _fn2mod[code.co_filename]
except KeyError:
for k, v in list(sys.modules.iteritems()):
if v is None:
continue
if not hasattr(v, '__file__'):
continue
if not isinstance(v.__file__, str):
continue
if v.__file__.startswith(code.co_filename):
mname = _fn2mod[code.co_filename] = k
break
else:
mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename
return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
if __name__ == '__main__':
import os
sys.argv = sys.argv[1:]
if not sys.argv:
print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
sys.exit(2)
sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
stats = profile(execfile, sys.argv[0], globals(), locals())
stats.sort()
stats.pprint()
|
{
"content_hash": "b46d4bb5800b74f5728088772eabb08d",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 33.5045045045045,
"alnum_prop": 0.49152998117773594,
"repo_name": "joewalnes/idea-community",
"id": "5235a598e7cdd4906468290e276f78fb0822f1fb",
"size": "3743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/mercurial/lsprof.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "387"
},
{
"name": "C",
"bytes": "136045"
},
{
"name": "C#",
"bytes": "103"
},
{
"name": "C++",
"bytes": "40449"
},
{
"name": "Emacs Lisp",
"bytes": "2507"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "361320"
},
{
"name": "Java",
"bytes": "89694599"
},
{
"name": "JavaScript",
"bytes": "978"
},
{
"name": "Objective-C",
"bytes": "1877"
},
{
"name": "PHP",
"bytes": "145"
},
{
"name": "Perl",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "1699274"
},
{
"name": "Shell",
"bytes": "6965"
},
{
"name": "VimL",
"bytes": "5950"
}
],
"symlink_target": ""
}
|
"""
Support for MySensors switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.mysensors/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components import mysensors
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
ATTR_IR_CODE = 'V_IR_SEND'
SERVICE_SEND_IR_CODE = 'mysensors_send_ir_code'
SEND_IR_CODE_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_IR_CODE): cv.string,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for switches."""
device_class_map = {
'S_DOOR': MySensorsSwitch,
'S_MOTION': MySensorsSwitch,
'S_SMOKE': MySensorsSwitch,
'S_LIGHT': MySensorsSwitch,
'S_LOCK': MySensorsSwitch,
'S_IR': MySensorsIRSwitch,
'S_BINARY': MySensorsSwitch,
'S_SPRINKLER': MySensorsSwitch,
'S_WATER_LEAK': MySensorsSwitch,
'S_SOUND': MySensorsSwitch,
'S_VIBRATION': MySensorsSwitch,
'S_MOISTURE': MySensorsSwitch,
'S_WATER_QUALITY': MySensorsSwitch,
}
mysensors.setup_mysensors_platform(
hass, DOMAIN, discovery_info, device_class_map,
async_add_entities=async_add_entities)
async def async_send_ir_code_service(service):
"""Set IR code as device state attribute."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
ir_code = service.data.get(ATTR_IR_CODE)
devices = mysensors.get_mysensors_devices(hass, DOMAIN)
if entity_ids:
_devices = [device for device in devices.values()
if isinstance(device, MySensorsIRSwitch) and
device.entity_id in entity_ids]
else:
_devices = [device for device in devices.values()
if isinstance(device, MySensorsIRSwitch)]
kwargs = {ATTR_IR_CODE: ir_code}
for device in _devices:
await device.async_turn_on(**kwargs)
hass.services.async_register(
DOMAIN, SERVICE_SEND_IR_CODE, async_send_ir_code_service,
schema=SEND_IR_CODE_SERVICE_SCHEMA)
class MySensorsSwitch(mysensors.device.MySensorsEntity, SwitchDevice):
"""Representation of the value of a MySensors Switch child node."""
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
@property
def current_power_w(self):
"""Return the current power usage in W."""
set_req = self.gateway.const.SetReq
return self._values.get(set_req.V_WATT)
@property
def is_on(self):
"""Return True if switch is on."""
return self._values.get(self.value_type) == STATE_ON
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 1)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_ON
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 0)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_OFF
self.async_schedule_update_ha_state()
class MySensorsIRSwitch(MySensorsSwitch):
"""IR switch child class to MySensorsSwitch."""
def __init__(self, *args):
"""Set up instance attributes."""
super().__init__(*args)
self._ir_code = None
@property
def is_on(self):
"""Return True if switch is on."""
set_req = self.gateway.const.SetReq
return self._values.get(set_req.V_LIGHT) == STATE_ON
async def async_turn_on(self, **kwargs):
"""Turn the IR switch on."""
set_req = self.gateway.const.SetReq
if ATTR_IR_CODE in kwargs:
self._ir_code = kwargs[ATTR_IR_CODE]
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, self._ir_code)
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_LIGHT, 1)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = self._ir_code
self._values[set_req.V_LIGHT] = STATE_ON
self.async_schedule_update_ha_state()
# turn off switch after switch was turned on
await self.async_turn_off()
async def async_turn_off(self, **kwargs):
"""Turn the IR switch off."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_LIGHT, 0)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[set_req.V_LIGHT] = STATE_OFF
self.async_schedule_update_ha_state()
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
await super().async_update()
self._ir_code = self._values.get(self.value_type)
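# Illustrative service call (not part of the original module); the entity id
# and IR code below are placeholders, not values from this repository:
#   await hass.services.async_call(DOMAIN, SERVICE_SEND_IR_CODE, {
#       ATTR_ENTITY_ID: 'switch.living_room_ir_1_1',
#       ATTR_IR_CODE: '0xC284E21A',
#   })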
|
{
"content_hash": "b1645c150b3f0b8a055dda62197b932c",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 74,
"avg_line_length": 37.22666666666667,
"alnum_prop": 0.629297994269341,
"repo_name": "persandstrom/home-assistant",
"id": "20e50518df8e52cbf11b3f784db8a1623b90dd44",
"size": "5584",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "homeassistant/components/switch/mysensors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
import pytest
import serial
from pymeasure.adapters import SerialAdapter
def make_adapter(**kwargs):
return SerialAdapter(serial.serial_for_url("loop://", **kwargs))
@pytest.mark.parametrize("msg", ["OUTP\n", "POWER 22 dBm\n"])
def test_adapter_write(msg):
adapter = make_adapter(timeout=0.2)
adapter.write(msg)
assert(adapter.read() == msg)
@pytest.mark.parametrize("test_input,expected", [([1,2,3], b'OUTP#13\x01\x02\x03'),
(range(100), b'OUTP#3100'+bytes(range(100)))])
def test_adapter_write_binary_values(test_input, expected):
adapter = make_adapter(timeout=0.2)
adapter.write_binary_values("OUTP", test_input, datatype='B')
# Add 10 bytes more, just to check that no extra bytes are present
assert(adapter.connection.read(len(expected)+10) == expected)
|
{
"content_hash": "6e83b25416ec749edb8c9aad5b614684",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 95,
"avg_line_length": 36.82608695652174,
"alnum_prop": 0.6635182998819362,
"repo_name": "ralph-group/pymeasure",
"id": "53a6593c38119e24297f425f4745b790a7582206",
"size": "2004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/adapters/test_serial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "689774"
}
],
"symlink_target": ""
}
|
"""
This test checks that blocks containing segwit recovery transactions will be accepted,
that segwit recovery transactions are rejected from mempool acceptance (even with
-acceptnonstdtxn=1), and that segwit recovery transactions don't result in bans.
"""
import time
from typing import Optional, Sequence
from test_framework.blocktools import (
create_block,
create_coinbase,
make_conform_to_ctor,
)
from test_framework.messages import (
COIN,
CBlock,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
OP_EQUAL,
OP_HASH160,
OP_TRUE,
CScript,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
TEST_TIME = int(time.time())
# Error due to non clean stack
CLEANSTACK_ERROR = 'non-mandatory-script-verify-flag (Stack size must be exactly one after execution)'
RPC_CLEANSTACK_ERROR = CLEANSTACK_ERROR
EVAL_FALSE_ERROR = 'non-mandatory-script-verify-flag (Script evaluated without error but finished with a false/empty top stack elem'
RPC_EVAL_FALSE_ERROR = EVAL_FALSE_ERROR + "ent)"
class PreviousSpendableOutput(object):
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n
class SegwitRecoveryTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.tip_height = 0
# We have 2 nodes:
# 1) node_nonstd (nodes[0]) accepts non-standard txns. It does not
# accept Segwit recovery transactions, since it is included in
# standard flags, and transactions that violate these flags are
# never accepted into the mempool.
# 2) node_std (nodes[1]) doesn't accept non-standard txns and
# doesn't have us whitelisted. It's used to test for bans, as we
# connect directly to it via mininode and send a segwit spending
# txn. This transaction is non-standard. We check that sending
# this transaction doesn't result in a ban.
# Nodes are connected to each other, so node_std receives blocks and
# transactions that node_nonstd has accepted. Since we are checking
# that segwit spending txn are not resulting in bans, node_nonstd
# doesn't get banned when forwarding this kind of transactions to
# node_std.
self.extra_args = [['-whitelist=noban@127.0.0.1',
"-acceptnonstdtxn"],
["-acceptnonstdtxn=0"]]
def make_block(self, base_block: Optional[CBlock]) -> CBlock:
"""
Build a new block and return it.
Increment the tip_height counter.
If base_block is None, use the genesis block as base block.
"""
if base_block is None:
base_block_hash = self.genesis_hash
block_time = TEST_TIME
else:
base_block_hash = base_block.sha256
block_time = base_block.nTime + 1
# First create the coinbase
self.tip_height += 1
coinbase = create_coinbase(self.tip_height)
block = create_block(base_block_hash, coinbase, block_time)
# Do PoW, which is cheap on regnet
block.solve()
return block
def run_test(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
spendable_outputs = []
# shorthand
node_nonstd = self.nodes[0]
node_std = self.nodes[1]
peer_nonstd = node_nonstd.add_p2p_connection(P2PDataStore())
peer_std = node_std.add_p2p_connection(P2PDataStore())
# adds transactions to the block and updates state
def update_block(block: CBlock,
new_transactions: Sequence[CTransaction]):
block.vtx.extend(new_transactions)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Returns 2 transactions:
# 1) txfund: create outputs in segwit addresses
# 2) txspend: spends outputs from segwit addresses
def create_segwit_fund_and_spend_tx(spend, case0=False):
if not case0:
# Spending from a P2SH-P2WPKH coin,
# txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691
redeem_script0 = bytearray.fromhex(
'0014fcf9969ce1c98a135ed293719721fb69f0b686cb')
# Spending from a P2SH-P2WSH coin,
# txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f
redeem_script1 = bytearray.fromhex(
'0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4')
else:
redeem_script0 = bytearray.fromhex('51020000')
redeem_script1 = bytearray.fromhex('53020080')
redeem_scripts = [redeem_script0, redeem_script1]
# Fund transaction to segwit addresses
txfund = CTransaction()
txfund.vin = [CTxIn(COutPoint(spend.tx.sha256, spend.n))]
amount = (50 * COIN - 1000) // len(redeem_scripts)
for redeem_script in redeem_scripts:
txfund.vout.append(
CTxOut(amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])))
txfund.rehash()
# Segwit spending transaction
# We'll test if a node that checks for standardness accepts this
# txn. It should fail exclusively because of the restriction in
            # the scriptSig (non clean stack..), so all other characteristics
# must pass standardness checks. For this reason, we create
# standard P2SH outputs.
txspend = CTransaction()
for i in range(len(redeem_scripts)):
txspend.vin.append(
CTxIn(COutPoint(txfund.sha256, i), CScript([redeem_scripts[i]])))
txspend.vout = [CTxOut(50 * COIN - 2000,
CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))]
txspend.rehash()
return txfund, txspend
# Create a new block
block = self.make_block(base_block=None)
spendable_outputs.append(block)
peer_nonstd.send_blocks_and_test([block], node_nonstd)
# Now we need that block to mature so we can spend the coinbase.
matureblocks = []
for _ in range(199):
block = self.make_block(block)
matureblocks.append(block)
spendable_outputs.append(block)
peer_nonstd.send_blocks_and_test(matureblocks, node_nonstd)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for _ in range(100):
out.append(
PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0))
# Create segwit funding and spending transactions
txfund, txspend = create_segwit_fund_and_spend_tx(out[0])
txfund_case0, txspend_case0 = create_segwit_fund_and_spend_tx(
out[1], True)
# Mine txfund, as it can't go into node_std mempool because it's
# nonstandard.
block = self.make_block(block)
update_block(block, [txfund, txfund_case0])
peer_nonstd.send_blocks_and_test([block], node_nonstd)
# Check both nodes are synchronized before continuing.
self.sync_blocks()
# Check that upgraded nodes checking for standardness are not banning
# nodes sending segwit spending txns.
peer_nonstd.send_txs_and_test([txspend], node_nonstd, success=False,
reject_reason=CLEANSTACK_ERROR)
peer_nonstd.send_txs_and_test([txspend_case0], node_nonstd, success=False,
reject_reason=EVAL_FALSE_ERROR)
peer_std.send_txs_and_test([txspend], node_std, success=False,
reject_reason=CLEANSTACK_ERROR)
peer_std.send_txs_and_test([txspend_case0], node_std, success=False,
reject_reason=EVAL_FALSE_ERROR)
# Segwit recovery txns are never accepted into the mempool,
# as they are included in standard flags.
assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
node_nonstd.sendrawtransaction, ToHex(txspend))
assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR,
node_nonstd.sendrawtransaction, ToHex(txspend_case0))
assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
node_std.sendrawtransaction, ToHex(txspend))
assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR,
node_std.sendrawtransaction, ToHex(txspend_case0))
# Blocks containing segwit spending txns are accepted in both nodes.
block = self.make_block(block)
update_block(block, [txspend, txspend_case0])
peer_nonstd.send_blocks_and_test([block], node_nonstd)
self.sync_blocks()
if __name__ == '__main__':
SegwitRecoveryTest().main()
|
{
"content_hash": "22f2612ac0826a0aa0e386984cd51075",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 132,
"avg_line_length": 41.699551569506724,
"alnum_prop": 0.619528981610926,
"repo_name": "Bitcoin-ABC/bitcoin-abc",
"id": "e692ad9cabdbd06217a97a2abd48cbce7dc76a5e",
"size": "9503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/abc-segwit-recovery.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1160721"
},
{
"name": "C++",
"bytes": "9817660"
},
{
"name": "CMake",
"bytes": "195193"
},
{
"name": "CSS",
"bytes": "4284"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "HTML",
"bytes": "25754"
},
{
"name": "Java",
"bytes": "41238"
},
{
"name": "JavaScript",
"bytes": "2366459"
},
{
"name": "Kotlin",
"bytes": "3712"
},
{
"name": "M4",
"bytes": "31132"
},
{
"name": "Makefile",
"bytes": "100617"
},
{
"name": "Objective-C++",
"bytes": "5811"
},
{
"name": "PHP",
"bytes": "94504"
},
{
"name": "Perl",
"bytes": "4551"
},
{
"name": "PowerShell",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "2706993"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Ruby",
"bytes": "21108"
},
{
"name": "Rust",
"bytes": "54953"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "167526"
},
{
"name": "TypeScript",
"bytes": "66320"
}
],
"symlink_target": ""
}
|
"""
Script for running pytest tests.
This script is meant to be run in a separate process by a PyTestRunner.
It runs tests via the pytest framework and prints the results so that the
PyTestRunner can read them.
"""
# Standard library imports
import sys
# Third party imports
import pytest
# Local imports, needs to be relative otherwise it will fail if trying
# to execute in a different env with only spyder-kernel installed
try:
# this line is needed for the tests to succeed
from .zmqwriter import ZmqStreamWriter
except:
# this line is needed for the plugin to work
from zmqwriter import ZmqStreamWriter
class FileStub():
"""Stub for ZmqStreamWriter which instead writes to a file."""
def __init__(self, filename):
"""Constructor; connect to specified filename."""
self.file = open(filename, 'w')
def write(self, obj):
"""Write Python object to file."""
self.file.write(str(obj) + '\n')
def close(self):
"""Close file."""
self.file.close()
class SpyderPlugin():
"""Pytest plugin which reports in format suitable for Spyder."""
def __init__(self, writer):
"""Constructor."""
self.writer = writer
def initialize_logreport(self):
"""Reset accumulator variables."""
self.status = '---'
self.duration = 0
self.longrepr = []
self.sections = []
self.had_error = False
self.was_skipped = False
self.was_xfail = False
def pytest_report_header(self, config, startdir):
"""Called by pytest before any reporting."""
self.writer.write({
'event': 'config',
'rootdir': str(config.rootdir)
})
def pytest_collectreport(self, report):
"""Called by pytest after collecting tests from a file."""
if report.outcome == 'failed':
self.writer.write({
'event': 'collecterror',
'nodeid': report.nodeid,
'longrepr': str(report.longrepr)
})
def pytest_itemcollected(self, item):
"""Called by pytest when a test item is collected."""
self.writer.write({
'event': 'collected',
'nodeid': item.nodeid
})
def pytest_runtest_logstart(self, nodeid, location):
"""Called by pytest before running a test."""
self.writer.write({
'event': 'starttest',
'nodeid': nodeid
})
self.initialize_logreport()
def pytest_runtest_logreport(self, report):
"""Called by pytest when a phase of a test is completed."""
if report.when == 'call':
self.status = report.outcome
self.duration = report.duration
else:
if report.outcome == 'failed':
self.had_error = True
elif report.outcome == 'skipped':
self.was_skipped = True
if hasattr(report, 'wasxfail'):
self.was_xfail = True
self.longrepr.append(report.wasxfail if report.wasxfail else
'WAS EXPECTED TO FAIL')
self.sections = report.sections # already accumulated over phases
if report.longrepr:
first_msg_idx = len(self.longrepr)
if hasattr(report.longrepr, 'reprcrash'):
self.longrepr.append(report.longrepr.reprcrash.message)
if isinstance(report.longrepr, tuple):
self.longrepr.append(report.longrepr[2])
elif isinstance(report.longrepr, str):
self.longrepr.append(report.longrepr)
else:
self.longrepr.append(str(report.longrepr))
if report.outcome == 'failed' and report.when in (
'setup', 'teardown'):
self.longrepr[first_msg_idx] = '{} {}: {}'.format(
'ERROR at', report.when, self.longrepr[first_msg_idx])
def pytest_runtest_logfinish(self, nodeid, location):
"""Called by pytest when the entire test is completed."""
if self.was_xfail:
if self.status == 'passed':
self.status = 'xpassed'
else: # 'skipped'
self.status = 'xfailed'
elif self.was_skipped:
self.status = 'skipped'
data = {'event': 'logreport',
'outcome': self.status,
'witherror': self.had_error,
'sections': self.sections,
'duration': self.duration,
'nodeid': nodeid,
'filename': location[0],
'lineno': location[1]}
if self.longrepr:
msg_lines = self.longrepr[0].rstrip().splitlines()
data['message'] = msg_lines[0]
start_item = 1 if len(msg_lines) == 1 else 0
data['longrepr'] = '\n'.join(self.longrepr[start_item:])
self.writer.write(data)
def main(args):
"""Run pytest with the Spyder plugin."""
if args[1] == 'file':
writer = FileStub('pytestworker.log')
else:
writer = ZmqStreamWriter(int(args[1]))
result = pytest.main(args[2:], plugins=[SpyderPlugin(writer)])
writer.close()
return result
if __name__ == '__main__':
result = main(sys.argv)
sys.exit(result)
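# Example invocations (illustrative; the test file name is a placeholder):
#   python pytestworker.py file test_example.py   # write results to pytestworker.log
#   python pytestworker.py 5555 test_example.py   # write results to a ZMQ socket on port 5555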
|
{
"content_hash": "2001ed24a84bf11a63e63ce13e75820a",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 74,
"avg_line_length": 34.15483870967742,
"alnum_prop": 0.5710238005289007,
"repo_name": "jitseniesen/spyder-unittest",
"id": "3553cec6f3ce2a2909425262958a71c752e50903",
"size": "5446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spyder_unittest/backend/workers/pytestworker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170686"
}
],
"symlink_target": ""
}
|
import unittest
from slovar import slovar
from prf.view import BaseView
from prf.request import PRFRequest
from prf.tests.prf_testcase import PrfTestCase
class TestView(PrfTestCase):
def request(self, url='/', method='GET', mime='application/json', params={}):
from pyramid import testing
req = testing.DummyRequest(path=url, params=params)
req.context = testing.DummyResource()
req.content_type=mime
req.params = slovar({'mixed': lambda: params})
req.accept = [mime]
req.method = method
return req
def test_pop_empty_dict(self):
request = self.request(params={'_pop_empty': 1})
view = BaseView({}, request)
result = view._process([{'a': 1, 'b': [], 'c': {'d': 2, 'e': ''}}], True)
assert result['data'] == [{'a': 1, 'c': {'d': 2}}]
def test_pop_empty_model(self):
request = self.request(params={'_pop_empty': 1})
view = BaseView({}, request)
d = slovar(a=1, b=[], c={'d': 2, 'e': ''})
result = view._process(d, False)
assert result['data'] == {'a': 1, 'c': {'d': 2}}
def test_no_pop_empty_dict(self):
request = self.request()
view = BaseView({}, request)
result = view._process([{'a': 1, 'b': [], 'c': {'d': 2, 'e': ''}}], True)
assert result['data'] == [{'a': 1, 'b': [], 'c': {'d': 2, 'e': ''}}]
def test_no_pop_empty_model(self):
request = self.request()
view = BaseView({}, request)
d = slovar(a=1, b=[], c={'d': 2, 'e': ''})
result = view._process(d, False)
assert result['data'] == {'a': 1, 'b': [], 'c': {'d': 2, 'e': ''}}
def test_params_prop(self):
import pytest
request = self.request(params={'a':1})
view = BaseView({}, request)
assert type(view._params) == slovar
view._params = {'b':1}
assert type(view._params) == slovar
with pytest.raises(ValueError):
view._params = 'whatever the fuck I want'
|
{
"content_hash": "efb42e3abedf85d0034751171cf75bd8",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 81,
"avg_line_length": 34.28813559322034,
"alnum_prop": 0.5338606030647554,
"repo_name": "vahana/prf",
"id": "cdd1bb51de7e271566a4453469e9e55bb53ccb6e",
"size": "2023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prf/tests/test_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167421"
}
],
"symlink_target": ""
}
|
from concurrence._event import event, version, method, has_next, next, loop, EventError
from concurrence._event import EV_TIMEOUT, EV_READ, EV_WRITE, EV_SIGNAL, EV_PERSIST
import os
try:
#try to import some extra features available from libevent14+
from concurrence._event14 import init
from concurrence._event14 import reinit
    #monkey patch fork, so that we reinit event after fork automatically
os_fork = os.fork
def fork():
pid = os_fork()
if pid == 0:
#we are child, we must reinit libevent
reinit()
return pid
os.fork = fork
except ImportError:
#fall back to <1.4 init
from concurrence._event import init
    #monkey patch fork, to warn that it does not work with libevent < 1.4
os_fork = os.fork
def fork():
raise EventError("fork does not work with libevent < 1.4")
os.fork = fork
def reinit():
raise EventError("unavailable with libevent < 1.4")
#make sure libevent is inited
init()
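# Illustrative note (not part of the original module): after importing this
# module, os.fork is the patched version, so with libevent >= 1.4 a forked
# child automatically reinitializes the event loop before continuing:
#   import os
#   pid = os.fork()   # the child calls reinit() before fork() returns 0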
|
{
"content_hash": "d1baf196f9dc69388c300ae0386048b7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 28.685714285714287,
"alnum_prop": 0.6633466135458167,
"repo_name": "toymachine/concurrence",
"id": "4285dca973bd1b8f476d8277b0e7632cc80d084c",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/concurrence/event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42241"
},
{
"name": "JavaScript",
"bytes": "19227"
},
{
"name": "Python",
"bytes": "408521"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
}
|
from .base import *
from .native import *
|
{
"content_hash": "c16181a011340e8ce2e2e4cef46eef88",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 21,
"avg_line_length": 21,
"alnum_prop": 0.7142857142857143,
"repo_name": "fperignon/siconos",
"id": "df66ddefb0262a60a3c54dd4d3a557c00d7aec79",
"size": "43",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mechanics/swig/mechanics/collision/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2725"
},
{
"name": "C",
"bytes": "4861202"
},
{
"name": "C++",
"bytes": "9363519"
},
{
"name": "CMake",
"bytes": "514834"
},
{
"name": "CSS",
"bytes": "12791"
},
{
"name": "Dockerfile",
"bytes": "233"
},
{
"name": "Fortran",
"bytes": "2539066"
},
{
"name": "GAMS",
"bytes": "5614"
},
{
"name": "HTML",
"bytes": "4331842"
},
{
"name": "Makefile",
"bytes": "12197"
},
{
"name": "Nix",
"bytes": "3086"
},
{
"name": "Python",
"bytes": "1479527"
},
{
"name": "Shell",
"bytes": "50594"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import object
from builtins import str
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-DomainUser',
'Author': ['@harmj0y'],
'Description': ('Query information for a given user or users in the specified domain. Part of PowerView.'),
'Software': 'S0194',
'Techniques': ['T1033'],
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Identity' : {
'Description' : 'A SamAccountName, DistinguishedName, SID, GUID, or a dns host name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'SPN' : {
'Description' : 'Switch. Only return user objects with non-null service principal names',
'Required' : False,
'Value' : ''
},
'AdminCount' : {
'Description' : 'Switch. Return users with \'(adminCount=1)\' (meaning are/were privileged).',
'Required' : False,
'Value' : ''
},
'AllowDelegation' : {
'Description' : 'Switch. Return user accounts that are not marked as \'sensitive and not allowed for delegation\'',
'Required' : False,
'Value' : ''
},
'TrustedToAuth' : {
                'Description' : 'Switch. Return user accounts that are trusted to authenticate for other principals.',
'Required' : False,
'Value' : ''
},
'PreauthNotRequired' : {
'Description' : 'Switch. Return user accounts with "Do not require Kerberos preauthentication" set.',
'Required' : False,
'Value' : ''
},
'DisallowDelegation' : {
'Description' : 'Switch. Return user accounts that are marked as \'sensitive and not allowed for delegation\'',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'LDAPFilter' : {
'Description' : 'Specifies an LDAP query string that is used to filter Active Directory objects.',
'Required' : False,
'Value' : ''
},
'Properties' : {
'Description' : 'Specifies the properties of the output object to retrieve from the server.',
'Required' : False,
'Value' : ''
},
'SearchBase' : {
'Description' : 'The LDAP source to search through, e.g. "LDAP://OU=secret,DC=testlab,DC=local" Useful for OU queries.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'SecurityMasks' : {
'Description' : 'Specifies an option for examining security information of a directory object. One of "Dacl", "Group", "None", "Owner", "Sacl".',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
'Value' : ''
},
'FindOne' : {
'Description' : 'Only return one result object.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
        try:
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except IOError:
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
script += "\n" + moduleName + " "
        for option, values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
script = helpers.keyword_obfuscation(script)
return script
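# Editorial note, not part of the original module: generate() appends one
# PowerShell argument per populated option -- a value of 'true' becomes a bare
# switch, anything else becomes "-Name value". With hypothetical values
# Identity='jdoe' and SPN='true', the tail of the script would read roughly:
#
#     Get-DomainUser -Identity jdoe -SPN | Out-String | %{$_ + "`n"};"Get-DomainUser completed!"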
|
{
"content_hash": "ad960c4b37c3210dab5a316a601d3882",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 165,
"avg_line_length": 39.53157894736842,
"alnum_prop": 0.4643855678338437,
"repo_name": "byt3bl33d3r/Empire",
"id": "52daafe6655f523eabee65ceb1f23c73d1094792",
"size": "7511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/get_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
"""
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
|
{
"content_hash": "7beb818ec961c0e6489ccaa9fea62f63",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 33.69444444444444,
"alnum_prop": 0.7136575982412751,
"repo_name": "massmutual/scikit-learn",
"id": "eef04ac3336c4cc228caf66fe77759949ad9871c",
"size": "3639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ensemble/plot_random_forest_embedding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394526"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "5634898"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
}
|
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from octs.app import create_app
from octs.database import db as _db
from octs.settings import TestConfig
from .factories import UserFactory
@pytest.yield_fixture(scope='function')
def app():
"""An application for the tests."""
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
"""A Webtest app."""
return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
"""A database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture
def user(db):
"""A user for the tests."""
user = UserFactory(password='myprecious')
db.session.commit()
return user
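# Editorial sketch, not part of the original file: tests pull these fixtures
# in by argument name. The assertion below is hypothetical and assumes the
# UserFactory model stores passwords hashed rather than in plain text.
#
#     def test_user_password_is_hashed(user):
#         assert user.password != 'myprecious'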
|
{
"content_hash": "f207be09363213d72edde250c9618dc6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 46,
"avg_line_length": 19.08,
"alnum_prop": 0.6582809224318659,
"repo_name": "kaiueo/octs",
"id": "3a0f54572de4c5371eb456ab651ec5db2579e07c",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "935192"
},
{
"name": "HTML",
"bytes": "5319485"
},
{
"name": "JavaScript",
"bytes": "6355986"
},
{
"name": "PHP",
"bytes": "7682"
},
{
"name": "Python",
"bytes": "113291"
},
{
"name": "Shell",
"bytes": "526"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_compress import Compress
from flask_debugtoolbar import DebugToolbarExtension
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sqlalchemy.pool import NullPool
import logging
import sys
import os
import requests
HEROKU_APP_NAME = "paperbuzz-api"
# set up logging
# see http://wiki.pylonshq.com/display/pylonscookbook/Alternative+logging+configuration
logging.basicConfig(
stream=sys.stdout, level=logging.DEBUG, format="%(name)s - %(message)s"
)
logger = logging.getLogger("paperbuzz")
libraries_to_mum = [
"requests.packages.urllib3",
"requests_oauthlib",
"stripe",
"oauthlib",
"boto",
"newrelic",
"RateLimiter",
]
for a_library in libraries_to_mum:
the_logger = logging.getLogger(a_library)
the_logger.setLevel(logging.WARNING)
the_logger.propagate = True
requests.packages.urllib3.disable_warnings()
# error reporting with sentry
# sentry_sdk.init(dsn=os.environ.get("SENTRY_DSN"), integrations=[FlaskIntegration()])
app = Flask(__name__)
# database stuff
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True  # set explicitly to suppress the warning
db_uri = os.getenv("DATABASE_URL")
if db_uri.startswith("postgres://"):
db_uri = db_uri.replace(
"postgres://", "postgresql://", 1
) # temp heroku sqlalchemy fix
app.config["SQLALCHEMY_DATABASE_URI"] = db_uri
app.config["SQLALCHEMY_ECHO"] = os.getenv("SQLALCHEMY_ECHO", False) == "True"
# from http://stackoverflow.com/a/12417346/596939
class NullPoolSQLAlchemy(SQLAlchemy):
def apply_driver_hacks(self, app, info, options):
options["poolclass"] = NullPool
return super(NullPoolSQLAlchemy, self).apply_driver_hacks(app, info, options)
db = NullPoolSQLAlchemy(app)
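# Editorial note, not in the original source: NullPool opens a fresh database
# connection per checkout and closes it on release, so no idle connections are
# held between requests -- a common trade-off on Heroku-style platforms with a
# small Postgres connection limit.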
# do compression. has to be above flask debug toolbar so it can override this.
compress_json = os.getenv("COMPRESS_DEBUG", "False") == "True"
# set up Flask-DebugToolbar
if os.getenv("FLASK_DEBUG", False) == "True":
logger.info("Setting app.debug=True; Flask-DebugToolbar will display")
compress_json = False
app.debug = True
app.config["DEBUG"] = True
app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False
app.config["SQLALCHEMY_RECORD_QUERIES"] = True
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
toolbar = DebugToolbarExtension(app)
# gzip responses
Compress(app)
app.config["COMPRESS_DEBUG"] = compress_json
|
{
"content_hash": "c8f9d25354b3bc2db1c04fa00f070609",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 87,
"avg_line_length": 28.767441860465116,
"alnum_prop": 0.7219078415521423,
"repo_name": "Impactstory/paperbuzz-api",
"id": "ac6709dad0b8c1bd980dea52f63d24619e0b19c1",
"size": "2474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98480"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
}
|
"""This module contains an object that represents a Telegram WebhookInfo."""
from telegram import TelegramObject
class WebhookInfo(TelegramObject):
"""This object represents a Telegram WebhookInfo.
    Attributes:
        url (str): Webhook URL, may be empty if webhook is not set up.
        has_custom_certificate (bool):
        pending_update_count (int):
        last_error_date (int):
        last_error_message (str):
        max_connections (int):
        allowed_updates (List[str]):
    Args:
        url (str): Webhook URL, may be empty if webhook is not set up.
        has_custom_certificate (bool):
        pending_update_count (int):
        last_error_date (Optional[int]):
        last_error_message (Optional[str]):
        max_connections (Optional[int]):
        allowed_updates (Optional[List[str]]):
    """
def __init__(self,
url,
has_custom_certificate,
pending_update_count,
last_error_date=None,
last_error_message=None,
max_connections=None,
allowed_updates=None,
**kwargs):
# Required
self.url = url
self.has_custom_certificate = has_custom_certificate
self.pending_update_count = pending_update_count
self.last_error_date = last_error_date
self.last_error_message = last_error_message
self.max_connections = max_connections
self.allowed_updates = allowed_updates
@staticmethod
def de_json(data, bot):
"""
Args:
data (dict):
bot (telegram.Bot):
Returns:
telegram.WebhookInfo:
"""
if not data:
return None
return WebhookInfo(**data)
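# Editorial sketch, not part of the original file; the payload keys mirror
# Telegram's getWebhookInfo response, and de_json ignores the bot argument:
#
#     info = WebhookInfo.de_json(
#         {'url': '', 'has_custom_certificate': False, 'pending_update_count': 0},
#         bot=None)
#     assert info.pending_update_count == 0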
|
{
"content_hash": "47ea177c164df65716c333259d2c6c26",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 28.54385964912281,
"alnum_prop": 0.5685310387215734,
"repo_name": "thonkify/thonkify",
"id": "ac8c91886c4635ef3974b958342417e1a2080f9e",
"size": "2436",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/lib/telegram/webhookinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10460214"
},
{
"name": "Shell",
"bytes": "1470"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('codepot', '0008_auto_20150720_1430'),
('codepot', '0013_workshopmessage_created'),
]
operations = [
]
|
{
"content_hash": "e7bcf1a5c77b3d10e185d2d9af9b22a8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.6474820143884892,
"repo_name": "codepotpl/codepot-backend",
"id": "2e9601cf56b252330da4bede821bb7c6b4225996",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codepot/migrations/0014_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3921"
},
{
"name": "Python",
"bytes": "265410"
},
{
"name": "Shell",
"bytes": "2043"
}
],
"symlink_target": ""
}
|
import falcon
import logging
import importlib
import calplus.conf
LOG = logging.getLogger(__name__)
CONF = calplus.conf.CONF
class Request(falcon.Request):
pass
class Response(falcon.Response):
pass
class BaseMiddleware(object):
def process_request(self, req, resp):
"""Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
"""
pass
def process_resource(self, req, resp, resource, params):
"""Process the request after routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed.
params: A dict-like object representing any additional
params derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
pass
def process_response(self, req, resp, resource):
"""Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
"""
pass
class BaseResource(object):
"""Base class for CAL resources"""
def __init__(self, controller, *args, **kwargs):
self.controller = controller
self.req_ids = None
def on_get(self, req, resp, *args, **kwargs):
pass
def on_post(self, req, resp, *args, **kwargs):
pass
def on_put(self, req, resp, *args, **kwargs):
pass
def on_delete(self, req, resp, *args, **kwargs):
pass
class _Singleton(type):
""" A metaclass that creates a Singleton base class when called. """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args,
**kwargs)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})):
pass
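# Editorial sketch, not part of the original file: the metaclass above caches
# one instance per class, so every instantiation of a Singleton subclass
# yields the same object.
#
#     class Config(Singleton):
#         pass
#
#     assert Config() is Config()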
class BaseClient(Singleton):
"""Base Client
:params path: module path of driver, for e.x: 'calplus.v1.network.driver'
:params provider: provider for e.x: 'OpenStack'
:params cloud_config:
"""
def __init__(self, path, provider, cloud_config):
self.driver = None
self.set_driver(path, provider, cloud_config)
def set_driver(self, path, provider, cloud_config):
_provider = provider.lower()
module = importlib.import_module(path + '.' + _provider)
LOG.info('Use %s driver for client', _provider)
_driver = CONF.providers.driver_mapper[_provider]
self.driver = getattr(module, _driver)(cloud_config)
|
{
"content_hash": "bbdab84d87ef1a7c4f54519e9438af99",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 28.57758620689655,
"alnum_prop": 0.5840120663650076,
"repo_name": "cloudcomputinghust/CAL",
"id": "f37034d9d8355d00b5290d9fb8c60c25ad0291c1",
"size": "3315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calplus/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "243156"
}
],
"symlink_target": ""
}
|
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed program's standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attributes of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
output = check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc is not None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 classes basically work like subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all file descriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import errno
import fcntl
import pickle
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
"check_output", "CalledProcessError"]
if mswindows:
from _subprocess import CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxint)
if res is not None and res >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
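# Editorial illustration, not part of the original module, of the quoting
# rules implemented above: an argument containing whitespace is wrapped in
# double quotes, others are passed through untouched.
#
#     >>> list2cmdline(["echo", "hello world"])
#     'echo "hello world"'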
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (_subprocess.GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
                # stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
            # translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Only SIGTERM is supported on Windows")
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
try:
try:
self._set_cloexec_flag(errpipe_write)
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread is not None:
os.dup2(p2cread, 0)
if c2pwrite is not None:
os.dup2(c2pwrite, 1)
if errwrite is not None:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
if p2cread is not None and p2cread not in (0,):
os.close(p2cread)
if c2pwrite is not None and c2pwrite not in (p2cread, 1):
os.close(c2pwrite)
if errwrite is not None and errwrite not in (p2cread, c2pwrite, 2):
os.close(errwrite)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread is not None and p2cwrite is not None:
os.close(p2cread)
if c2pwrite is not None and c2pread is not None:
os.close(c2pwrite)
if errwrite is not None and errread is not None:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
# Exception limited to 1M
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
_eintr_retry_call(os.waitpid, self.pid, 0)
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None:
os.close(fd)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope."""
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
input_offset += os.write(fd, chunk)
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
chunk = input[input_offset : input_offset + _PIPE_BUF]
bytes_written = os.write(self.stdin.fileno(), chunk)
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
|
{
"content_hash": "e849e5c482b96e477dae5a58638fe270",
"timestamp": "",
"source": "github",
"line_count": 1450,
"max_line_length": 128,
"avg_line_length": 35.67793103448276,
"alnum_prop": 0.5561440473198925,
"repo_name": "MalloyPower/parsing-python",
"id": "bdd116a103a59beca140439a88653364f9a49589",
"size": "52102",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.7/Lib/subprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""Pytest-bdd pytest hooks."""
from __future__ import annotations
import pytest
def pytest_bdd_before_scenario(request, feature, scenario):
"""Called before scenario is executed."""
def pytest_bdd_after_scenario(request, feature, scenario):
"""Called after scenario is executed."""
def pytest_bdd_before_step(request, feature, scenario, step, step_func):
"""Called before step function is set up."""
def pytest_bdd_before_step_call(request, feature, scenario, step, step_func, step_func_args):
"""Called before step function is executed."""
def pytest_bdd_after_step(request, feature, scenario, step, step_func, step_func_args):
"""Called after step function is successfully executed."""
def pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args, exception):
"""Called when step function failed to execute."""
def pytest_bdd_step_func_lookup_error(request, feature, scenario, step, exception):
"""Called when step lookup failed."""
@pytest.hookspec(firstresult=True)
def pytest_bdd_apply_tag(tag, function):
"""Apply a tag (from a ``.feature`` file) to the given scenario.
The default implementation does the equivalent of
``getattr(pytest.mark, tag)(function)``, but you can override this hook and
return ``True`` to do more sophisticated handling of tags.
"""
|
{
"content_hash": "e05b2824408fccb0188f5a36e8c2d9f5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 98,
"avg_line_length": 31.558139534883722,
"alnum_prop": 0.7148120854826824,
"repo_name": "pytest-dev/pytest-bdd",
"id": "9351b2e3097f75356dc17b1ff9bee6e7c2cc128b",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytest_bdd/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "620"
},
{
"name": "Python",
"bytes": "205757"
}
],
"symlink_target": ""
}
|
''' Class for timing functions '''
import time
class timer:
''' Object that tracks start and end time and prints elapsed time
Instantiating timer starts the clock and timer.stop() stops the clock
'''
def __init__(self, out='Wall Time ='):
self.start = time.time()
self.out = out
def stop(self):
elapsed = time.time() - self.start
fmt = '{:.2f}'
        label = ''
        if elapsed < 60:
            label = fmt.format(elapsed) + ' seconds'
        elif elapsed < 3600:
            label = fmt.format(elapsed/60) + ' minutes'
        else:
            label = fmt.format(elapsed/3600) + ' hours'
        print(self.out, label)
        return elapsed, label
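# Minimal usage sketch (illustrative only, not part of the original module):
#
#     t = timer('Elapsed =')
#     ...                          # work being timed
#     elapsed, label = t.stop()    # prints e.g. "Elapsed = 1.23 seconds"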
|
{
"content_hash": "4b69132e5a0b58aa8d6f7d0b253956fa",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 20.387096774193548,
"alnum_prop": 0.6281645569620253,
"repo_name": "smsolivier/VEF",
"id": "4dc0bb2cfb8ce83ca08c6a2674a35aff5998b3f8",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/Timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8044"
},
{
"name": "Python",
"bytes": "117935"
},
{
"name": "TeX",
"bytes": "367637"
}
],
"symlink_target": ""
}
|
"""A module for the Cloud SDK CLI tree external representation."""
import argparse
import textwrap
from googlecloudsdk.core.console import console_io
def _NormalizeDescription(description):
"""Normalizes description text.
argparse.SUPPRESS normalizes to None.
Args:
description: str, The text to be normalized.
Returns:
str, The normalized text.
"""
if description == argparse.SUPPRESS:
description = None
elif description:
description = textwrap.dedent(description)
return description or ''
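# Illustrative _NormalizeDescription behaviour (hypothetical inputs):
#   _NormalizeDescription(argparse.SUPPRESS)  -> ''
#   _NormalizeDescription(None)               -> ''
#   _NormalizeDescription('  two\n  lines')   -> 'two\nlines'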
class Flag(object):
"""Flag info.
Attributes:
type: str, The flag value type name {'bool', 'int', 'float', 'string'}.
name: str, The normalized flag name ('_' => '-').
hidden: bool, True if the flag is hidden.
value: str, The flag value documentation name.
countmin: int, The minimum number of flag values.
countmax: int, The maximum number of flag values, 0 for unlimited.
required: int, 1 if the flag must be specified, 0 otherwise.
description: str, The help text.
choices: list, The list of static choices.
default: (self.type), The default flag value or None if no default.
group: int, Mutually exclusive flag group id counting from 1, 0 if none.
resource: str, Flag value resource identifier.
"""
def __init__(self, name, description='', default=None):
self.type = 'string'
self.name = name
self.hidden = description == argparse.SUPPRESS
self.value = ''
self.countmin = 0
self.countmax = 0
self.required = 0
self.choices = []
self.default = default
self.description = _NormalizeDescription(description)
self.group = 0
self.resource = ''
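# Hypothetical Flag construction sketch (illustrative values only):
#
#   f = Flag('--zone', description='The compute zone.', default=None)
#   f.required = 1
#   # f.type stays 'string' unless adjusted later, as Command does below
#   # based on arg.nargs and arg.type.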
class Positional(object):
"""Positional info.
Attributes:
name: str, The normalized name ('_' => '-').
value: str, The positional value documentation name.
countmin: int, The minimum number of positional values.
countmax: int, The maximum number of positional values.
required: int, 1 if the positional must be specified, 0 otherwise.
description: str, The help text.
resource: str, Positional value resource identifier.
"""
def __init__(self, name, description):
self.name = name
self.value = ''
self.countmin = 0
self.countmax = 0
self.capsule = ''
self.description = description
self.resource = ''
class Command(object):
"""Command and group info.
Attributes:
release: str, The command release name {'internal', 'alpha', 'beta', 'ga'}.
name: str, The normalized name ('_' => '-').
hidden: bool, True if the command is hidden.
capsule: str, The first line of the command docstring.
description: str, The second and following lines of the command docstring.
flags: {str:str}, Command flag dict, indexed by normalized flag name.
positionals: [str], Command positionals list.
sections: {str:str}, Optional section help dict, indexed by section name.
"""
def __init__(self, command, parent):
self.release = command.ReleaseTrack().id
self.name = command.name.replace('_', '-')
self.hidden = command.IsHidden()
self.flags = {}
self.positionals = []
self.sections = {}
parent_command = parent.name.replace('_', '-') if parent else ''
self.release, capsule = self.__Release(
command, self.release, getattr(command, 'short_help', ''))
self.capsule = console_io.LazyFormat(
_NormalizeDescription(capsule),
command=self.name,
parent_command=parent_command)
self.release, description = self.__Release(
command, self.release, getattr(command, 'long_help', ''))
self.description = console_io.LazyFormat(
_NormalizeDescription(description),
command=self.name,
index=self.capsule,
parent_command=parent_command)
sections = getattr(command, 'detailed_help', None)
if sections:
for s in sections:
if s == 'brief':
self.release, self.capsule = self.__Release(
command, self.release, sections[s])
else:
self.sections[s] = console_io.LazyFormat(
_NormalizeDescription(sections[s]),
command=self.name,
index=self.capsule,
description=self.description,
parent_command=parent_command)
self.commands = {}
# _parent is explicitly private so it won't appear in serialized output.
self._parent = parent
if parent:
parent.commands[self.name] = self
args = command.ai
# Initialize the mutually exclusive flag groups.
group_count = {}
group_name = {}
for arg in args.flag_args:
for name in arg.option_strings:
if name.startswith('--'):
name = name.replace('_', '-')
if not self.__Ancestor(name):
g = args.mutex_groups.get(arg.dest, None)
if g:
group_name[name] = g
if g in group_count:
group_count[g] += 1
else:
group_count[g] = 1
group_id_count = 0
group_id = {}
# Sorted iteration preserves group_id[] indices across separate invocations
# where the mutex groups do not change.
for _, g in sorted(group_name.iteritems()):
if group_count[g] > 1:
group_count[g] = 0 # Don't check this group again!
group_id_count += 1
group_id[g] = group_id_count
# Collect the flags.
for arg in sorted(args.flag_args):
for name in arg.option_strings:
if name.startswith('--'):
name = name.replace('_', '-')
# Don't include ancestor flags.
if not self.__Ancestor(name):
flag = Flag(name, description=_NormalizeDescription(arg.help),
default=arg.default)
# ArgParse does not have an explicit Boolean flag type. By
# convention a flag with arg.nargs=0 and action='store_true' or
# action='store_false' is a Boolean flag. arg.type gives no hint
# (arg.type=bool would have been so easy) and we don't have access
# to args.action here. Even then the flag can take on non-Boolean
# values. If arg.default is not specified then it will be None, but
# it can be set to anything. So we do a conservative 'truthiness'
# test here.
if arg.nargs == 0:
flag.type = 'bool'
flag.default = True if arg.default else False
else:
if arg.type == int:
flag.type = 'int'
elif arg.type == float:
flag.type = 'float'
if arg.nargs == '*':
pass
elif arg.nargs == '?':
flag.countmax = 1
elif arg.nargs == '+':
flag.countmin = 1
elif type(arg.nargs) in (int, long):
flag.countmin = arg.nargs
flag.countmax = arg.nargs
if arg.metavar:
flag.value = arg.metavar
else:
flag.value = name[2:].upper()
if arg.choices:
choices = sorted(arg.choices)
if choices == ['false', 'true']:
flag.type = 'bool'
else:
flag.choices = choices
if arg.required:
flag.required = 1
flag.resource = getattr(arg, 'completion_resource', '')
if name in group_name and group_name[name] in group_id:
flag.group = group_id[group_name[name]]
self.flags[flag.name] = flag
# Collect the positionals.
for arg in args.positional_args:
name = arg.dest.replace('_', '-')
positional = Positional(name, description=_NormalizeDescription(arg.help))
if arg.metavar:
positional.value = arg.metavar
if arg.nargs != 0:
if arg.nargs == '*':
pass
elif arg.nargs == '?':
positional.countmax = 1
elif arg.nargs == '+':
positional.countmin = 1
elif type(arg.nargs) in (int, long):
positional.countmin = arg.nargs
positional.countmax = arg.nargs
positional.resource = getattr(arg, 'completion_resource', '')
self.positionals.append(positional)
def __Ancestor(self, flag):
"""Determines if flag is provided by an ancestor command.
Args:
flag: str, The flag name (no leading '-').
Returns:
bool, True if flag provided by an ancestor command, false if not.
"""
command = self._parent
while command:
if flag in command.flags:
return True
command = command._parent # pylint: disable=protected-access
return False
def __Release(self, command, release, description):
"""Determines the release type from the description text.
Args:
command: Command, The CLI command/group description.
release: int, The default release type.
description: str, The command description markdown.
Returns:
(release, description): (int, str), The actual release and description
with release prefix omitted.
"""
description = _NormalizeDescription(description)
path = command.GetPath()
if len(path) >= 2 and path[1] == 'internal':
release = 'INTERNAL'
return release, description
|
{
"content_hash": "a275baa16aa3aa5da2def29cffe8c435",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 80,
"avg_line_length": 35.06390977443609,
"alnum_prop": 0.6029805939744827,
"repo_name": "flgiordano/netcash",
"id": "1af2e6de06ed8f33344bda55860f048541e0b41c",
"size": "9923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/calliope/cli_tree.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
}
|
'''This module is an example of training a classifier on SNLI'''
from __future__ import print_function
from os.path import join
import nltk
import numpy as np
import os
import urllib
import zipfile
import sys
import json
from spodernet.hooks import AccuracyHook, LossHook, ETAHook
from spodernet.preprocessing.pipeline import Pipeline
from spodernet.preprocessing.processors import AddToVocab, CreateBinsByNestedLength, SaveLengthsToState, ConvertTokenToIdx, StreamToHDF5, Tokenizer, NaiveNCharTokenizer
from spodernet.preprocessing.processors import JsonLoaderProcessors, DictKey2ListMapper, RemoveLineOnJsonValueCondition, ToLower
from spodernet.preprocessing.batching import StreamBatcher
from spodernet.utils.logger import Logger, LogLevel
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.util import get_data_path
from spodernet.frontend import Model, PairedBiDirectionalLSTM, SoftmaxCrossEntropy, Embedding, Trainer
Config.parse_argv(sys.argv)
np.set_printoptions(suppress=True)
def download_snli():
'''Creates data and snli paths and downloads SNLI in the home dir'''
home = os.environ['HOME']
data_dir = join(home, '.data')
snli_dir = join(data_dir, 'snli')
snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(snli_dir):
os.mkdir(snli_dir)
if not os.path.exists(join(data_dir, 'snli_1.0.zip')):
print('Downloading SNLI...')
snlidownload = urllib.URLopener()
snlidownload.retrieve(snli_url, join(data_dir, "snli_1.0.zip"))
print('Opening zip file...')
archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r')
return archive, snli_dir
def snli2json():
    '''Preprocesses SNLI data and converts it into spodernet data files'''
files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl',
'snli_1.0_test.jsonl']
archive, snli_dir = download_snli()
new_files = ['train.data', 'dev.data', 'test.data']
names = ['train', 'dev', 'test']
if not os.path.exists(join(snli_dir, new_files[0])):
for name, new_name in zip(files, new_files):
print('Writing {0}...'.format(new_name))
            # Reuse the archive already opened by download_snli(); re-opening
            # it here referenced data_dir, which is not defined in this scope.
snli_file = archive.open(join('snli_1.0', name), 'r')
with open(join(snli_dir, new_name), 'w') as datafile:
for line in snli_file:
data = json.loads((line))
if data['gold_label'] == '-':
continue
premise = data['sentence1']
hypothesis = data['sentence2']
target = data['gold_label']
datafile.write(
json.dumps([premise, hypothesis, target]) + '\n')
return [names, [join(snli_dir, new_name) for new_name in new_files]]
def preprocess_SNLI(delete_data=False):
# load data
#names, file_paths = snli2json()
#train_path, dev_path, test_path = file_paths
tokenizer = nltk.tokenize.WordPunctTokenizer()
zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0')
file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']
not_t = []
t = ['input', 'support', 'target']
# tokenize and convert to hdf5
# 1. Setup pipeline to save lengths and generate vocabulary
p = Pipeline('snli_example', delete_data)
p.add_path(join(zip_path, file_paths[0]))
p.add_line_processor(JsonLoaderProcessors())
p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_token_processor(AddToVocab())
p.add_post_processor(SaveLengthsToState())
p.execute()
p.clear_processors()
p.state['vocab'].save_to_disk()
# 2. Process the data further to stream it to hdf5
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_post_processor(ConvertTokenToIdx())
p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128))
state = p.execute()
# dev and test data
p2 = Pipeline('snli_example')
p2.copy_vocab_from_pipeline(p)
p2.add_path(join(zip_path, file_paths[1]))
p2.add_line_processor(JsonLoaderProcessors())
p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(SaveLengthsToState())
p2.execute()
p2.clear_processors()
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(ConvertTokenToIdx())
p2.add_post_processor(StreamToHDF5('snli_dev'))
p2.execute()
p3 = Pipeline('snli_example')
p3.copy_vocab_from_pipeline(p)
p3.add_path(join(zip_path, file_paths[2]))
p3.add_line_processor(JsonLoaderProcessors())
p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(SaveLengthsToState())
p3.execute()
p3.clear_processors()
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(ConvertTokenToIdx())
p3.add_post_processor(StreamToHDF5('snli_test'))
p3.execute()
def main():
Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO
#Config.backend = Backends.TENSORFLOW
Config.backend = Backends.TORCH
Config.cuda = True
Config.dropout = 0.1
Config.hidden_size = 128
Config.embedding_size = 256
Config.L2 = 0.00003
do_process = False
if do_process:
preprocess_SNLI(delete_data=True)
p = Pipeline('snli_example')
vocab = p.state['vocab']
vocab.load_from_disk()
batch_size = 128
if Config.backend == Backends.TENSORFLOW:
from spodernet.backends.tfbackend import TensorFlowConfig
TensorFlowConfig.init_batch_size(batch_size)
train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8)
#train_batcher.subscribe_to_batch_prepared_event(SomeExpensivePreprocessing())
dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size)
test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size)
train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=1000))
dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=1000))
eta = ETAHook(print_every_x_batches=1000)
train_batcher.subscribe_to_events(eta)
train_batcher.subscribe_to_start_of_epoch_event(eta)
model = Model()
model.add(Embedding(128, vocab.num_embeddings))
model.add(PairedBiDirectionalLSTM(128, hidden_size=256, variable_length=True, conditional_encoding=False))
model.add(SoftmaxCrossEntropy(input_size=256*4, num_labels=3))
t = Trainer(model)
for i in range(10):
t.train(train_batcher, epochs=1)
t.evaluate(dev_batcher)
if __name__ == '__main__':
main()
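# Hypothetical invocation sketch (assumes snli_1.0.zip already sits in the
# directory returned by get_data_path(); command-line flags are consumed by
# Config.parse_argv above):
#
#     python snli.py
#
# Set do_process = True in main() for the first run so the HDF5 batches and
# the vocabulary are generated before training starts.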
|
{
"content_hash": "b83aa7cd27497f9af19229b552ca0ced",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 168,
"avg_line_length": 38.609756097560975,
"alnum_prop": 0.6789639924194567,
"repo_name": "TimDettmers/spodernet",
"id": "53ef133039ce6a1b58342ed0b819da9866d9eff2",
"size": "7915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/snli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176696"
}
],
"symlink_target": ""
}
|
from pytest import raises, skip
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_continuum import (
versioning_manager, ImproperlyConfigured, TransactionFactory
)
from tests import TestCase
class TestVersionedModelWithoutVersioning(TestCase):
def create_models(self):
class TextItem(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'versioning': False
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
self.TextItem = TextItem
def test_does_not_create_history_class(self):
assert 'class' not in self.TextItem.__versioned__
def test_does_not_create_history_table(self):
assert 'text_item_history' not in self.Model.metadata.tables
def test_does_add_objects_to_unit_of_work(self):
self.session.add(self.TextItem())
self.session.commit()
class TestWithUnknownUserClass(object):
def test_raises_improperly_configured_error(self):
self.Model = declarative_base()
class TextItem(self.Model):
__tablename__ = 'text_item'
__versioned__ = {}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
self.TextItem = TextItem
versioning_manager.user_cls = 'User'
versioning_manager.declarative_base = self.Model
factory = TransactionFactory()
with raises(ImproperlyConfigured):
factory(versioning_manager)
class TestWithCreateModelsAsFalse(TestCase):
should_create_models = False
def create_models(self):
class Article(self.Model):
__tablename__ = 'article'
__versioned__ = {}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
content = sa.Column(sa.UnicodeText)
description = sa.Column(sa.UnicodeText)
class Category(self.Model):
__tablename__ = 'category'
__versioned__ = {}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
article_id = sa.Column(sa.Integer, sa.ForeignKey(Article.id))
article = sa.orm.relationship(
Article,
backref=sa.orm.backref(
'category',
uselist=False
)
)
self.Article = Article
self.Category = Category
def test_does_not_create_models(self):
assert 'class' not in self.Article.__versioned__
def test_insert(self):
if self.options['native_versioning'] is False:
skip()
article = self.Article(name=u'Some article')
self.session.add(article)
self.session.commit()
version = dict(
self.session.execute('SELECT * FROM article_version')
.fetchone()
)
assert version['transaction_id'] > 0
assert version['id'] == article.id
assert version['name'] == u'Some article'
class TestWithoutAnyVersionedModels(TestCase):
def create_models(self):
class Article(self.Model):
__tablename__ = 'article'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
content = sa.Column(sa.UnicodeText)
description = sa.Column(sa.UnicodeText)
self.Article = Article
def test_insert(self):
article = self.Article(name=u'Some article')
self.session.add(article)
self.session.commit()
|
{
"content_hash": "d37e4e56d0963053f575edec472cf21e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 76,
"avg_line_length": 31.201680672268907,
"alnum_prop": 0.6051710207379477,
"repo_name": "piotr-dobrogost/sqlalchemy-continuum",
"id": "9dd034cca2d7ee4b00d046ac2120b53274b53a52",
"size": "3713",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_configuration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "305180"
}
],
"symlink_target": ""
}
|
"""VIF drivers for libvirt."""
import copy
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
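# Illustrative checks against the table above (constants from network_model):
#   is_vif_model_valid_for_virt('kvm', network_model.VIF_MODEL_VIRTIO) -> True
#   is_vif_model_valid_for_virt('lxc', network_model.VIF_MODEL_E1000)  -> False
#   is_vif_model_valid_for_virt('kvm', None)                           -> True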
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, vif, image_meta,
inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
vif_model = image_meta.get('properties',
{}).get('hw_vif_model')
if vif_model is not None:
model = vif_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
designer.set_vif_guest_frontend_config(
conf, vif['address'], model, driver)
return conf
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_firewall_required(self, vif):
if vif.is_neutron_filtering_enabled():
return False
if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
return True
return False
def get_config_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
"""Get VIF configurations for bridge type."""
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(vif),
self.get_ovs_interfaceid(vif),
self.get_vif_devname(vif))
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance, newvif, image_meta,
inst_type, virt_type)
def get_config_ovs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ovs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ovs_bridge(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance,
vif,
image_meta,
inst_type,
virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_mlnx_direct(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
designer.set_vif_host_backend_direct_config(conf, devname)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_vhostuser(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
return conf
def get_config_vrouter(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type)
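    # Dispatch sketch (illustrative): a vif of type 'ovs' resolves to
    # get_config_ovs(), while '802.1qbg' is first normalized to '802qbg'
    # and resolves to get_config_802qbg(); the mapping is purely name-based
    # via the getattr() lookup above.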
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network = vif['network']
if (not network.get_meta('multi_host', False) and
network.get_meta('should_create_bridge', False)):
if network.get_meta('should_create_vlan', False):
iface = CONF.vlan_interface or \
network.get_meta('bridge_interface')
LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
{'vlan': network.get_meta('vlan'),
'bridge': self.get_bridge_name(vif)},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network.get_meta('vlan'),
self.get_bridge_name(vif),
iface)
else:
iface = CONF.flat_interface or \
network.get_meta('bridge_interface')
LOG.debug("Ensuring bridge %s",
self.get_bridge_name(vif), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(vif),
iface)
def plug_ovs_bridge(self, instance, vif):
"""No manual plugging required."""
pass
def _plug_bridge_with_port(self, instance, vif, port):
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
if port == 'ovs':
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id,
vif['address'], instance['uuid'])
elif port == 'ivs':
linux_net.create_ivs_vif_port(v2_name, iface_id,
vif['address'], instance['uuid'])
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ovs')
def plug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
else:
self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance['uuid'])
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ivs')
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
device_id = instance['uuid']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net.create_tap_dev(dev)
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
        Connect the network device to its Virtual Domain in the
        PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance["project_id"]
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], 'pgtag2=%s' % net_id,
'pgtag1=%s' % tenant_id, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_vhostuser(self, instance, vif):
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if ovs_plug:
iface_id = self.get_ovs_interfaceid(vif)
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
port_name, iface_id, vif['address'],
instance['uuid'])
linux_net.ovs_set_vhostuser_port_type(port_name)
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
Bind the vif to a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
ip_addr = '0.0.0.0'
ip6_addr = None
subnets = vif['network']['subnets']
for subnet in subnets:
if not subnet['ips']:
continue
ips = subnet['ips'][0]
if not ips['address']:
continue
if (ips['version'] == 4):
if ips['address'] is not None:
ip_addr = ips['address']
if (ips['version'] == 6):
if ips['address'] is not None:
ip6_addr = ips['address']
ptype = 'NovaVMPort'
if (cfg.CONF.libvirt.virt_type == 'lxc'):
ptype = 'NameSpacePort'
cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s "
"--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s"
" --vm_name=%s --mac=%s --tap_name=%s --port_type=%s "
"--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'],
instance.uuid, vif['network']['id'],
instance.project_id, ip_addr, ip6_addr,
instance.display_name, vif['address'],
vif['devname'], ptype, -1, -1))
try:
linux_net.create_tap_dev(dev)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
self.unplug_ovs_bridge(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
try:
utils.execute('ebrctl', 'del-port', fabric,
vnic_mac, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
        Delete the network device and its connection to the
        Virtual Domain in the PLUMgrid Platform.
"""
iface_id = vif['id']
dev = self.get_vif_devname(vif)
try:
utils.execute('ifc_ctl', 'gateway', 'ifdown',
dev, 'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_vhostuser(self, instance, vif):
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if ovs_plug:
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
port_name)
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
cmd_args = ("--oper=delete --uuid=%s" % (vif['id']))
try:
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while unplugging vif"), instance=instance)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
|
{
"content_hash": "6ea28f3d52eb6d7457f32f025fad4f16",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 79,
"avg_line_length": 40.24609375,
"alnum_prop": 0.5319486233783041,
"repo_name": "orbitfp7/nova",
"id": "1b7fe61b5965883429c87cf59582ede6ed7fc5e0",
"size": "31611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/vif.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3272"
},
{
"name": "Python",
"bytes": "15640028"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "XML",
"bytes": "45493"
}
],
"symlink_target": ""
}
|
from mangopaysdk.types.oauthtoken import OAuthToken
from mangopaysdk.tools.apibase import ApiBase
from mangopaysdk.tools.resttool import RestTool
import logging
class ApiOAuth(ApiBase):
"""MangoPay API methods for users."""
def CreateToken(self):
"""Get token information for OAuth Authentication.
return MangoPay OAuthToken object with token information
"""
urlMethod = self._getRequestUrl('authentication_oauth')
requestType = self._getRequestType('authentication_oauth')
requestData = {
'grant_type' : 'client_credentials'
}
rest = RestTool(self._root, False)
response = rest.Request(urlMethod, requestType, requestData)
token = self._castAuthResponseToEntity(response)
return token
    def _castAuthResponseToEntity(self, response):
        res = OAuthToken()
        res.access_token = response['access_token']
        res.token_type = response['token_type']
        res.expires_in = response['expires_in']
        res.valid = True
if (self._root.Config.DebugMode):
logging.getLogger(__name__).debug('New token created: {0} '.format(res.access_token))
return res
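# Hypothetical usage sketch (assumes an initialized SDK root object named
# `api`; ApiOAuth is normally reached through that root, and the constructor
# signature is taken from ApiBase):
#
#     oauth = ApiOAuth(api)
#     token = oauth.CreateToken()
#     print(token.access_token, token.expires_in)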
|
{
"content_hash": "ac5cb93769c0b36251346dad37831db5",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 35.38235294117647,
"alnum_prop": 0.6541978387364921,
"repo_name": "gracaninja/mangopay2-python-sdk",
"id": "91d15d359025b729c7b254abceacbb5abf56d3ac",
"size": "1203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mangopaysdk/tools/apioauth.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from .common import SeaBreezeError
from .spectrometer import (
SpectrometerFeatureUSB2000,
SpectrometerFeatureHR2000,
SpectrometerFeatureHR4000,
SpectrometerFeatureUSB650,
SpectrometerFeatureHR2000PLUS,
SpectrometerFeatureQE65000,
SpectrometerFeatureUSB2000PLUS,
SpectrometerFeatureUSB4000,
SpectrometerFeatureNIRQUEST512,
SpectrometerFeatureNIRQUEST256,
SpectrometerFeatureMAYA2000PRO,
SpectrometerFeatureMAYA2000,
SpectrometerFeatureTORUS,
SpectrometerFeatureAPEX,
SpectrometerFeatureMAYALSL,
SpectrometerFeatureJAZ,
SpectrometerFeatureSTS,
SpectrometerFeatureQEPRO,
SpectrometerFeatureVENTANA,
)
from .wavelength import WavelengthCoefficientsEEPromFeature
from .eeprom import EEPromFeature
from .thermoelectric import ThermoElectricFeatureOOI, ThermoElectricFeatureOBP
from .nonlinearity import NonlinearityCoefficientsEEPromFeature, NonlinearityCoefficientsOBPFeature
from .defines import EndPoints
from .common import (
NotImplementedWrapper,
NoShutterFeature,
NoTecFeature,
NoEEPromFeature,
)
class USB2000(SpectrometerFeatureUSB2000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['USB2000']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 3000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 4095
class HR2000(SpectrometerFeatureHR2000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['HR2000']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 3000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 4095
class HR4000(SpectrometerFeatureHR4000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['HR4000']
_PIXELS = 3840 # FIXME
_RAW_SPECTRUM_LEN = (3840 * 2) + 1
_INTEGRATION_TIME_MIN = 10
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 16383
class USB650(SpectrometerFeatureUSB650,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['USB650']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 3000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 4095
class HR2000PLUS(SpectrometerFeatureHR2000PLUS,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['HR2000PLUS']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 16383
class QE65000(SpectrometerFeatureQE65000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
ThermoElectricFeatureOOI,
NoShutterFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['QE65000']
_PIXELS = 1280 # FIXME
_RAW_SPECTRUM_LEN = (1024 + 256)*2 + 1
_INTEGRATION_TIME_MIN = 8000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 65535
class USB2000PLUS(SpectrometerFeatureUSB2000PLUS,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['USB2000PLUS']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
class USB4000(SpectrometerFeatureUSB4000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['USB4000']
_PIXELS = 3840 # FIXME
_RAW_SPECTRUM_LEN = (3840 * 2) + 1
_INTEGRATION_TIME_MIN = 10
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
class NIRQUEST512(SpectrometerFeatureNIRQUEST512,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
ThermoElectricFeatureOOI,
NoShutterFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['NIRQUEST512']
_PIXELS = 512 # FIXME
_RAW_SPECTRUM_LEN = (512 * 2) + 1
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 65535
class NIRQUEST256(SpectrometerFeatureNIRQUEST256,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
ThermoElectricFeatureOOI,
NoShutterFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['NIRQUEST256']
_PIXELS = 256 # FIXME
_RAW_SPECTRUM_LEN = (256 * 2) + 1
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1000
_MAX_PIXEL_VALUE = 65535
class MAYA2000PRO(SpectrometerFeatureMAYA2000PRO,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['MAYA2000PRO']
_PIXELS = 2304 # FIXME
_RAW_SPECTRUM_LEN = (2304 * 2) + 1
_INTEGRATION_TIME_MIN = 7200
_INTEGRATION_TIME_MAX = 65000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 64000
class MAYA2000(SpectrometerFeatureMAYA2000,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['MAYA2000']
_PIXELS = 2304 # FIXME
_RAW_SPECTRUM_LEN = (2304 * 2) + 1
_INTEGRATION_TIME_MIN = 15000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
class TORUS(SpectrometerFeatureTORUS,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['TORUS']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) + 1
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
class APEX(SpectrometerFeatureAPEX,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['APEX']
_PIXELS = 2304 # FIXME
_RAW_SPECTRUM_LEN = (2304 * 2) + 1
_INTEGRATION_TIME_MIN = 15000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 64000
class MAYALSL(SpectrometerFeatureMAYALSL,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['MAYALSL']
_PIXELS = 2304 # FIXME
_RAW_SPECTRUM_LEN = (2304 * 2) + 1
_INTEGRATION_TIME_MIN = 7200
_INTEGRATION_TIME_MAX = 65000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 64000
class JAZ(SpectrometerFeatureJAZ,
WavelengthCoefficientsEEPromFeature,
NonlinearityCoefficientsEEPromFeature,
EEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['JAZ']
_PIXELS = 2048 # FIXME
_RAW_SPECTRUM_LEN = (2048 * 2) # XXX: No Sync byte!
_INTEGRATION_TIME_MIN = 1000
_INTEGRATION_TIME_MAX = 655350000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
class STS(SpectrometerFeatureSTS,
NonlinearityCoefficientsOBPFeature,
NoEEPromFeature,
NoShutterFeature,
NoTecFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['STS']
_PIXELS = 1024 # FIXME
_RAW_SPECTRUM_LEN = (1024 * 2) # XXX: No Sync byte!
_INTEGRATION_TIME_MIN = 10
_INTEGRATION_TIME_MAX = 85000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 16383
class QEPRO(SpectrometerFeatureQEPRO,
ThermoElectricFeatureOBP,
NonlinearityCoefficientsOBPFeature,
NoEEPromFeature,
NoShutterFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['QEPRO']
_PIXELS = 1044 # FIXME
_RAW_SPECTRUM_LEN = (1044 * 4) + 32 # XXX: Metadata
_INTEGRATION_TIME_MIN = 10000
_INTEGRATION_TIME_MAX = 1600000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = (2**18)-1
class VENTANA(SpectrometerFeatureVENTANA,
ThermoElectricFeatureOBP,
NonlinearityCoefficientsOBPFeature,
NoEEPromFeature,
NoShutterFeature,
NotImplementedWrapper):
_ENDPOINT_MAP = EndPoints['VENTANA']
_PIXELS = 1024 # FIXME
_RAW_SPECTRUM_LEN = (1024 * 2) # XXX: No Sync byte!
_INTEGRATION_TIME_MIN = 22000
_INTEGRATION_TIME_MAX = 60000000
_INTEGRATION_TIME_BASE = 1
_MAX_PIXEL_VALUE = 65535
USBInterfaces = {
0x1002: USB2000,
0x100a: HR2000,
0x1012: HR4000,
0x1014: USB650,
0x1016: HR2000PLUS,
0x1018: QE65000,
0x101E: USB2000PLUS,
0x1022: USB4000,
0x1026: NIRQUEST512,
0x1028: NIRQUEST256,
0x102a: MAYA2000PRO,
0x102c: MAYA2000,
0x1040: TORUS,
0x1044: APEX,
0x1046: MAYALSL,
0x2000: JAZ,
0x4000: STS,
0x4004: QEPRO,
0x5000: VENTANA,
}
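# Hedged usage sketch, not part of the original module: USBInterfaces maps USB
# product ids to interface classes, so a PID read from a device descriptor can
# be resolved roughly like this (the PID below is just an illustration):
#
#     interface_cls = USBInterfaces.get(0x101E)  # -> USB2000PLUS
#     if interface_cls is None:
#         raise KeyError("unsupported spectrometer product id")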
|
{
"content_hash": "f95864e685f0e2533d330248a57cdb11",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 99,
"avg_line_length": 34.39296187683284,
"alnum_prop": 0.6209924965893588,
"repo_name": "fishazam/api",
"id": "f19884534c6c8769095e045bb2b8fb0d9a204d82",
"size": "11728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seabreeze/pyseabreeze/interfaces/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "86065"
},
{
"name": "Shell",
"bytes": "1223"
}
],
"symlink_target": ""
}
|
import logging
import hashlib
import multiprocessing
import os
import re
import shutil
import subprocess
import time
from smqtk.utils import file_utils, string_utils
__author__ = "paul.tunison@kitware.com"
class VideoMetadata (object):
"""
Simple container for video file metadata values
"""
def __init__(self):
#: :type: None or int
self.width = None
#: :type: None or int
self.height = None
#: :type: None or float
self.fps = None
#: :type: None or float
self.duration = None
def get_metadata_info(video_filepath, ffprobe_exe='ffprobe'):
"""
Use ffmpeg to extract video file metadata parameters
:param video_filepath: File path to the video to probe.
:type video_filepath: str
:param ffprobe_exe: Path to the ffprobe executable to use. By default, we
try to use the version that's on the PATH.
:return: VideoMetadata instance
:rtype: VideoMetadata
"""
log = logging.getLogger('smqtk.utils.video_utils.get_metadata_info')
re_float_match = "[+-]?(?:(?:\d+\.?\d*)|(?:\.\d+))(?:[eE][+-]?\d+)?"
log.debug("Using ffprobe: %s", ffprobe_exe)
cmd = [ffprobe_exe, '-i', video_filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# ffprobe puts output to err stream
if p.returncode: # non-zero
raise RuntimeError("Failed to probe video file. Error:\n%s"
% err)
# WxH
m = re.search("Stream.*Video.* (\d+)x(\d+)", err)
if m:
width = int(m.group(1))
height = int(m.group(2))
else:
raise RuntimeError("Couldn't find width/height specification "
"for video file '%s'" % video_filepath)
# FPS
m = re.search("Stream.*Video.* (%s) fps" % re_float_match, err)
if m:
fps = float(m.group(1))
else:
# falling back on tbr measurement
log.debug("Couldn't find fps measurement, looking for TBR")
m = re.search("Stream.*Video.* (%s) tbr" % re_float_match, err)
if m:
fps = float(m.group(1))
else:
raise RuntimeError("Couldn't find tbr specification for "
"video file '%s'" % video_filepath)
# Duration
m = re.search("Duration: (\d+):(\d+):(%s)" % re_float_match, err)
if m:
duration = (
(60 * 60 * int(m.group(1))) # hours
+ (60 * int(m.group(2))) # minutes
+ float(m.group(3)) # seconds
)
else:
raise RuntimeError("Couldn't find duration specification for "
"video file '%s'" % video_filepath)
md = VideoMetadata()
md.width = width
md.height = height
md.fps = fps
md.duration = duration
return md
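# Hedged usage sketch for get_metadata_info(), not part of the original module;
# the video path is hypothetical and ffprobe is assumed to be on the PATH:
#
#     md = get_metadata_info('/tmp/example.mp4')
#     print "%dx%d, %.2f fps, %.1f s" % (md.width, md.height, md.fps,
#                                        md.duration)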
def ffmpeg_extract_frame(t, input_filepath, output_filepath,
ffmpeg_exe='ffmpeg'):
"""
    Extract a frame at the given time ``t`` from the input video file.
Output file may not exist or be of 0 size if we failed to extract the frame.
"""
cmd = [ffmpeg_exe, '-accurate_seek', '-ss', str(t), '-i', input_filepath,
'-frames:v', '1', output_filepath]
sPIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=sPIPE, stderr=sPIPE)
_, _ = p.communicate()
# if p.returncode != 0:
# raise RuntimeError("FFmpeg failed to extract frame at time %f! "
# "(return code: %d)"
# % (t, p.returncode))
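# Hedged usage sketch, not part of the original module: because failures are
# silent, callers can verify the output file themselves. Paths are
# hypothetical.
#
#     ffmpeg_extract_frame(12.5, '/tmp/example.mp4', '/tmp/frame.png')
#     if not (os.path.isfile('/tmp/frame.png')
#             and os.path.getsize('/tmp/frame.png')):
#         raise RuntimeError("frame extraction failed")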
def ffmpeg_extract_frame_map(working_dir, video_filepath, second_offset=0,
second_interval=0, max_duration=0, frames=(),
output_image_ext="png", parallel=None,
ffmpeg_exe='ffmpeg'):
"""
Return a mapping of video frame index to image file in the given output
format.
If frames requested have not yet been extracted (based on what's contained
in the specified output directory), they are done now. This means that this
method could take a little time to complete if there are many frames in the
video file and this is the first time this is being called.
    This may return an empty mapping if there are no frames in the video for
    the specified, or default, constraints.
Extracted frames are cached in a directory structure under the provided
``working_dir`` directory path: ``<working_dir>/VideoFrameExtraction``.
Frames are extracted into separate directories based on the SHA1 checksum of
the video file.
:raises RuntimeError: No frames were extracted.
:param working_dir: Working directory for frame extraction to occur in.
:type working_dir: str
:param video_filepath: Path to the video to extract frames from.
:type video_filepath: str
:param second_offset: Seconds into the video to start extracting
:type second_offset: float
:param second_interval: Number of seconds between extracted frames
:type second_interval: float
:param max_duration: Maximum number of seconds worth of extracted frames
(starting from the specified offset). If <=0, we extract until the end
of the video.
:type max_duration: float
:param frames: Specific exact frame numbers within the video to extract.
Providing explicit frames causes offset, interval and duration
parameters to be ignored and only the frames specified here to be
extracted and returned.
:type frames: collections.Iterable[int]
:param parallel: Number of processes to use for frame extraction. This is
None by default, meaning that all available cores/threads are used.
:type parallel: int or None
:param ffmpeg_exe: ffmpeg executable to use for frame extraction. By
        default, we attempt to use what is available on the PATH.
:type ffmpeg_exe: str or unicode
:return: Map of frame-to-filepath for requested video frames
:rtype: dict of (int, str)
"""
log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map')
video_md = get_metadata_info(video_filepath)
video_sha1sum = hashlib.sha1(open(video_filepath, 'rb').read()).hexdigest()
frame_output_dir = os.path.join(
working_dir,
"VideoFrameExtraction",
*string_utils.partition_string(video_sha1sum, 10)
# 40 hex chars split into chunks of 4
)
file_utils.safe_create_dir(frame_output_dir)
def filename_for_frame(frame, ext):
"""
        Return the standard filename for the given frame number and extension.
"""
return "%08d.%s" % (frame, ext.lstrip('.'))
def iter_frames_for_interval():
"""
Return a generator expression yielding frame numbers from the input
video that match the given query parameters. Indices returned are
0-based (i.e. first frame is 0, not 1).
        We make the sensible assumption that we are not dealing with frame
        rates of over 1000Hz, and round frame times to the nearest
        thousandth of a second to mitigate floating point error.
        :rtype: collections.Iterable[int]
"""
_log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map'
'.iter_frames_for_interval')
num_frames = int(video_md.fps * video_md.duration)
first_frame = second_offset * video_md.fps
_log.debug("First frame: %f", first_frame)
if max_duration > 0:
cutoff_frame = min(float(num_frames),
(max_duration + second_offset) * video_md.fps)
else:
cutoff_frame = float(num_frames)
_log.debug("Cutoff frame: %f", cutoff_frame)
if second_interval:
incr = second_interval * video_md.fps
else:
incr = 1.0
_log.debug("Frame increment: %f", incr)
# Interpolate
        yield int(first_frame)  # yield integral indices, like the yields below
next_frm = first_frame + incr
while next_frm < cutoff_frame:
_log.debug("-- adding frame: %f", next_frm)
yield int(next_frm)
next_frm += incr
def extract_frames(frames_to_process):
"""
Extract specific frames from the input video file using ffmpeg. If not
all frames could be extracted, we return what we were able to extract.
:param frames_to_process: Mapping of frame-number:filepath pairs to
extract from the input video.
:type frames_to_process: dict[int,str or unicode]
:return: List of frames that were successfully extracted.
:rtype: list[int]
"""
_log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map'
'.extract_frames')
# Setup temp extraction directory
tmp_extraction_dir = os.path.join(frame_output_dir, ".TMP")
if os.path.isdir(tmp_extraction_dir):
_log.debug("Existing temp director found, removing and starting "
"over")
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
os.makedirs(tmp_extraction_dir)
p = multiprocessing.Pool(parallel)
# Mapping of frame to (result, output_filepath)
#: :type: dict of (int, (AsyncResult, str))
rmap = {}
for f, ofp in frames_to_process.iteritems():
tfp = os.path.join(tmp_extraction_dir,
filename_for_frame(f, output_image_ext))
t = f / video_md.fps
rmap[f] = (
p.apply_async(ffmpeg_extract_frame,
args=(t, video_filepath, tfp, ffmpeg_exe)),
tfp
)
p.close()
# Check for failures
extracted_frames = []
for f, ofp in frames_to_process.iteritems():
r, tfp = rmap[f]
r.get() # wait for finish
if not os.path.isfile(tfp):
_log.warn("Failed to generated file for frame %d", f)
else:
extracted_frames.append(f)
os.rename(tfp, ofp)
p.join()
del p
os.removedirs(tmp_extraction_dir)
_log.debug("Frame extraction complete")
return extracted_frames
# Determine frames to extract from video
extract_indices = set()
if frames:
log.debug("Only extracting specified frames: %s", frames)
extract_indices.update(frames)
else:
log.debug("Determining frames needed for specification: "
"offset: %f, interval: %f, max_duration: %f",
second_offset, second_interval, max_duration)
extract_indices.update(iter_frames_for_interval())
if not extract_indices:
return {}
# frame/filename map that will be returned based on requested frames
frame_map = dict(
(i, os.path.join(frame_output_dir,
filename_for_frame(i, output_image_ext)))
for i in extract_indices
)
###
# Acquire a file-base lock in output directory so that we don't conflict
# with another process extracting frames to the same directory.
#
    # NOTE: This method is prone to starvation if many processes are trying
    # to extract frames for the same video, but this has not yet been a
    # problem given existing use cases.
#
lock_file = os.path.join(frame_output_dir, '.lock')
log.debug("Acquiring file lock in '%s'...", frame_output_dir)
while not file_utils.exclusive_touch(lock_file):
# This is sufficiently small to be fine grained, but not so small to
# burn the CPU.
time.sleep(0.01)
log.debug("Acquiring file lock -> Acquired!")
try:
###
        # Determine frames to actually extract based on existing files (if any)
#
#: :type: dict[int, str]
frames_to_process = {}
existing_frames = []
for i, img_file in sorted(frame_map.items()):
if not os.path.isfile(img_file):
log.debug('frame %d needs processing', i)
frames_to_process[i] = img_file
else:
existing_frames.append(i)
###
# Extract needed frames via hook function that provides
# implementation.
#
if frames_to_process:
frames_extracted = extract_frames(frames_to_process)
if (len(existing_frames) + len(frames_extracted)) == 0:
raise RuntimeError("Failed to extract any frames for video")
return frame_map
finally:
os.remove(lock_file)
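# Hedged usage sketch for ffmpeg_extract_frame_map(), not part of the original
# module; paths and parameters are hypothetical:
#
#     frame_map = ffmpeg_extract_frame_map(
#         '/tmp/smqtk_work', '/tmp/example.mp4',
#         second_offset=1.0, second_interval=2.0, parallel=4)
#     for frame_index, image_path in sorted(frame_map.items()):
#         print frame_index, image_path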
|
{
"content_hash": "c0d1b2a6f696ff8fcbc53aab373bee25",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 80,
"avg_line_length": 35.89518413597734,
"alnum_prop": 0.5960855496803725,
"repo_name": "Purg/SMQTK",
"id": "f03d6a7c1349bfcbeb006bd6a7b503e3a53defb0",
"size": "12671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/smqtk/utils/video_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "93558"
},
{
"name": "C++",
"bytes": "812600"
},
{
"name": "CMake",
"bytes": "68672"
},
{
"name": "CSS",
"bytes": "2297"
},
{
"name": "Cuda",
"bytes": "69131"
},
{
"name": "HTML",
"bytes": "79601"
},
{
"name": "Java",
"bytes": "97253"
},
{
"name": "JavaScript",
"bytes": "123457"
},
{
"name": "Jupyter Notebook",
"bytes": "85336"
},
{
"name": "M4",
"bytes": "61280"
},
{
"name": "Makefile",
"bytes": "4344"
},
{
"name": "Matlab",
"bytes": "23266"
},
{
"name": "Perl",
"bytes": "3762394"
},
{
"name": "Python",
"bytes": "1281460"
},
{
"name": "Shell",
"bytes": "26340"
},
{
"name": "TeX",
"bytes": "74581"
}
],
"symlink_target": ""
}
|
from django.template import Library
register = Library()
@register.simple_tag
def get_object_properties(object, properties):
"""Returns first non empty property of given object."""
properties = properties.split(',')
    for prop in properties:
        attribute = getattr(object, prop, '')
        if attribute:
            # The untranslated object decides which property is set; the value
            # returned comes from its translated counterpart.
            return getattr(object.translated, prop)
return ''
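# Hedged usage sketch, not part of the original module: assuming this file is
# loaded as the "attributes" template tag library (its file name), the tag can
# be used in a template roughly like this; the property names are illustrative:
#
#     {% load attributes %}
#     {% get_object_properties product "seo_title,name" %}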
|
{
"content_hash": "8cc33c7f96abf2f2b5295878faee85fa",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 28.642857142857142,
"alnum_prop": 0.683291770573566,
"repo_name": "UITools/saleor",
"id": "d68a16392608f788c3795010fdc6e9b890233c94",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/core/templatetags/attributes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
}
|
import smtplib
import random
import string
def send_recovery_email(user):
recovery_code = "".join(random.choice(string.ascii_uppercase + string.ascii_uppercase) for _ in range(10))
    # Headers must start at column zero and be separated from the body by a
    # blank line, otherwise the message is malformed.
    message = (
        "From: NoReply <noreply@artizanz.com>\n"
        "To: {0} <{1}>\n"
        "MIME-Version: 1.0\n"
        "Content-Type: text/html\n"
        "Subject: Artizanz Password Recovery\n"
        "\n"
        "Your recovery code is {2}\n"
    ).format(user.username, user.email, recovery_code)
try:
smtp_obj = smtplib.SMTP("localhost")
smtp_obj.sendmail("noreply@artizanz.com", [user.email], message)
return True
except smtplib.SMTPException:
return False
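# Hedged usage sketch, not part of the original module: `user` can be any
# object with `username` and `email` attributes, and a local SMTP server must
# be listening on port 25 for the send to succeed.
#
#     class _User(object):
#         username = "alice"
#         email = "alice@example.com"
#
#     if not send_recovery_email(_User()):
#         print "failed to send recovery email"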
|
{
"content_hash": "95a94503365b90280f53c372d6a60e4b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 110,
"avg_line_length": 29.347826086956523,
"alnum_prop": 0.6340740740740741,
"repo_name": "BrambleLLC/Artizanz",
"id": "a34c9f96cb12b4de77d8e7614c479aed8ed10d18",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/email_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2801"
},
{
"name": "HTML",
"bytes": "64987"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "22467"
}
],
"symlink_target": ""
}
|
'''
# Shortest Word. Problem URL: https://www.codewars.com/kata/57cebe1dc6fdc20c57000ac9/train/python
'''
import unittest
class TestCases(unittest.TestCase):
def setUp(self):
pass
def test1(self):self.assertEqual(find_short("bitcoin take over the world maybe who knows perhaps"), 3)
def test2(self):self.assertEqual(find_short("turns out random test cases are easier than writing out basic ones"), 3)
def test3(self):self.assertEqual(find_short("lets talk about javascript the best language"), 3)
def test4(self):self.assertEqual(find_short("i want to travel the world writing code one day"), 1)
def test5(self):self.assertEqual(find_short("Lets all go on holiday somewhere very cold"), 2)
def find_short(s):
return min([len(word) for word in s.split(" ")])
if __name__ == '__main__':
unittest.main()
'''
Reference solution:
'''
|
{
"content_hash": "deada1acfbb9f53757adc4a96bb28f8d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 121,
"avg_line_length": 27.28125,
"alnum_prop": 0.6849942726231386,
"repo_name": "karchi/codewars_kata",
"id": "801dcec7e7ff2517aeec16c8655c19d63c503ad0",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "已完成/Shortest Word.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122148"
}
],
"symlink_target": ""
}
|
"""Runs all of the tests in parallel with or without integration server.
This package doesn't need any setup and can do one of three things:
execute all unit, functional and integration tests:
python scripts/project.py --test=*
execute specific tests packages, classes or methods:
python scripts/project.py --test tests.unit.test_classes
execute a complete official Course Builder release verification process:
python scripts/project.py --release
It's possible to run this package headless, while still fully executing all
of the Selenium integration tests. Several tools are available to get this
done. The one we know works is xvfb -- virtual framebuffer display server.
Here is an example how to use it:
ssh $my_developer_box
sudo apt-get install xvfb
Xvfb :99 -ac & export DISPLAY=:99
cd /coursebuilder
python scripts/project.py --release
Good luck!
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import sys
import argparse
import datetime
import difflib
import logging
import multiprocessing
import re
import signal
import shutil
import socket
import stat
import subprocess
import threading
import time
import urllib2
import yaml
import zipfile
# defer some imports
all_tests = None
manifests = None
schema_fields = None
schema_transforms = None
INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'
# List of dot-qualified modules that may not be imported by CB code.
DISALLOWED_IMPORTS = [
'google.appengine.api.users',
]
# Map of relative cb path -> dot qualified import target of exceptions to
# DISALLOWED_IMPORTS.
DISALLOWED_IMPORTS_EXCEPTIONS = {
'common/users.py': ['google.appengine.api.users'],
'tests/functional/common_users.py': ['google.appengine.api.users'],
}
PY_FILE_SUFFIX = '.py'
LOG_LINES = []
LOG_LOCK = threading.Lock()
# Path to a log file, set if --also_log_to_file is supplied.
LOG_PATH = None
def _log_file(also_log_to_file):
"""Generates a log file path if also_log_to_file is *exactly* True."""
if also_log_to_file is not True:
return also_log_to_file
script_name = os.path.basename(sys.argv[0]).replace('.', '_')
log_now = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
return '/tmp/{}_{}.log'.format(script_name, log_now)
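# Hedged example, not part of the original file: with the flag supplied bare,
# _log_file(True) produces something like '/tmp/project_py_230101_120000.log',
# while an explicit path is returned unchanged:
#
#     assert _log_file('/tmp/my.log') == '/tmp/my.log'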
# exact count of compiled .mo catalog files included in release; change this
# when new files are added. NOTE: common.locales.LOCALES_DISPLAY_NAMES must
# be kept in sync with the locales supported.
EXPECTED_MO_FILE_COUNT = 58
PRODUCT_NAME = 'coursebuilder'
# name of the response header used to transmit handler class name;
# keep in sync with the same value in controllers/sites.py
GCB_HANDLER_CLASS_HEADER_NAME = 'gcb-handler-class'
# a list of test URLs that can be served statically or dynamically
STATIC_SERV_URLS = [
('/modules/oeditor/_static/js/butterbar.js', None), # always static
('/static/codemirror/lib/codemirror.js', 'CustomZipHandler'),
('/static/yui_3.6.0/yui/build/yui/yui.js', 'CustomZipHandler'),
('/static/inputex-3.1.0/src/loader.js', 'CustomZipHandler'),
(
'/static/2in3/2in3-master/dist/2.9.0/build/yui2-editor/yui2-editor.js',
'CustomZipHandler'),
]
# a list of test URLs served by the combo zip handler
COMBO_SERV_URLS = [
(
'/static/combo/inputex?'
'src/inputex/assets/skins/sam/inputex.css&'
'src/inputex-list/assets/skins/sam/inputex-list.css',
'CustomCssComboZipHandler'),
]
# A directory, which all components of this file should treat as project root
BUILD_DIR = None
TEST_CLASS_NAME_ANY = '*'
# When cleaning unknown files from test arena, WARN IN BIG LETTERS if
# any file with one of these suffixes is found. (This pretty much amounts
# to ignoring .pyc and files with no suffixes, but that's probably right)
VALID_FILE_SUFFIXES = ('cfg', 'css', 'csv', 'html', 'ico', 'js', 'json',
'md', 'mo', 'neo4j', 'png', 'po', 'py', 'pylintrc',
'rc', 'sh', 'sql', 'txt', 'xml', 'yaml', 'zip')
IGNORE_PREFIXES = ('lib/', 'internal/', './PRESUBMIT.py', './static.yaml',
'tests/internal')
IGNORE_REGEXES = [
re.compile(r'^\./coursebuilder_\d{8,8}_\d{6,6}.zip$'),
re.compile(r'^\./log_\d{8,8}_\d{6,6}.txt$'),
]
def build_dir():
"""Convenience function to access BUILD_DIR."""
return BUILD_DIR
def make_default_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--test',
help='A dotted module name of the test(s) to run; '
'use "*" to run all tests',
type=str, default=None)
parser.add_argument(
'--release',
help='Whether to run an entire release validation process',
action='store_true')
parser.add_argument(
'--skip_integration', help='Whether to skip integration tests',
action='store_true')
parser.add_argument(
'--skip_non_integration',
help='Whether to skip functional and unit tests',
action='store_true')
parser.add_argument(
'--skip_integration_setup',
help='Skip integration server pre-test test.',
action='store_true')
parser.add_argument(
'--skip_pylint', help='Whether to skip pylint tests',
action='store_true')
parser.add_argument(
'--ignore_pylint_failures',
help='Whether to ignore pylint test failures',
action='store_true')
parser.add_argument(
'--deep_clean',
help='Whether to delete all temporary files, resources and caches '
'before starting the release process',
action='store_true')
parser.add_argument(
'--verbose',
help='Print more verbose output to help diagnose problems',
action='store_true')
parser.add_argument(
'--also_log_to_file',
metavar='LOG_FILE', nargs='?', default=False, const=True,
help='If option is present, log to a file in addition to the console. '
        'If supplied *without* a log file path (i.e. simply as a flag), a file '
'path of the following form is used: {}'.format(_log_file(True)))
parser.add_argument(
'--server_log_file',
help='If present, capture stdout and stderr from integration server '
'to the named file. This is helpful when diagnosing a problem with '
'the server that does not manifest when the server is started outside '
'tests.')
parser.add_argument(
'--concurrent_tests',
type=int,
help='Number of tests to run concurrently. Defaults to two for each '
'processor on your computer.')
parser.add_argument(
'--pdb',
action='store_true',
help='Automatically enter a debugger when a test fails or errors.')
return parser
def ensure_port_available(port_number, quiet=False):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(('localhost', port_number))
except socket.error, ex:
if not quiet:
logging.error('''
==========================================================
Failed to bind to port %d.
This probably means another CourseBuilder server is
already running. Be sure to shut down any manually
started servers before running tests.
Kill running server from command line via:
lsof -i tcp:%d -Fp | tr -d p | xargs kill -9
==========================================================''',
port_number, port_number)
raise ex
s.close()
def start_integration_server(server_log_file, env):
ensure_port_available(8000)
ensure_port_available(8081)
ensure_port_available(8082)
server_cmd = os.path.join(build_dir(), 'scripts', 'start_in_shell.sh')
return start_integration_server_process(
server_cmd,
set(['tests.integration.fake_visualizations']),
server_log_file, env=env)
def start_integration_server_process(
integration_server_start_cmd, modules, server_log_file, env):
if modules:
_fn = os.path.join(build_dir(), 'custom.yaml')
_st = os.stat(_fn)
os.chmod(_fn, _st.st_mode | stat.S_IWUSR)
fp = open(_fn, 'w')
fp.writelines([
'env_variables:\n',
' GCB_TEST_MODE: true\n'
' GCB_REGISTERED_MODULES_CUSTOM:\n'])
fp.writelines([' %s\n' % module for module in modules])
fp.close()
logging.info('Starting external server: %s', integration_server_start_cmd)
if server_log_file:
if not server_log_file.startswith('/tmp'):
raise ValueError(
'Server log file name should start with /tmp; '
'if it is in the local directory, the dev_appserver runtime '
'will notice the file contents change, and attempt to '
're-parse it, just-in-case. This will add some log lines, '
'so the file will change again, and will trigger a '
're-parse, which.... Just put the file in /tmp/...')
logfile = open(server_log_file, 'w')
else:
logfile = open(os.devnull, 'w')
server = subprocess.Popen(
integration_server_start_cmd, preexec_fn=os.setsid, stdout=logfile,
stderr=logfile, env=env)
time.sleep(3) # Wait for server to start up
return server, logfile
def stop_integration_server(server, logfile, modules):
server.kill() # dev_appserver.py itself.
logfile.close()
# The new dev appserver starts a _python_runtime.py process that isn't
# captured by start_integration_server and so doesn't get killed. Until it's
# done, our tests will never complete so we kill it manually.
os.killpg(server.pid, signal.SIGTERM)
    # wait for the process to terminate
while True:
try:
ensure_port_available(8081, quiet=True)
ensure_port_available(8000, quiet=True)
break
except: # pylint: disable=bare-except
time.sleep(0.25)
# clean up
if modules:
fp = open(
os.path.join(build_dir(), 'custom.yaml'), 'w')
fp.writelines([
'# Add configuration for your application here to avoid\n'
'# potential merge conflicts with new releases of the main\n'
'# app.yaml file. Modules registered here should support the\n'
'# standard CourseBuilder module config. (Specifically, the\n'
'# imported Python module should provide a method\n'
'# "register_module()", taking no parameters and returning a\n'
            '# models.custom_modules.Module instance.)\n'
'#\n'
'env_variables:\n'
'# GCB_REGISTERED_MODULES_CUSTOM:\n'
'# modules.my_extension_module\n'
'# my_extension.modules.widgets\n'
'# my_extension.modules.blivets\n'
])
fp.close()
class WithReleaseConfiguration(object):
"""Class to manage integration server using 'with' statement."""
def __init__(
self,
enable_integration_server, enable_static_serving, config):
self.enable_integration_server = enable_integration_server
self.enable_static_serving = enable_static_serving
self.server_log_file = config.parsed_args.server_log_file
def __enter__(self):
if self.enable_integration_server:
log(
'Starting integration server '
'(static serving %s)' % (
'enabled' if self.enable_static_serving else 'disabled'))
env = os.environ.copy()
env['GCB_ALLOW_STATIC_SERV'] = (
'true' if self.enable_static_serving else 'false')
self.server, self.logfile = start_integration_server(
self.server_log_file, env=env)
def __exit__(self, unused_type, unused_value, unused_traceback):
if self.enable_integration_server:
log('Stopping integration server')
stop_integration_server(
self.server, self.logfile,
set(['tests.integration.fake_visualizations']))
def log(message):
with LOG_LOCK:
line = '%s\t%s' % (
datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), message)
LOG_LINES.append(line)
print line
if LOG_PATH:
with open(LOG_PATH, 'a', 0) as also_log_to_file:
also_log_to_file.write('{}\n'.format(line))
def run(exe, strict=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
verbose=False):
"""Runs a shell command and captures the stdout and stderr output."""
p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
last_stdout, last_stderr = p.communicate()
    # communicate() returns complete strings (not line lists), so simply
    # concatenate the two streams.
    result = (last_stdout or '') + (last_stderr or '')
if p.returncode != 0 and verbose and 'KeyboardInterrupt' not in result:
exe_string = ' '.join(exe)
print '#########vvvvv########### Start of output from >>>%s<<< ' % (
exe_string)
print result
print '#########^^^^^########### End of output from >>>%s<<<' % (
exe_string)
if p.returncode != 0 and strict:
raise Exception('Error %s\n%s' % (p.returncode, result))
return p.returncode, result
class TaskThread(threading.Thread):
"""Runs a task in a separate thread."""
def __init__(self, func, name=None):
super(TaskThread, self).__init__()
self.func = func
self.exception = None
self.name = name
@classmethod
def execute_task_list(
cls, tasks,
chunk_size=None, runtimes_sec=None, fail_on_first_error=False):
if chunk_size is None:
chunk_size = len(tasks)
assert chunk_size > 0
assert chunk_size < 256
if runtimes_sec is None:
runtimes_sec = []
errors = []
todo = [] + tasks
running = set()
task_to_runtime_sec = {}
def on_error(error, task):
errors.append(error)
log(Exception(error))
log('Failed task: %s.' % task.name)
if fail_on_first_error:
raise Exception(error)
def update_progress():
log(
'Progress so far: '
'%s failed, %s completed, %s running, %s pending.' % (
len(errors), len(tasks) - len(todo) - len(running),
len(running), len(todo)))
last_update_on = 0
while todo or running:
# update progress
now = time.time()
update_frequency_sec = 30
if now - last_update_on > update_frequency_sec:
last_update_on = now
update_progress()
# check status of running jobs
if running:
for task in list(running):
task.join(1)
if task.isAlive():
start, end = task_to_runtime_sec[task]
now = time.time()
if now - end > 60:
log('Waiting over %ss for: %s' % (
int(now - start), task.name))
task_to_runtime_sec[task] = (start, now)
continue
if task.exception:
on_error(task.exception, task)
start, _ = task_to_runtime_sec[task]
now = time.time()
task_to_runtime_sec[task] = (start, now)
running.remove(task)
# submit new work
while len(running) < chunk_size and todo:
task = todo.pop(0)
running.add(task)
now = time.time()
task_to_runtime_sec[task] = (now, now)
task.start()
update_progress()
if errors:
raise Exception('There were %s errors' % len(errors))
# format runtimes
for task in tasks:
start, end = task_to_runtime_sec[task]
runtimes_sec.append(end - start)
def run(self):
try:
self.func()
except Exception as e: # pylint: disable=broad-except
self.exception = e
class FunctionalTestTask(object):
"""Executes a set of tests given a test class name."""
def __init__(self, test_class_name, verbose, debugger=False):
self.test_class_name = test_class_name
self.verbose = verbose
self.debugger = debugger
def run(self):
if self.verbose:
log('Running all tests in: %s.' % (self.test_class_name))
suite_sh = os.path.join(build_dir(), 'scripts', 'suite.sh')
command = ['sh', suite_sh, self.test_class_name]
if self.debugger:
command.append('--pdb')
result, self.output = run(command, stdout=None, verbose=self.verbose)
if result != 0:
raise Exception()
class DeveloperWorkflowTester(object):
def __init__(self, config):
self.build_dir = config.build_dir
def _run(self, cmd):
result, out = run(cmd, verbose=False)
if result != 0:
            raise Exception('Test failed:\n%s' % out)
return out
def assert_contains(self, needle, haystack):
if haystack.find(needle) == -1:
            raise Exception(
                'Expected to find %s, found:\n%s' % (needle, haystack))
def test_all(self):
log('Testing developer test workflow')
self.tests = TestsRepo(None)
self.test_class_name_expansion()
self.test_developer_test_workflow_with_test_sh()
self.test_developer_test_workflow_with_project_py()
self.test_developer_test_workflow_with_module_manifest()
def test_class_name_expansion(self):
"""Developer can test all methods of one class."""
# package name expands into class names
tests = self.tests.select_tests_to_run('tests.unit.')
assert 'tests.unit.test_classes.InvokeExistingUnitTest' in tests
assert 'tests.unit.test_classes.EtlRetryTest' in tests
# class name is preserved
tests = self.tests.select_tests_to_run(
'tests.unit.test_classes.InvokeExistingUnitTest')
assert 'tests.unit.test_classes.InvokeExistingUnitTest' in tests
# method name is preserved
tests = self.tests.select_tests_to_run(
'tests.unit.test_classes.InvokeExistingUnitTest.'
'test_string_encoding')
assert(
'tests.unit.test_classes.InvokeExistingUnitTest.'
'test_string_encoding' in tests)
def test_developer_test_workflow_with_test_sh(self):
"""Developer can test one method of one class with project.py."""
cmd = os.path.join(self.build_dir, 'scripts', 'project.py')
# test "module.module.ClassName.method_name" is supported
out = self._run([
'python', cmd, '--test',
'tests.unit.test_classes.'
'InvokeExistingUnitTest.test_string_encoding'])
self.assert_contains(
'tests.unit.test_classes.InvokeExistingUnitTest', out)
# test "method_name (module.module.ClassName)" is supported
out = self._run([
'python', cmd, '--test',
'test_same_formula_body (modules.math.math_tests.MathTagTests)'])
self.assert_contains(
'modules.math.math_tests.MathTagTests.test_same_formula_body', out)
def test_developer_test_workflow_with_project_py(self):
"""Developer can test one method of one class with project.py."""
cmd = os.path.join(self.build_dir, 'scripts',
'project.py')
out = self._run([
'python', cmd,
'--skip_pylint',
'--test', 'tests.unit.test_classes'])
self.assert_contains(
'tests.unit.test_classes.InvokeExistingUnitTest', out)
def test_developer_test_workflow_with_module_manifest(self):
"""Developer can seamlessly run a test declared in module manifest."""
cmd = os.path.join(self.build_dir, 'scripts', 'project.py')
out = self._run([
'python', cmd, '--test', 'modules.math.math_tests'])
self.assert_contains(
'modules.math.math_tests.MathTagTests', out)
class FilesRepo(object):
"""Provides access to all files."""
def __init__(self, config):
self.config = config
self.known_files = self._get_known_files()
self.module_known_files = self._get_module_known_files()
self.all_known_files = self.known_files + self.module_known_files
self.all_known_files.sort()
log('Modules bring %s new files' % (
len(self.module_known_files)))
def _get_known_files(self):
file_list_fn = '%s/scripts/all_files.txt' % self.config.build_dir
return open(file_list_fn).read().splitlines()
def _get_module_known_files(self):
known_files = []
for manifest in self.config.modules.module_to_manifest.values():
files = manifest.data.get('files')
if files:
known_files += files
return known_files
class TestsRepo(object):
"""Provides acces to all known tests."""
def __init__(self, config):
self.config = config
self.integration_tests = all_tests.ALL_INTEGRATION_TEST_CLASSES
self.non_integration_tests = all_tests.ALL_TEST_CLASSES
if config:
integration_tests, non_integration_tests = self._get_modules_tests()
self.integration_tests.update(integration_tests)
self.non_integration_tests.update(non_integration_tests)
self.non_integration_tests.update(self._get_all_third_party_tests())
def _get_all_third_party_tests(self):
yaml_path = os.path.join(
self.config.build_dir, 'scripts', 'third_party_tests.yaml')
if os.path.exists(yaml_path):
with open(yaml_path) as fp:
data = yaml.load(fp)
return data['tests']
else:
return {}
def _get_modules_tests(self):
integration_tests = {}
non_integration_tests = {}
for manifest in self.config.modules.module_to_manifest.values():
module_integration_tests, module_non_integration_tests = (
manifest.get_tests())
integration_tests.update(module_integration_tests)
non_integration_tests.update(module_non_integration_tests)
log(
'Modules bring %s integration and %s non-integration tests' % (
len(integration_tests), len(non_integration_tests)))
return integration_tests, non_integration_tests
def select_tests_to_run(self, test_class_name):
test_classes = {}
test_classes.update(self.integration_tests)
test_classes.update(self.non_integration_tests)
if test_class_name:
_test_classes = {}
for name in test_classes.keys():
# try matching '*'
if TEST_CLASS_NAME_ANY == test_class_name:
_test_classes.update({name: test_classes[name]})
continue
# try matching on the class name
if name.find(test_class_name) == 0:
_test_classes.update({name: test_classes[name]})
continue
# try matching on the method name
if test_class_name.find(name) == 0:
_test_classes.update({test_class_name: 1})
continue
if not _test_classes:
raise Exception(
'No tests found for "%s". (Did you remember to add the '
                    'test class to scripts/all_tests.py or your module\'s '
                    'manifest.yaml file?)' % test_class_name)
test_classes = _test_classes
sorted_names = sorted(
test_classes, key=lambda key: test_classes[key])
return test_classes
def is_a_member_of(self, test_class_name, set_of_tests):
for name in set_of_tests.keys():
# try matching on the class name
if name.find(test_class_name) == 0:
return True
# try matching on the method name
if test_class_name.find(name) == 0:
return True
return False
def _parse_test_name(self, name):
"""Attempts to convert the argument to a dotted test name.
If the test name is provided in the format output by unittest error
messages (e.g., "my_test (tests.functional.modules_my.MyModuleTest)")
then it is converted to a dotted test name
(e.g., "tests.functional.modules_my.MyModuleTest.my_test"). Otherwise
it is returned unmodified.
"""
if not name:
return name
match = re.match(
r"\s*(?P<method_name>\S+)\s+\((?P<class_name>\S+)\)\s*", name)
if match:
return "{class_name}.{method_name}".format(
class_name=match.group('class_name'),
method_name=match.group('method_name'),
)
else:
return name
def group_tests(self):
# get all applicable tests
test_classes = self.select_tests_to_run(
self._parse_test_name(self.config.parsed_args.test))
# separate out integration and non-integration tests
integration_tests = {}
non_integration_tests = {}
for test_class_name in test_classes.keys():
if self.is_a_member_of(
test_class_name, self.integration_tests):
target = integration_tests
else:
target = non_integration_tests
target.update(
{test_class_name: test_classes[test_class_name]})
# filter out according to command line args
if self.config.parsed_args.skip_non_integration:
log('Skipping non-integration tests at user request')
non_integration_tests = {}
if self.config.parsed_args.skip_integration:
            log('Skipping integration tests at user request')
integration_tests = {}
_all_tests = {}
_all_tests.update(non_integration_tests)
_all_tests.update(integration_tests)
return _all_tests, integration_tests, non_integration_tests
class ReleaseConfiguration(object):
"""Contains data and methods for a particular release configuration."""
def __init__(self, parsed_args, _build_dir):
self.parsed_args = parsed_args
self.build_dir = os.path.abspath(_build_dir)
self.modules = manifests.ModulesRepo(_build_dir)
log(
'Found %s modules with %s manifests' % (
len(self.modules.modules),
len(self.modules.module_to_manifest.keys())))
self.files = FilesRepo(self)
self.tests = TestsRepo(self)
def walk_folder_tree(home_dir, skip_rel_dirs=None):
fileset = set()
for dir_, _, files in os.walk(home_dir, followlinks=True):
reldir = os.path.relpath(dir_, home_dir)
if skip_rel_dirs:
skip = False
for skip_rel_dir in skip_rel_dirs:
if reldir.startswith('%s%s' % (skip_rel_dir, os.sep)):
skip = True
break
if skip:
continue
for filename in files:
relfile = os.path.join(reldir, filename)
fileset.add(relfile)
return sorted(list(fileset))
def write_text_file(file_name, text):
afile = open(file_name, 'w')
afile.write(text)
afile.close()
def _create_manifests(_build_dir, release_label):
manifest_file = os.path.join(_build_dir, 'VERSION')
third_party_file = os.path.join(_build_dir, 'lib/README')
write_text_file(manifest_file, 'Release: %s' % release_label)
write_text_file(
third_party_file,
"""
This folder contains various third party packages that Course Builder
depends upon. These packages are not developed by Google Inc., but
provided by the open-source developer community. Please review the
licensing terms for each individual package before use.""")
def purge(dir_name, pattern):
"""Deletes files matching pattern from a directory tree."""
for f in os.listdir(dir_name):
current = os.path.join(dir_name, f)
if not os.path.isfile(current):
purge(current, pattern)
else:
if re.search(pattern, f):
os.remove(current)
def remove_dir(dir_name):
"""Deletes a directory."""
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
if os.path.exists(dir_name):
raise Exception('Failed to delete directory: %s' % dir_name)
def chmod_dir_recursive(folder_name, mode):
"""Removes read-only attribute from all files and folders recursively."""
for root, unused_dirs, files in os.walk(folder_name):
for fname in files:
full_path = os.path.join(root, fname)
os.chmod(full_path, mode)
def _zip_all_files(target_dir, release_label):
"""Build and test a release zip file."""
zip_file_name = os.path.join(
target_dir, '%s_%s.zip' % (PRODUCT_NAME, release_label))
log('Zipping: %s' % zip_file_name)
chmod_dir_recursive(build_dir(), 0o777)
# build it
_out = zipfile.ZipFile(zip_file_name, 'w')
for root, unused_dirs, files in os.walk(build_dir()):
base = '/'.join(root.split('/')[3:])
base = os.path.join(PRODUCT_NAME, base)
for afile in files:
_out.write(os.path.join(root, afile),
os.path.join(base, afile))
_out.close()
# verify it
_in = zipfile.ZipFile(zip_file_name, 'r')
_in.testzip()
for afile in _in.filelist:
date = '%d-%02d-%02d %02d:%02d:%02d' % afile.date_time[:6]
_in.close()
def get_import_target(line):
"""Canonicalize import statements into a reliable form.
All of
import foo.bar.baz
from foo.bar import baz
from foo.bar import baz as quux
are canonicalized to
foo.bar.baz
Returns None if line does not contain an import statement. Note that we will
not catch imports done with advanced techniques (__import__, etc.).
"""
target = None
match = re.search(r'^import\ (.+)$', line)
if match:
target = match.groups()[0]
else:
match = re.search(r'^from\ (\S+)\ import\ (\S+)(\ as\ .+)?$', line)
if match:
target = '%s.%s' % tuple(match.groups()[0:2])
return target
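# Hedged sanity-check sketch, not part of the original file: each statement
# form shown in the docstring canonicalizes to the same dotted target.
#
#     assert get_import_target('import foo.bar.baz') == 'foo.bar.baz'
#     assert get_import_target('from foo.bar import baz') == 'foo.bar.baz'
#     assert get_import_target('from foo.bar import baz as quux') == 'foo.bar.baz'
#     assert get_import_target('x = 1') is None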
def is_disallowed_import_exception(path, target):
return target in DISALLOWED_IMPORTS_EXCEPTIONS.get(path, [])
def _assert_no_disallowed_imports(root):
for cb_path in walk_folder_tree(root):
if not os.path.splitext(cb_path)[1] == PY_FILE_SUFFIX:
continue
if cb_path.startswith('./'):
cb_path = cb_path[2:]
with open(os.path.join(root, cb_path)) as f:
for line in f.readlines():
target = get_import_target(line.strip())
if (target in DISALLOWED_IMPORTS and not
is_disallowed_import_exception(cb_path, target)):
raise Exception(
'Found disallowed import of "%s" in file: %s' % (
target, cb_path))
log('No banned imports found')
def count_files_in_dir(dir_name, suffix=None):
"""Counts files with a given suffix, or all files if suffix is None."""
count = 0
for f in os.listdir(dir_name):
current = os.path.join(dir_name, f)
if os.path.isfile(current):
if not suffix or current.endswith(suffix):
count += 1
else:
count += count_files_in_dir(current, suffix=suffix)
return count
def _enforce_file_count(config):
"""Check that we have exactly the files we expect; delete extras."""
skip_rel_dirs = ['_static']
known_files = config.files.all_known_files
# verify mo files
count_mo_files = count_files_in_dir(
config.build_dir, suffix='.mo')
if count_mo_files != EXPECTED_MO_FILE_COUNT:
raise Exception('Expected %s .mo catalogue files, found %s' %
(EXPECTED_MO_FILE_COUNT, count_mo_files))
if known_files:
# list files
all_files = walk_folder_tree(
config.build_dir, skip_rel_dirs=skip_rel_dirs)
# delete extras
remove_count = 0
remove_valid_looking_count = 0
for afile in all_files:
if afile not in known_files:
suffix = afile.rsplit('.', 1)[-1]
if (suffix in VALID_FILE_SUFFIXES and
not any([afile.startswith(p) for p in IGNORE_PREFIXES]) and
not any([regex.match(afile) for regex in IGNORE_REGEXES])):
log('Warning: Found a file that looks valid, but is not '
'listed in any manifest nor scripts/all_files.txt. '
'This is probably a problem: %s' % afile)
remove_valid_looking_count += 1
fn = os.path.join(config.build_dir, afile)
os.remove(fn)
remove_count += 1
if remove_count:
log('WARNING: removed %s unlisted files' % remove_count)
if remove_valid_looking_count:
raise ValueError('Please add names of valid-looking files to '
'manifests, or remove the spurious files.')
# list files again; check no extras
all_files = walk_folder_tree(
config.build_dir, skip_rel_dirs=skip_rel_dirs)
if all_files != known_files:
diff = difflib.unified_diff(
all_files, known_files, lineterm='')
raise Exception(
'Folder contents differs from expected:\n%s' % (
'\n'.join(list(diff))))
if known_files:
log('File count enforced: %s *.mo amongst %s other known files' % (
EXPECTED_MO_FILE_COUNT, len(known_files)))
else:
log('File count enforced: %s *.mo files' % EXPECTED_MO_FILE_COUNT)
def _setup_all_dependencies():
"""Setup all third party Python packages."""
common_sh = os.path.join(build_dir(), 'scripts', 'common.sh')
log('Installing dependencies by running %s' % common_sh)
result, output = run(['sh', common_sh], strict=True)
if result != 0:
raise Exception()
for line in output.split('\n'):
if not line:
continue
        # ignore garbage produced by the script; it has proven impossible to
        # fix the script to keep it from producing garbage
if 'grep: write error' in line or 'grep: writing output' in line:
continue
log(line)
def assert_handler(url, handler):
"""Verifies (via response headers) that URL is not served by CB handler."""
last_attempt = 4
url = INTEGRATION_SERVER_BASE_URL + url
for attempt in xrange(1, last_attempt):
try:
result = urllib2.urlopen(url, timeout=10)
break
except urllib2.URLError as e:
# Sometimes the server has not yet restarted and connections are
# refused, so the timeout mechanism won't retry. Do it manually.
log('Unable to open %s on attempt %s' % (url, attempt))
if attempt != last_attempt - 1:
time.sleep(5)
else:
raise e
assert result.getcode() == 200
headers = dict(result.headers)
specified_handler = headers.get(GCB_HANDLER_CLASS_HEADER_NAME, None)
if not specified_handler == handler:
raise Exception(
'Failed to find header %s with value %s in url %s '
'having response headers %s' % (
GCB_HANDLER_CLASS_HEADER_NAME, handler, url, headers))
def assert_gcb_allow_static_serv_is_disabled():
log('Making sure static serving disabled')
assert not os.path.exists(os.path.join(build_dir(), 'lib', '_static'))
for url, handler in STATIC_SERV_URLS:
assert_handler(url, handler)
for url, handler in COMBO_SERV_URLS:
assert_handler(url, handler)
def assert_gcb_allow_static_serv_is_enabled():
log('Making sure static serving enabled')
assert os.path.exists(os.path.join(build_dir(), 'lib', '_static'))
for url, _ in STATIC_SERV_URLS:
assert_handler(url, None)
def _run_all_tests(config):
_all_tests, integration_tests, non_integration_tests = (
config.tests.group_tests())
with_server = bool(integration_tests)
test_static_serv = not bool(config.parsed_args.test)
if test_static_serv and with_server:
with WithReleaseConfiguration(True, False, config):
assert_gcb_allow_static_serv_is_disabled()
with WithReleaseConfiguration(with_server, True, config):
if with_server and not config.parsed_args.skip_integration_setup:
if test_static_serv:
assert_gcb_allow_static_serv_is_enabled()
_run_tests(
{
'tests.integration.test_classes.'
'IntegrationServerInitializationTask': 1},
False, chunk_size=1, hint='setup')
if _all_tests:
_run_tests(
_all_tests, config.parsed_args.verbose,
chunk_size=_get_concurrent_test_count(config),
debugger=config.parsed_args.pdb)
def _get_concurrent_test_count(config):
if config.parsed_args.concurrent_tests is not None:
return config.parsed_args.concurrent_tests
try:
return 2 * multiprocessing.cpu_count()
except: # pylint: disable=bare-except
return 8
def _run_tests(
test_classes, verbose, chunk_size=16, hint='generic', debugger=False):
start = time.time()
task_to_test = {}
tasks = []
integration_tasks = []
# Prepare tasks
for test_class_name in test_classes:
test = FunctionalTestTask(test_class_name, verbose, debugger=debugger)
task = TaskThread(test.run, name='testing %s' % test_class_name)
task_to_test[task] = test
tasks.append(task)
# order tests by their size largest to smallest
tasks = sorted(
tasks,
key=lambda task: test_classes.get(task_to_test[task].test_class_name),
reverse=True)
# execute all tasks
log('Executing %s "%s" test suites' % (len(tasks), hint))
runtimes_sec = []
TaskThread.execute_task_list(
tasks, chunk_size=chunk_size, runtimes_sec=runtimes_sec)
# map durations to names
name_durations = []
for index, duration in enumerate(runtimes_sec):
name_durations.append((
round(duration, 2), task_to_test[tasks[index]].test_class_name))
# report all longest first
if name_durations:
        log('Reporting execution times for up to the 10 longest tests')
for duration, name in sorted(
name_durations, key=lambda name_duration: name_duration[0],
reverse=True)[:10]:
log('Took %ss for %s' % (int(duration), name))
# Check we ran all tests as expected.
total_count = 0
for task in tasks:
test = task_to_test[task]
# Check that no unexpected tests were picked up via automatic discovery,
# and that the number of tests run in a particular suite.py invocation
# matches the expected number of tests.
test_count = test_classes.get(test.test_class_name, None)
expected_text = 'INFO: All %s tests PASSED!' % test_count
if test_count is None:
log('%s\n\nERROR: ran unexpected test class %s' % (
test.output, test.test_class_name))
if expected_text not in test.output:
log('%s\n\nERROR: Expected %s tests to be run for the test class '
'%s, but found some other number.' % (
test.output, test_count, test.test_class_name))
raise Exception()
total_count += test_count
log('Ran %s tests in %s test classes; took %ss' % (
total_count, len(tasks), int(time.time() - start)))
def _run_lint():
# Wire outputs to our own stdout/stderr so messages appear immediately,
# rather than batching up and waiting for the end (linting takes a while)
path = os.path.join(build_dir(), 'scripts', 'pylint.sh')
status = subprocess.call(path, stdin=None, stdout=sys.stdout,
stderr=sys.stderr)
return status == 0
def _dry_run(parsed_args):
_run_tests(
all_tests.INTERNAL_TEST_CLASSES, parsed_args.verbose, hint="dry run")
def _lint(parsed_args):
if parsed_args.skip_pylint:
log('Skipping pylint at user request')
else:
if not _run_lint():
if parsed_args.ignore_pylint_failures:
log('Ignoring pylint test errors.')
else:
raise RuntimeError('Pylint tests failed.')
def _is_external_symlink(link_path, root_path):
is_external = (
os.path.islink(link_path)
and not os.path.realpath(link_path).startswith(root_path))
if is_external and link_path.startswith(os.path.realpath(link_path)):
raise Exception("Circular external symlink: {}".format(link_path))
return is_external
class CopyTask(object):
def __init__(self, from_path, to_path):
self.from_path = from_path
self.to_path = to_path
def perform(self):
if os.path.isdir(self.from_path):
shutil.copytree(self.from_path, self.to_path)
else:
shutil.copy2(self.from_path, self.to_path)
def _symlink_copy_task(path, source_dir, dest_dir):
return CopyTask(
os.path.realpath(path),
os.path.join(dest_dir, os.path.relpath(path, source_dir)))
def _do_copy_tasks(copy_tasks):
for task in copy_tasks:
task.perform()
def _copy_files(source_dir_name, build_dir_name):
"""Copies local files and files referenced by external symlinks"""
# Work-around for lack of 'nonlocal' keyword in this version of Python
external_copy_tasks = [[]]
def ignore_non_core_files(path, names):
"""Picks files to not copy: Ignore external and downloaded content."""
ignored_names = set([name for name in names if
_is_external_symlink(os.path.join(path, name), source_dir_name)])
external_copy_tasks[0] += [
_symlink_copy_task(
os.path.join(path, name), source_dir_name, build_dir_name)
for name in ignored_names]
# Don't copy 'lib' directory at the top level; release tests want to
# set up for static and nonstatic serving, so leave creation of lib
# for test-run time, rather than copying setup from developer work.
if 'app.yaml' in names and 'lib' in names:
ignored_names.add('lib')
return ignored_names
log('Copying local files...')
shutil.copytree(
source_dir_name, build_dir_name, symlinks=True,
ignore=ignore_non_core_files)
log('Copying external files...')
_do_copy_tasks(external_copy_tasks[0])
def _prepare_filesystem(
source_dir_name, target_dir_name, build_dir_name, deep_clean=False):
"""Prepare various directories used in the release process."""
log('Working directory: %s' % os.getcwd())
log('Source directory: %s' % source_dir_name)
log('Target directory: %s' % target_dir_name)
log('Build temp directory: %s' % build_dir_name)
remove_dir(build_dir_name)
_copy_files(source_dir_name, build_dir_name)
shell_env = _get_config_sh_shell_env()
if deep_clean:
dirs_to_remove = [
os.path.join(
os.path.expanduser("~"),
_get_coursebuilder_resources_path(shell_env)),
os.path.join(build_dir_name, 'lib')
]
log('Deep cleaning %s' % ', '.join(dirs_to_remove))
for _dir in dirs_to_remove:
remove_dir(_dir)
if not os.path.exists(target_dir_name):
log('Creating target directory: %s' % target_dir_name)
os.makedirs(target_dir_name)
def _save_log(target_dir, release_label):
log_path = os.path.join(target_dir, 'log_%s.txt' % release_label)
log('Saving log to: %s' % log_path)
write_text_file(log_path, '%s' % '\n'.join(LOG_LINES))
def _test_developer_workflow(config):
DeveloperWorkflowTester(config).test_all()
def _set_up_imports():
global all_tests
global manifests
global schema_fields
global schema_transforms
# when this runs, the environment is not yet setup; as a minimum,
# we need access to our own code; provide it here
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# no third party packages are available or will ever be available in this
# module including Google App Engine SDK, but we can safely access our own
# code, IF AND ONLY IF, it does not have any dependencies
# pylint: disable=redefined-outer-name
from scripts import all_tests
from common import manifests
from common import schema_fields
from common import schema_transforms
def _do_a_release(source_dir, target_dir, release_label):
"""Creates/validates an official release of CourseBuilder.
Args:
source_dir: a string specifying source folder for the project
target_dir: a string specifying target folder for output
release_label: a string with text label for the release
Here is what this function does:
- creates a target_dir, build_dir; cleans temp directories
- copies all files from source_dir to the build_dir
- deletes all files from build_dir that should not be released
- adds VERSION and manifest file
- scans all modules and adds tests/files listed in manifests
- sets up all third party dependencies
- brings up and tears down integration server
- checks banned imports
- tests developer workflow
- runs all tests in the build_dir and checks they pass
- creates a zip file
- copies a resulting zip file and log file to target_dir
"""
del LOG_LINES[:]
parsed_args = make_default_parser().parse_args()
start = time.time()
log('Starting Course Builder release: %s' % release_label)
_prepare_filesystem(
source_dir, target_dir, build_dir(), deep_clean=parsed_args.deep_clean)
_setup_all_dependencies()
_create_manifests(build_dir(), release_label)
config = ReleaseConfiguration(parsed_args, build_dir())
_dry_run(parsed_args)
_lint(parsed_args)
_assert_no_disallowed_imports(build_dir())
_test_developer_workflow(config)
_enforce_file_count(config)
_run_all_tests(config)
_enforce_file_count(config)
_zip_all_files(target_dir, release_label)
remove_dir(build_dir())
_save_log(target_dir, release_label)
log('Done release in %ss: find results in %s' % (
int(time.time() - start), target_dir))
return 0
def _test_only(parsed_args):
"""Runs a set of tests as specific by command line arguments."""
global BUILD_DIR # pylint: disable=global-statement
BUILD_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
log('Running task "test": %s' % parsed_args)
log('Working directory: %s' % os.getcwd())
log('Source directory: %s' % build_dir())
if parsed_args.deep_clean:
raise Exception(
'Unable to use --deep_clean flag without --do_a_release flag.')
_setup_all_dependencies()
if not parsed_args.test:
_lint(parsed_args)
return _run_all_tests(ReleaseConfiguration(parsed_args, BUILD_DIR))
def _test_and_release(parsed_args):
"""Runs an entire release process with all tests and configurations."""
release_label = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
global BUILD_DIR # pylint: disable=global-statement
BUILD_DIR = '/tmp/%s-build-%s' % (PRODUCT_NAME, release_label)
log('Running task "release": %s' % parsed_args)
return _do_a_release(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
os.getcwd(), release_label)
def _get_config_sh_shell_env():
config_sh = os.path.join(os.path.dirname(__file__), 'config.sh')
output = subprocess.check_output(
'source %s; env -0' % config_sh, executable='/bin/bash', shell=True)
env = {}
for line in output.split('\0'):
        parts = line.split('=', 1)
        if len(parts) == 2:
            env[parts[0]] = parts[1]
return env
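# A minimal sketch of how the parsed environment is used below; the variable
# names and paths are illustrative assumptions, except COURSEBUILDER_RESOURCES,
# which _get_coursebuilder_resources_path() expects config.sh to export:
#
#   env = _get_config_sh_shell_env()
#   # env ~= {'COURSEBUILDER_RESOURCES': '/home/dev/coursebuilder_resources',
#   #         'PATH': '/usr/local/bin:/usr/bin', ...}
#   _get_coursebuilder_resources_path(env)
#   # -> '/home/dev/coursebuilder_resources'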
def _get_coursebuilder_resources_path(shell_env):
coursebuilder_resources_path = shell_env.get('COURSEBUILDER_RESOURCES')
assert coursebuilder_resources_path
return coursebuilder_resources_path
def _also_log_to_file(parsed_args):
if parsed_args.also_log_to_file:
global LOG_PATH # pylint: disable=global-statement
LOG_PATH = _log_file(parsed_args.also_log_to_file)
root_logger = logging.getLogger()
file_handler = logging.FileHandler(LOG_PATH)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
root_logger.addHandler(console_handler)
        current_level = root_logger.getEffectiveLevel()
root_logger.setLevel(logging.INFO)
logging.info("%s\tLogging to both console *and* '%s'",
datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), LOG_PATH)
        root_logger.setLevel(current_level)
def main():
parsed_args = make_default_parser().parse_args()
_also_log_to_file(parsed_args)
if parsed_args.release:
return _test_and_release(parsed_args)
if parsed_args.test:
return _test_only(parsed_args)
return make_default_parser().print_help()
if __name__ == '__main__':
_set_up_imports()
main()
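# Invocation sketch. The option names below are inferred from the parsed_args
# attributes used in this file and may differ from the exact flags defined in
# make_default_parser():
#
#   python scripts/project.py --test                  # run tests only
#   python scripts/project.py --release               # full test-and-release
#   python scripts/project.py --test --also_log_to_file /tmp/project.log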
|
{
"content_hash": "17ad175fffb441ecb1d63503dc8a23a5",
"timestamp": "",
"source": "github",
"line_count": 1424,
"max_line_length": 80,
"avg_line_length": 35.438202247191015,
"alnum_prop": 0.6006063728598605,
"repo_name": "andela-angene/coursebuilder-core",
"id": "c5b00274487208d4477728b07d23e0c9dfc0bf85",
"size": "51081",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop-frontend",
"path": "coursebuilder/scripts/project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "729194"
},
{
"name": "HTML",
"bytes": "739873"
},
{
"name": "JavaScript",
"bytes": "720406"
},
{
"name": "Python",
"bytes": "6245524"
},
{
"name": "Shell",
"bytes": "53815"
}
],
"symlink_target": ""
}
|
from p4_core import *
from p4_headers import p4_header_instance, P4_NEXT, p4_field_reference
import p4_imperatives
from p4_expressions import p4_expression
from p4_hlir.util.OrderedSet import OrderedSet
from collections import OrderedDict, defaultdict
from p4_hlir.util.topo_sorting import Graph, Node
p4_parser_exception_keywords = p4_create_enum("p4_parse_state_keywords", [
"P4_PARSER_DROP",
])
P4_PARSER_DROP = p4_parser_exception_keywords.P4_PARSER_DROP
class p4_parser_exception (p4_object):
"""
TODO
"""
required_attributes = ["name", "set_statements", "return_or_drop"]
allowed_attributes = required_attributes + []
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
hlir.p4_parser_exceptions[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_parser_exceptions[name]
def build(self, hlir):
for idx, set_statement in enumerate(self.set_statements):
metadata_field_ref = set_statement[1]
metadata_value = set_statement[2]
metadata_field_ref = p4_field_reference(hlir, metadata_field_ref)
# metadata_value can either be latest.*, or *.* or int or
# (*, *) (for current)
if type(metadata_value) is int:
metadata_value = metadata_value
elif type(metadata_value) is tuple:
metadata_value = (metadata_value[1], metadata_value[2])
elif type(metadata_value) is str:
hdr, field = metadata_value.split(".")
if hdr == "latest":
metadata_value = p4_field_reference(
hlir,
self.latest_extraction.name + "." + field
)
else:
metadata_value = p4_field_reference(hlir, metadata_value)
else:
assert(False)
self.set_statements[idx] = (parse_call.set, metadata_field_ref, metadata_value)
if self.return_or_drop != P4_PARSER_DROP:
self.return_or_drop = hlir.p4_control_flows[self.return_or_drop]
class p4_parse_value_set(p4_object):
"""
TODO
"""
required_attributes = ["name"]
allowed_attributes = required_attributes + ["max_size"]
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
if not hasattr(self, "max_size"):
self.max_size = 128 # TODO: reasonable default?
hlir.p4_parse_value_sets[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_parse_value_sets[name]
def build(self, hlir):
pass
p4_parse_state_keywords = p4_create_enum("p4_parse_state_keywords", [
"P4_DEFAULT",
])
P4_DEFAULT = p4_parse_state_keywords.P4_DEFAULT
parse_call = p4_create_enum("parse_call", [
"extract",
"set",
"counter_init",
"counter_dec"
])
class p4_parse_state (p4_object):
"""
TODO
"""
required_attributes = ["name", "call_sequence", "return_statement"]
allowed_attributes = required_attributes
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
self.branch_on = []
self.branch_to = OrderedDict()
self.prev = OrderedSet()
self.latest_extraction = None
hlir.p4_parse_states[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_parse_states[name]
def build_body (self, hlir):
for idx, call in enumerate(self.call_sequence):
call_type = call[0]
if call_type == "extract":
extract_ref = call[1]
extract_ref = hlir.p4_header_instances[extract_ref]
self.latest_extraction = extract_ref
self.call_sequence[idx] = (parse_call.extract, extract_ref)
elif call_type == "set_metadata":
metadata_field_ref = call[1]
metadata_value = call[2]
metadata_field_ref = p4_field_reference(hlir, metadata_field_ref)
# metadata_value can either be latest.*, or *.* or int or
# (*, *) (for current)
if type(metadata_value) is int:
metadata_value = metadata_value
elif type(metadata_value) is tuple:
metadata_value = (metadata_value[0], metadata_value[1])
elif type(metadata_value) is str:
hdr, field = metadata_value.split(".")
if hdr == "latest":
metadata_value = p4_field_reference(
hlir,
self.latest_extraction.name + "." + field
)
else:
metadata_value = p4_field_reference(hlir, metadata_value)
elif type(metadata_value) is p4_expression:
metadata_value = metadata_value
metadata_value.resolve_names(hlir)
else:
print type(metadata_value)
assert(False)
self.call_sequence[idx] = (parse_call.set, metadata_field_ref, metadata_value)
def build_return (self, hlir):
return_type = self.return_statement[0]
if return_type == "immediate":
next_state = self.resolve_parse_target(hlir, self.return_statement[1])
self.branch_on = []
self.branch_to = OrderedDict({P4_DEFAULT:next_state})
elif return_type == "select":
select_exp = self.return_statement[1]
select_cases = self.return_statement[2]
# select_exp is a list of field_references
self.branch_on = []
for field_ref in select_exp:
if type(field_ref) is tuple: # current
field_ref = (field_ref[0], field_ref[1])
elif field_ref[:6] == "latest":
field_ref = p4_field_reference(
hlir,
self.latest_extraction.name + "." + field_ref[7:]
)
elif "." in field_ref:
field_ref = p4_field_reference(hlir, field_ref)
self.branch_on.append(field_ref)
self.branch_to = OrderedDict()
for case in select_cases:
value_list = case[0]
next_state = self.resolve_parse_target(hlir, case[1])
for value_or_masked in value_list:
value_type = value_or_masked[0]
if value_type == "value_set":
# still need to check that this is a valid reference
value_set_name = value_or_masked[1]
branch_case = hlir.p4_parse_value_sets[value_set_name]
elif value_type == "default":
branch_case = P4_DEFAULT
elif value_type == "value":
branch_case = value_or_masked[1]
elif value_type == "masked_value":
branch_case = (value_or_masked[1], value_or_masked[2])
self.branch_to[branch_case] = next_state
else:
assert(False)
def build (self, hlir):
self.build_body(hlir)
self.build_return(hlir)
def resolve_parse_target(self, hlir, target_name):
"""
Resolve the name of a possible next-state in a parse state to the actual
object it's referring to, either:
- Another p4_parse_state
- A control flow function, which is later (after validation) resolved to
the first table graph node arrived at in the function
"""
if type(target_name) is tuple:
assert(target_name[0] == "parse_error")
assert(target_name[1] in hlir.p4_parser_exceptions)
dst = hlir.p4_parser_exceptions[target_name[1]]
elif target_name in hlir.p4_parse_states:
# Parse state
dst = hlir.p4_parse_states[target_name]
dst.prev.add(self)
elif target_name in hlir.p4_control_flows:
# Control function
dst = hlir.p4_control_flows[target_name]
else:
assert(False)
return dst
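# Illustrative sketch (the state and exception names are hypothetical) of the
# three ways resolve_parse_target() dispatches on a target name:
#
#   state.resolve_parse_target(hlir, "parse_ipv4")   # -> another p4_parse_state
#   state.resolve_parse_target(hlir, "ingress")      # -> a p4_control_flow entry
#   state.resolve_parse_target(hlir, ("parse_error", "p4_pe_checksum"))
#                                                     # -> a p4_parser_exception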
|
{
"content_hash": "dcbf280ebffcdd1e0a2c2b44feedc3b8",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 94,
"avg_line_length": 36.375,
"alnum_prop": 0.5352806414662085,
"repo_name": "hanw/p4-hlir",
"id": "ff3f5870851aaf73b9e276f839d96f926d85e8e9",
"size": "9328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p4_hlir/hlir/p4_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "361669"
}
],
"symlink_target": ""
}
|
import json
import os
import unittest
from attributes.management import main
from lib import database
class MainTestCase(unittest.TestCase):
def setUp(self):
path = (
os.path.join(
os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
),
'config.json'
)
)
settings = None
with open(path, 'r') as file_:
settings = json.load(file_)['options']['datasource']
self.database = database.Database(settings)
def test_main(self):
# Arrange
project_id = 10868464 # andymeneely/squib
options = {
'threshold': 1,
'today': '2015-04-01'
}
# Act
try:
self.database.connect()
with self.database.cursor() as cursor:
(result, value) = main.run(project_id, '', cursor, **options)
finally:
self.database.disconnect()
# Assert
self.assertTrue(result)
self.assertEqual(6.75, value)
# Arrange
project_id = 67 # victorlin/loso
options = {
'threshold': 1,
'today': '2015-04-01'
}
# Act
try:
self.database.connect()
with self.database.cursor() as cursor:
(result, value) = main.run(project_id, '', cursor, **options)
finally:
self.database.disconnect()
# Assert
self.assertFalse(result)
self.assertEqual(0, value)
# Arrange
project_id = 18228981 # sleeper/oauth2
options = {
'threshold': 1,
'today': '2015-04-01'
}
# Act
try:
self.database.connect()
with self.database.cursor() as cursor:
(result, value) = main.run(project_id, '', cursor, **options)
finally:
self.database.disconnect()
# Assert
self.assertFalse(result)
self.assertEqual(0, value)
|
{
"content_hash": "39fa2272187a23ec1e0a1a99d00bd19f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 25.823529411764707,
"alnum_prop": 0.48291571753986334,
"repo_name": "RepoReapers/reaper",
"id": "5155bed40fef4b84c26b5120d8497248f4e70665",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/management/test_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "133"
},
{
"name": "JavaScript",
"bytes": "392"
},
{
"name": "Objective-C",
"bytes": "59"
},
{
"name": "Python",
"bytes": "145851"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
from __future__ import division
import numpy as np
from scipy import ndimage
from scipy.interpolate import UnivariateSpline
from scipy.optimize import leastsq
import abel
#########################################################################
# circularize.py
#
# Image circularization by following peak intensity vs angle
# see https://github.com/PyAbel/PyAbel/issues/186 for discussion
# and https://github.com/PyAbel/PyAbel/pull/195
#
# Steve Gibson and Dan Hickstein - ideas/code
# Jason Gascooke - ideas
#
# February 2017
#########################################################################
def circularize_image(IM, method="lsq", center=None, radial_range=None,
dr=0.5, dt=0.5, smooth=0, ref_angle=None,
inverse=False, return_correction=False):
"""
Corrects image distortion on the basis that the structure should be
circular.
This function is especially useful for correcting the image obtained with
a velocity-map-imaging spectrometer, in the case where there is distortion
of the Newton Sphere (ring) structure due to an imperfect electrostatic
lens or stray electromagnetic fields. The correction allows the
highest-resolution 1D photoelectron distribution to be extracted.
The algorithm splits the image into "slices" at many different angles
(set by `dt`) and compares the radial intensity profile of adjacent slices.
A scaling factor is found which aligns each slice profile with the previous
slice. The image is then corrected using a spline function that smoothly
connects the discrete scaling factors as a continuous function of angle.
    This circularization algorithm should only be applied to a well-centered
    image; otherwise, use the `center` keyword (described below) to
    center it.
Parameters
----------
IM : numpy 2D array
Image to be circularized.
method : str
Method used to determine the radial correction factor to align slice
profiles:
``argmax`` - compare intensity-profile.argmax() of each radial slice.
This method is quick and reliable, but it assumes that
the radial intensity profile has an obvious maximum.
The positioning is limited to the nearest pixel.
``lsq`` - minimize the difference between a slice intensity-profile
with its adjacent slice.
This method is slower and may fail to converge, but it
may be applied to images with any (circular) structure.
It aligns the slices with sub-pixel precision.
center : str, float tuple, or None
Pre-center image using :func:`abel.tools.center.center_image`.
`center` may be: `com`, `convolution`, `gaussian`,
`image_center`, `slice`, or a float tuple center :math:`(y, x)`.
radial_range : tuple, or None
Limit slice comparison to the radial range tuple (rmin, rmax), in
pixels, from the image center. Use to determine the distortion
correction associated with particular peaks. It is recommended to
select a region of your image where the signal-to-noise is highest,
        with sharp persistent (in angle) features.
dr : float
Radial grid size for the polar coordinate image, default = 0.5 pixel.
This is passed to :func:`abel.tools.polar.reproject_image_into_polar`.
Small values may improve the distortion correction, which is often of
sub-pixel dimensions, at the cost of reduced signal to noise for the
slice intensity profile. As a general rule, `dr` should be
significantly smaller than the radial "feature size" in the image.
dt : float
Angular grid size. This sets the number of radial slices, given by
        :math:`2\pi/dt`. Default = 0.5, ~ 13 slices. More slices, using
        smaller `dt`, may provide a more detailed angular variation of the
        correction, at the cost of reduced signal to noise in the correction
        function.
        Also passed to :func:`abel.tools.polar.reproject_image_into_polar`.
smooth : float
This value is passed to the :func:`scipy.interpolate.UnivariateSpline`
function and controls how smooth the spline interpolation is. A value
of zero corresponds to a spline that runs through all of the points,
and higher values correspond to a smoother spline function.
It is important to examine the relative peak position (scaling factor)
data and how well it is represented by the spline function. Use the
option ``return_correction=True`` to examine this data. Typically,
        `smooth` may remain zero; noisy data may require some smoothing.
ref_angle : `None` or float
Reference angle for which radial coordinate is unchanged.
Angle varies between :math:`-\pi` to :math:`\pi`, with zero angle
vertical.
`None` uses :func:`numpy.mean(radial scale factors)`, which attempts
to maintain the same average radial scaling. This approximation is
likely valid, unless you know for certain that a specific angle of
your image corresponds to an undistorted image.
inverse : bool
        Apply an inverse Abel transform to the **polar** coordinate image, to
        remove the background intensity. This may improve the signal to noise,
        allowing the weaker intensity features to be followed in angle.
Note that this step is only for the purposes of allowing the algorithm
to better follow peaks in the image. It does not affect the final
image that is returned, except for (hopefully) slightly improving the
precision of the distortion correction.
return_correction : bool
        Additional outputs, as described below.
Returns
-------
IMcirc : numpy 2D array, same size as input
Circularized version of the input image.
The following values are returned if ``return_correction=True``:
angles : numpy 1D array
Mid-point angle (radians) of each image slice.
radial_correction : numpy 1D array
Radial correction scale factor at each angular slice.
radial_correction_function : numpy function that accepts numpy.array
Function that may be used to evaluate the radial correction at any
angle.
"""
if center is not None:
# convenience function for the case image is not centered
IM = abel.tools.center.center_image(IM, center=center)
# map image into polar coordinates - much easier to slice
# cartesian (Y, X) -> polar (Radius, Theta)
polarIM, radial_coord, angle_coord =\
abel.tools.polar.reproject_image_into_polar(IM, dr=dr, dt=dt)
if inverse:
# pseudo inverse Abel transform of the polar image, removes background
# to enhance transition peaks
polarIM = abel.dasch.two_point_transform(polarIM.T).T
# more convenient 1-D coordinate arrays
angles = angle_coord[0] # angle coordinate
radial = radial_coord[:, 0] # radial coordinate
# limit radial range of polar image, if selected
if radial_range is not None:
subr = np.logical_and(radial > radial_range[0],
radial < radial_range[1])
polarIM = polarIM[subr]
radial = radial[subr]
# evaluate radial correction factor that aligns each angular slice
radcorr = correction(polarIM.T, angles, radial, method=method)
# spline radial correction vs angle
radial_correction_function = UnivariateSpline(angles, radcorr, s=smooth,
ext=3)
# apply the correction
IMcirc = circularize(IM, radial_correction_function, ref_angle=ref_angle)
if return_correction:
return IMcirc, angles, radcorr, radial_correction_function
else:
return IMcirc
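# A minimal usage sketch; "IM" and the radial_range values are illustrative
# assumptions, not defined in this module:
#
#   IMcirc, angles, radcorr, spline = circularize_image(
#       IM, method='lsq', radial_range=(100, 200), dr=0.5, dt=0.1,
#       return_correction=True)
#   # "spline" may then be evaluated at any angle, e.g. spline(0.0), to inspect
#   # the radial scale factor applied along the vertical direction.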
def circularize(IM, radial_correction_function, ref_angle=None):
"""
Remap image from its distorted grid to the true cartesian grid.
Parameters
----------
IM : numpy 2D array
Original image
radial_correction_function : funct
A function returning the radial correction for a given angle. It
should accept a numpy 1D array of angles.
"""
# cartesian coordinate system
Y, X = np.indices(IM.shape)
row, col = IM.shape
origin = (col//2, row//2) # odd image
# coordinates relative to center
X -= origin[0]
Y = origin[1] - Y # negative values below the axis
theta = np.arctan2(X, Y) # referenced to vertical direction
# radial scale factor at angle = ref_angle
if ref_angle is None:
factor = np.mean(radial_correction_function(theta))
else:
factor = radial_correction_function(ref_angle)
# radial correction
Xactual = X*factor/radial_correction_function(theta)
Yactual = Y*factor/radial_correction_function(theta)
# @DanHickstein magic
# https://github.com/PyAbel/PyAbel/issues/186#issuecomment-275471271
IMcirc = ndimage.interpolation.map_coordinates(IM,
(origin[1] - Yactual, Xactual + origin[0]))
return IMcirc
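# Worked sketch with hypothetical numbers: if radial_correction_function(theta)
# returns 1.02 along some angle while the reference factor is 1.00, then the
# output pixel at radius 100 along that angle is resampled from input radius
# 100*1.00/1.02 ~= 98, so features sitting slightly too close to the center in
# that slice are pushed back out to their true radius.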
def _residual(param, radial, profile, previous):
""" `scipy.optimize.leastsq` residuals function.
Evaluate the difference between a radial-scaled intensity profile
and its adjacent "previous" angular slice.
"""
radial_scaling, amplitude = param[0], param[1]
newradial = radial*radial_scaling
spline_prof = UnivariateSpline(newradial, profile, s=0, ext=3)
newprof = spline_prof(radial)*amplitude
# residual cf adjacent slice profile
return newprof - previous
def correction(polarIMTrans, angles, radial, method):
"""Determines a radial correction factors that align an angular slice
radial intensity profile with its adjacent (previous) slice profile.
Parameters
----------
polarIMTrans : numpy 2D array
Polar coordinate image, transposed :math:`(\\theta, r)` so that each
row is a single angle.
angles : numpy 1D array
Angle coordinates for one row of `polarIMTrans`.
radial : numpy 1D array
Radial coordinates for one column of `polarIMTrans`.
method : str
"argmax": radial correction factor from position of maximum intensity.
"lsq" : least-squares determine a radial correction factor that
will align a radial intensity profile with the previous, adjacent
slice.
"""
if method == "argmax":
# follow position of intensity maximum
pkpos = []
for ang, aslice in zip(angles, polarIMTrans):
profile = aslice
pkpos.append(profile.argmax()) # store index of peak position
# radial correction factor relative to peak max in first angular slice
radcorr = radial[pkpos[0]]/radial[pkpos]
elif method == "lsq":
# least-squares radially scale intensity profile matching previous slice
# initial guess fit parameters: radial correction factor, and amplitude
fitpar = np.array([1.0, 1.0])
# storage for the radial correction factors
radcorr = []
radcorr.append(1) # first slice nothing to compare with
previous = polarIMTrans[0]
for ang, aslice in zip(angles[1:], polarIMTrans[1:]):
profile = aslice
result = leastsq(_residual, fitpar, args=(radial, profile,
previous))
radcorr.append(result[0][0]) # radial scale factor direct from lsq
previous += _residual(result[0], radial, profile, previous)
# This "previous" slice corresponds to the previous slice intensity
            # profile that has been re-scaled. Thus, if the next slice is
# identical, it will be assigned a scale factor of 1.0
# use the determined radial scale factor, and amplitude parameters
# for the next slice
fitpar = result[0]
else:
raise ValueError("method variable must be one of 'argmax' or 'lsq',"
" not '{}'".format(method))
return radcorr
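# Sketch of how correction() is used above (polarIM, angles and radial as
# produced in circularize_image; "lsq" may be replaced by "argmax"):
#
#   radcorr = correction(polarIM.T, angles, radial, method="lsq")
#   # radcorr[0] == 1 by construction; radcorr[i] is the factor that rescales
#   # slice i so its radial profile lines up with the (rescaled) slice i-1.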
|
{
"content_hash": "fa536121ae59b8d5ac7fac3d421c6d49",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 80,
"avg_line_length": 38.05295950155763,
"alnum_prop": 0.6605812525583299,
"repo_name": "DhrubajyotiDas/PyAbel",
"id": "0c3a924ec5ea8fbc22f405654560a1a7a4833f7e",
"size": "12239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abel/tools/circularize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "205660"
}
],
"symlink_target": ""
}
|
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.style.main import change_directory
class ChangeDirectoryTest(LoggingTestCase):
_original_directory = '/original'
_checkout_root = '/chromium/src'
def setUp(self):
super(ChangeDirectoryTest, self).setUp()
self.filesystem = MockFileSystem(
dirs=[self._original_directory, self._checkout_root],
cwd=self._original_directory)
def _change_directory(self, paths, checkout_root):
return change_directory(
self.filesystem, paths=paths, checkout_root=checkout_root)
def _assert_result(self, actual_return_value, expected_return_value,
expected_log_messages, expected_current_directory):
self.assertEqual(actual_return_value, expected_return_value)
self.assertLog(expected_log_messages)
self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
def test_paths_none(self):
paths = self._change_directory(
checkout_root=self._checkout_root, paths=None)
self._assert_result(paths, None, [], self._checkout_root)
def test_paths_convertible(self):
paths = ['/chromium/src/foo1.txt', '/chromium/src/foo2.txt']
paths = self._change_directory(
checkout_root=self._checkout_root, paths=paths)
self._assert_result(paths, ['foo1.txt', 'foo2.txt'], [],
self._checkout_root)
def test_with_git_paths_unconvertible(self):
paths = ['/chromium/src/foo1.txt', '/outside/foo2.txt']
paths = self._change_directory(
checkout_root=self._checkout_root, paths=paths)
log_messages = [
"""WARNING: Path-dependent style checks may not work correctly:
One of the given paths is outside the repository of the current
working directory:
Path: /outside/foo2.txt
Checkout root: /chromium/src
Pass only files below the checkout root to ensure correct results.
See the help documentation for more info.
"""
]
self._assert_result(paths, paths, log_messages,
self._original_directory)
|
{
"content_hash": "eafaf160fe7ccd6c2f1cb471dc296704",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 39.35087719298246,
"alnum_prop": 0.6598305840392331,
"repo_name": "chromium/chromium",
"id": "43bb70cc9f2be02c9dbd28218a8399a75b6f040d",
"size": "3579",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "third_party/blink/tools/blinkpy/style/main_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.apps import AppConfig
class MarkersConfig(AppConfig):
name = 'markers'
|
{
"content_hash": "48d1ee78f1b0e583a45d5fadc4d577dd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "pabulumm/neighbors",
"id": "f703edede08ad35c0ea1f34204d1139fd51ad639",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markers/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "167622"
},
{
"name": "HTML",
"bytes": "221496"
},
{
"name": "JavaScript",
"bytes": "325471"
},
{
"name": "Python",
"bytes": "7896264"
},
{
"name": "Shell",
"bytes": "12645"
},
{
"name": "Smarty",
"bytes": "789"
}
],
"symlink_target": ""
}
|
import optproblems.cec2005
import numpy as np
import time
from ICA import *
import os
if __name__ == "__main__":
dim = 30
repeats = 10
evaluations = 10000*dim
countries = 30
imperialists = 6
if not os.path.exists('results'):
os.makedirs('results')
if not os.path.exists('convergence'):
os.makedirs('convergence')
np.random.seed(10)
f3 = optproblems.cec2005.F3(dim)
time1 = time.time()
results = np.array([ICA(f3, dim=dim, evaluation_criteria=True,
max_eval=evaluations, ncountries=countries, nimperialists=imperialists,
lower_bound=-100, upper_bound=100) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/ICA-results-30-3.txt", "w") as file:
print("F3: Shifted Rotated High Conditioned Elliptic Function", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/ICA-convergence-30-3.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f8 = optproblems.cec2005.F8(dim)
time1 = time.time()
results = np.array([ICA(f8, dim=dim, evaluation_criteria=True,
max_eval=evaluations, ncountries=countries, nimperialists=imperialists,
lower_bound=-32, upper_bound=32) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/ICA-results-30-8.txt", "w") as file:
print("F8: Shifted Rotated Ackley's Function with Global Optimum on Bounds", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/ICA-convergence-30-8.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f11 = optproblems.cec2005.F11(dim)
time1 = time.time()
results = np.array([ICA(f11, dim=dim, evaluation_criteria=True,
max_eval=evaluations, ncountries=countries, nimperialists=imperialists,
lower_bound=-0.5, upper_bound=0.5) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/ICA-results-30-11.txt", "w") as file:
print("F11: Shifted Rotated Weierstrass Function", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/ICA-convergence-30-11.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f19 = optproblems.cec2005.F19(dim)
time1 = time.time()
results = np.array([ICA(f19, dim=dim, evaluation_criteria=True,
max_eval=evaluations, ncountries=countries, nimperialists=imperialists,
lower_bound=-5, upper_bound=5) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/ICA-results-30-19.txt", "w") as file:
print("F19: Rotated Hybrid Composition Function with a Narrow Basin for the Global Optimum", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/ICA-convergence-30-19.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f20 = optproblems.cec2005.F20(dim)
time1 = time.time()
results = np.array([ICA(f20, dim=dim, evaluation_criteria=True,
max_eval=evaluations, ncountries=countries, nimperialists=imperialists,
lower_bound=-5, upper_bound=5) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/ICA-results-30-20.txt", "w") as file:
print("F20: Rotated Hybrid Composition Function with the Global Optimum on the Bounds", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/ICA-convergence-30-20.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
|
{
"content_hash": "79f13c7c4422b6aa6fb2e1f307eae4f8",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 111,
"avg_line_length": 36.554140127388536,
"alnum_prop": 0.5924377069175815,
"repo_name": "JJSrra/Research-SocioinspiredAlgorithms",
"id": "3674b4c113a8b39ac3285c97be56f7ad222427a5",
"size": "5739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ICA/ICAbenchmark30-3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "357836"
},
{
"name": "Shell",
"bytes": "23920"
}
],
"symlink_target": ""
}
|
import queue
import concurrent.futures
import time
import hashlib
import sys
import re
import string
import random
import urllib.request
import urllib.parse
from html.parser import HTMLParser
from collections import namedtuple
from classes.cache import Cache
from classes.results import Results
class HTMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.tagtext = []
def handle_data(self, d):
self.tagtext.append(d)
def get_tagtext(self):
return ''.join(self.tagtext)
def _clean_page(page):
	# this is the same method nmap's http.lua uses for error-page detection
# nselib/http.lua: clean_404
# remove information from the page that might not be static
# time
page = re.sub(b'(\d?\d:?){2,3}', b'',page)
page = re.sub(b'AM', b'',page, flags=re.IGNORECASE)
page = re.sub(b'PM', b'',page, flags=re.IGNORECASE)
# date with 4 digit year
	page = re.sub(b'(\d){8}', b'', page)
page = re.sub(b'\d{4}-\d{2}-\d{2}', b'',page)
page = re.sub(b'\d{4}/\d{2}/\d{2}', b'',page)
page = re.sub(b'\d{2}-\d{2}-\d{4}', b'',page)
page = re.sub(b'\d{2}/\d{2}/\d{4}', b'',page)
# date with 2 digit year
	page = re.sub(b'(\d){6}', b'', page)
page = re.sub( b'\d{2}-\d{2}-\d{2}', b'',page)
page = re.sub( b'\d{2}/\d{2}/\d{2}', b'',page)
# links and paths
page = re.sub( b'/[^ ]+', b'', page)
	page = re.sub(b'[a-zA-Z]:\\\\[^ ]+', b'', page)
# return the fingerprint of the stripped page
return hashlib.md5(page).hexdigest().lower()
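# Illustrative sketch: two error pages that differ only in volatile content
# (times, paths) should hash to the same fingerprint. The byte strings are
# made up for illustration:
#
#   a = _clean_page(b'Not found at 10:21:05 /missing.html')
#   b = _clean_page(b'Not found at 11:43:12 /other/page.html')
#   # a == b is expected, since times and paths are stripped before hashing.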
def _create_response(response):
R = Response()
url = response.geturl()
response_info = urllib.request.urlparse(url)
body = response.read()
# get the page text only
parser = HTMLStripper()
parser.feed(body.decode('utf-8', 'ignore'))
page_text = parser.get_tagtext()
R.set_body(body)
R.protocol = response_info.scheme
R.host = response_info.netloc
R.url = url
R.status = {'code': response.code, 'text': response.reason}
R.headers = {pair[0].lower():pair[1] for pair in response.getheaders()}
R.md5 = hashlib.md5(body).hexdigest().lower()
R.md5_404 = _clean_page(body)
R.md5_404_text = _clean_page(page_text.encode('utf-8', 'ignore'))
return(R)
#######################################################################
#
# Override urllib.request classes
#
#######################################################################
class OutOfScopeException(Exception):
def __init__(self, org_url, new_url):
self.original_netloc = org_url.netloc
self.new_netloc = new_url.netloc
def __str__(self):
return repr( "%s is not in scope %s" % (self.new_netloc, self.original_netloc) )
class UnknownHostName(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return "Unknown host: %s" % (self.url,)
class ErrorHandler(urllib.request.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
return(fp)
class RedirectHandler(urllib.request.HTTPRedirectHandler):
"""
This currently only checks if the redirection netloc is
	the same as the netloc for the request.
NOTE: this is very strict, as it will not allow redirections
from 'example.com' to 'www.example.com'
"""
def http_error_302(self, req, fp, code, msg, headers):
if 'location' in headers:
org_url = urllib.request.urlparse(req.get_full_url())
new_url = urllib.request.urlparse(headers['location'])
# if the location starts with '/' the path is relative
if headers['location'].startswith('/'):
new_url = new_url._replace(scheme=org_url.scheme, netloc=org_url.netloc)
if not new_url.netloc == org_url.netloc:
raise OutOfScopeException(org_url, new_url)
# call python's built-in redirection handler
return urllib.request.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
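# Illustrative sketch of the strict scope check above (hostnames hypothetical):
# a redirect from http://example.com/ to http://www.example.com/ changes the
# netloc, so http_error_302 raises OutOfScopeException instead of following it,
# whereas a relative redirect such as "Location: /login" keeps the original
# netloc and is handed to the built-in redirection handler.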
#######################################################################
#
# Custom request and response classes
#
#######################################################################
class Response:
"""
	This object is used to store response information.
	The normal http.client.HTTPResponse cannot be pickled,
	and pickling is required by the caching process.
"""
def __init__(self):
self.url = ''
self.protocol = ''
self.host = ''
self.status = {}
self.headers = {}
self.body = ''
self.md5 = ''
self.md5_404 = ''
self.should_be_error_page = False
chars = string.ascii_uppercase + string.digits
self.id = ''.join(random.choice(chars) for _ in range(16))
def get_url(self):
url_data = urllib.request.urlparse(self.url)
		if url_data.scheme == '':
			url_data = url_data._replace(scheme=self.protocol)
		if url_data.netloc == '':
			url_data = url_data._replace(netloc=self.host)
return url_data.geturl()
def set_body(self, body):
# check if the encoding is specified in the http header
content_type = 'Content-Type'.lower()
if content_type not in self.headers:
self.body = str(body, errors='replace')
else:
# find content-type definitions
content_types = {'text': False, 'charset': None}
for item in self.headers[content_type].split(';'):
if 'text' in item:
content_types['text'] = True
if 'charset' in item:
content_types['charset'] = item.split('=')[1]
# set the encoding to use
if content_types['charset'] is not None:
self.body = str(body, content_types['charset'], errors='replace')
elif content_types['text']:
self.body = str(body, 'ISO-8859-1', errors='replace')
else:
self.body = str(body, errors='replace')
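	# Sketch of the charset handling above (header values are illustrative):
	#   'Content-Type: text/html; charset=utf-8' -> str(body, 'utf-8', ...)
	#   'Content-Type: text/html'                -> str(body, 'ISO-8859-1', ...)
	#   no Content-Type header                   -> str(body, errors='replace')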
def __repr__(self):
def get_string(r):
string = r.url + '\n'
string += '%s %s\n' %(r.status['code'], r.status['text'])
string += '\n'.join([header +': '+ r.headers[header] for header in r.headers])
string += '\n\n'
string += 'MD5: ' + self.md5 + '\n'
string += 'MD5 Error page: ' + self.md5_404 + '\n'
return string
return get_string(self)
class Requester:
def __init__(self, options, data):
self.threads = options['threads']
self.proxy = options['proxy']
self.user_agent = options['user_agent']
self.data = data
self.cache = data['cache']
self.requested = data['requested']
self.printer = data['printer']
self.is_redirected = False
self.find_404s = False
self.fingerprintQueue = None
self.url_data = urllib.request.urlparse(options['url'])
if options['prefix']:
			self.url_data = self.url_data._replace(
				path=options['prefix'] + self.url_data.path)
self.url = urllib.request.urlunparse(self.url_data)
def _create_fetcher(self, redirect_handler=True):
args = [ErrorHandler]
if self.proxy == None:
args.append(urllib.request.ProxyHandler({}))
elif not self.proxy == False:
protocol = self.url_data.scheme
args.append(urllib.request.ProxyHandler({protocol: self.proxy}))
if redirect_handler:
args.append(RedirectHandler)
opener = urllib.request.build_opener(*args)
opener.addheaders = [('User-agent', self.user_agent)]
return opener
def detect_redirect(self):
parse = urllib.request.urlparse
# the original url
org_url = self.url_data
# get an opener doing redirections
try:
opener = self._create_fetcher(redirect_handler=False)
response = opener.open(self.url)
except:
raise UnknownHostName(self.url)
# the new url
new_url = parse(response.geturl())
# detect a redirection
new_loc = new_url.scheme + '://' + new_url.netloc
org_loc = org_url.scheme + '://' + org_url.netloc
self.is_redirected = not(new_loc == org_loc)
if self.is_redirected:
self.printer.print('%s redirects to %s\n' % (org_loc, new_loc), 2, '')
else:
self.printer.print('%s does not redirect\n' % (org_loc, ), 2, '')
# create an response object and add it to the cache
R = _create_response(response)
self.cache[new_loc] = R
self.cache[self.url] = R
return (self.is_redirected, new_loc)
def request(self, fp_list):
url = fp_list[0]['url']
complete_url = urllib.parse.urljoin(self.url, url)
R = None
# check if the url is out of scope
url_data = urllib.parse.urlparse(complete_url)
host_data = urllib.parse.urlparse(self.url)
if not url_data.netloc == host_data.netloc:
pass
elif not complete_url in self.cache:
try:
opener = self._create_fetcher()
request = urllib.request.Request(complete_url)
response = opener.open(request)
R = _create_response(response)
self.cache[complete_url] = R
self.cache[response.geturl()] = R
except Exception as e:
pass
else:
R = self.cache[complete_url]
return (fp_list, R)
def run(self, run_type, fp_lists):
with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
future_list = []
for fp_list in fp_lists:
future_list.append(executor.submit(self.request, fp_list))
for future in concurrent.futures.as_completed(future_list):
self.requested.put(future.result())
return self.requested
|
{
"content_hash": "3426065d8b17cb8e8cee3013cc20c24c",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 93,
"avg_line_length": 26.78078078078078,
"alnum_prop": 0.6437542049786947,
"repo_name": "akamajoris/wig",
"id": "2b292a0601a22532682609f7d0b912a8c2217287",
"size": "8918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/request2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "62799"
}
],
"symlink_target": ""
}
|
import os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='vapns-client',
version='0.2.1.6',
author='Sardar Yumatov',
author_email='ja.doma@gmail.com',
url='https://bitbucket.org/sardarnl/apns-client',
description='Python client for Apple Push Notification service (APNs)',
long_description=read('README.rst'),
packages=['vapnsclient', 'vapnsclient.backends'],
license="Apache 2.0",
keywords='apns push notification apple messaging iOS',
install_requires=['pyOpenSSL', 'six'],
classifiers = [ 'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules']
)
|
{
"content_hash": "1bcbf9a1f6bc79caf28764bec3130e51",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 83,
"avg_line_length": 35.67857142857143,
"alnum_prop": 0.6366366366366366,
"repo_name": "vine/apns-client",
"id": "b082620b44c864c866553166efc24a346e5df6e9",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "116232"
}
],
"symlink_target": ""
}
|
from .proxy_only_resource import ProxyOnlyResource
class PushSettings(ProxyOnlyResource):
"""Push settings for the App.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param is_push_enabled: Gets or sets a flag indicating whether the Push
endpoint is enabled.
:type is_push_enabled: bool
:param tag_whitelist_json: Gets or sets a JSON string containing a list of
tags that are whitelisted for use by the push registration endpoint.
:type tag_whitelist_json: str
:param tags_requiring_auth: Gets or sets a JSON string containing a list
of tags that require user authentication to be used in the push
registration endpoint.
Tags can consist of alphanumeric characters and the following:
'_', '@', '#', '.', ':', '-'.
Validation should be performed at the PushRequestHandler.
:type tags_requiring_auth: str
:param dynamic_tags_json: Gets or sets a JSON string containing a list of
dynamic tags that will be evaluated from user claims in the push
registration endpoint.
:type dynamic_tags_json: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'is_push_enabled': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_push_enabled': {'key': 'properties.isPushEnabled', 'type': 'bool'},
'tag_whitelist_json': {'key': 'properties.tagWhitelistJson', 'type': 'str'},
'tags_requiring_auth': {'key': 'properties.tagsRequiringAuth', 'type': 'str'},
'dynamic_tags_json': {'key': 'properties.dynamicTagsJson', 'type': 'str'},
}
def __init__(self, is_push_enabled, kind=None, tag_whitelist_json=None, tags_requiring_auth=None, dynamic_tags_json=None):
super(PushSettings, self).__init__(kind=kind)
self.is_push_enabled = is_push_enabled
self.tag_whitelist_json = tag_whitelist_json
self.tags_requiring_auth = tags_requiring_auth
self.dynamic_tags_json = dynamic_tags_json
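# A minimal construction sketch; the tag values are illustrative only, and only
# is_push_enabled is required per the _validation map above:
#
#   settings = PushSettings(
#       is_push_enabled=True,
#       tag_whitelist_json='["news", "sports"]',
#       tags_requiring_auth='["user_id"]')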
|
{
"content_hash": "d5251ab3393c4669c8c00fa37d46e593",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 126,
"avg_line_length": 41.15,
"alnum_prop": 0.6362899959497772,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "9c89aa6540ca2da48810eb52f5517a36631b9cec",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/push_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Tests for nova websocketproxy."""
import copy
import io
import socket
from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
from nova.console.securityproxy import base
from nova.console import websocketproxy
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_console_auth_token as fake_ca
from nova import utils
CONF = nova.conf.CONF
class NovaProxyRequestHandlerDBTestCase(test.TestCase):
def setUp(self):
super(NovaProxyRequestHandlerDBTestCase, self).setUp()
self.flags(console_allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'])
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = websocketproxy.NovaWebSocketProxy()
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def _fake_console_db(self, **updates):
console_db = copy.deepcopy(fake_ca.fake_token_dict)
console_db['token_hash'] = utils.get_sha256_str('123-456-789')
if updates:
console_db.update(updates)
return console_db
fake_header = {
'cookie': 'token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
def test_new_websocket_client_db(
self, mock_validate_port, mock_inst_get, mock_validate,
internal_access_path=None,
instance_not_found=False):
db_obj = self._fake_console_db(
host='node1',
port=10000,
console_type='novnc',
access_url_base='https://example.net:6080',
internal_access_path=internal_access_path,
instance_uuid=uuids.instance,
# This is set by ConsoleAuthToken.validate
token='123-456-789'
)
ctxt = nova_context.get_context()
obj = nova.objects.ConsoleAuthToken._from_db_object(
ctxt, nova.objects.ConsoleAuthToken(), db_obj)
mock_validate.return_value = obj
if instance_not_found:
mock_inst_get.side_effect = exception.InstanceNotFound(
instance_id=uuids.instance)
if internal_access_path is None:
self.wh.socket.return_value = '<socket>'
else:
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
if instance_not_found:
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
else:
with mock.patch('nova.context.get_admin_context',
return_value=ctxt):
self.wh.new_websocket_client()
mock_validate.assert_called_once_with(ctxt, '123-456-789')
mock_validate_port.assert_called_once_with(
ctxt, mock_inst_get.return_value, str(db_obj['port']),
db_obj['console_type'])
self.wh.socket.assert_called_with('node1', 10000, connect=True)
if internal_access_path is None:
self.wh.do_proxy.assert_called_with('<socket>')
else:
self.wh.do_proxy.assert_called_with(tsock)
def test_new_websocket_client_db_internal_access_path(self):
self.test_new_websocket_client_db(internal_access_path='vmid')
def test_new_websocket_client_db_instance_not_found(self):
self.test_new_websocket_client_db(instance_not_found=True)
class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaProxyRequestHandlerTestCase, self).setUp()
self.flags(allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'],
group='console')
self.server = websocketproxy.NovaWebSocketProxy()
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = self.server
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
fake_header = {
'cookie': 'token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_ipv6 = {
'cookie': 'token="123-456-789"',
'Origin': 'https://[2001:db8::1]:6080',
'Host': '[2001:db8::1]:6080',
}
fake_header_bad_token = {
'cookie': 'token="XXX"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_bad_origin = {
'cookie': 'token="123-456-789"',
'Origin': 'https://bad-origin-example.net:6080',
'Host': 'example.net:6080',
}
fake_header_allowed_origin = {
'cookie': 'token="123-456-789"',
'Origin': 'https://allowed-origin-example-2.net:6080',
'Host': 'example.net:6080',
}
fake_header_blank_origin = {
'cookie': 'token="123-456-789"',
'Origin': '',
'Host': 'example.net:6080',
}
fake_header_no_origin = {
'cookie': 'token="123-456-789"',
'Host': 'example.net:6080',
}
fake_header_http = {
'cookie': 'token="123-456-789"',
'Origin': 'http://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_malformed_cookie = {
'cookie': '?=!; token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
# ensure that token is masked when logged
connection_info = self.wh.msg.mock_calls[0][1][1]
self.assertEqual('***', connection_info.token)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_ipv6_url(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://[2001:db8::1]:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://[2001:db8::1]/?token=123-456-789"
self.wh.headers = self.fake_header_ipv6
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_token_invalid(self, validate):
validate.side_effect = exception.InvalidToken(token='XXX')
self.wh.path = "http://127.0.0.1/?token=XXX"
self.wh.headers = self.fake_header_bad_token
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
validate.assert_called_with(mock.ANY, "XXX")
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
tsock.send.assert_called_with(test.MatchType(bytes))
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path_err(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.assertRaises(exception.InvalidConnectionInfo,
self.wh.new_websocket_client)
validate.assert_called_with(mock.ANY, "123-456-789")
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path_rfb(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
HTTP_RESP = "HTTP/1.1 200 OK\r\n\r\n"
RFB_MSG = "RFB 003.003\n"
# RFB negotiation message may arrive earlier.
tsock.recv.side_effect = [HTTP_RESP + RFB_MSG,
HTTP_RESP]
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
tsock.recv.assert_has_calls([mock.call(4096, socket.MSG_PEEK),
mock.call(len(HTTP_RESP))])
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('socket.getfqdn')
def test_address_string_doesnt_do_reverse_dns_lookup(self, getfqdn):
request_mock = mock.MagicMock()
request_mock.makefile().readline.side_effect = [
b'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
b''
]
server_mock = mock.MagicMock()
client_address = ('8.8.8.8', 54321)
handler = websocketproxy.NovaProxyRequestHandler(
request_mock, client_address, server_mock)
handler.log_message('log message using client address context info')
self.assertFalse(getfqdn.called) # no reverse dns look up
self.assertEqual(handler.address_string(), '8.8.8.8') # plain address
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_bad_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_bad_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_allowed_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_allowed_origin
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_blank_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_blank_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_no_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_no_origin
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_https_origin_proto_http(
self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'http://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "https://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_https_origin_proto_ws(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url_base': 'ws://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "https://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_http_forwarded_proto_https(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url_base': 'wss://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
header = {
'cookie': 'token="123-456-789"',
'Origin': 'http://example.net:6080',
'Host': 'example.net:6080',
'X-Forwarded-Proto': 'https'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "https://127.0.0.1/"
self.wh.headers = header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_bad_console_type(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'bad-console-type'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_malformed_cookie(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_malformed_cookie
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
client_addr = ('8.8.8.8', 54321)
mock_server = mock.MagicMock()
        # This specifies that the server will be able to handle requests
        # other than websocket upgrades.
mock_server.only_upgrade = False
# Constructing a handler will process the mock_req request passed in.
handler = websocketproxy.NovaProxyRequestHandler(
mock_req, client_addr, mock_server)
# Collect the response data to verify at the end. The
# SimpleHTTPRequestHandler writes the response data to a 'wfile'
# attribute.
output = io.BytesIO()
handler.wfile = output
# Process the mock_req again to do the capture.
handler.do_GET()
output.seek(0)
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
        # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
        # which causes a 301 Moved Permanently response to be returned
        # instead, redirecting to a sanitized version of the URL with the
        # extra leading '/' characters removed.
        # See https://github.com/python/cpython/issues/87389 for details.
        # We consider either response to be valid for this test, which also
        # helps if and when the above fix gets backported to older versions
        # of python.
errmsg = result[0].decode()
expected_nova = '400 URI must not start with //'
expected_cpython = '301 Moved Permanently'
self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
# If we detect the cpython fix, verify that the redirect location is
# now the same url but with extra leading '/' characters removed.
if expected_cpython in errmsg:
location = result[3].decode()
location = location.removeprefix('Location: ').rstrip('\r\n')
self.assertTrue(
location.startswith('/example.com/%2F..'),
msg='Redirect location is not the expected sanitized URL',
)
def test_reject_open_redirect_3_slashes(self):
self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
"""Tests that we don't create a ComputeAPI object until we actually
need to use it to call the internal compute RPC API after token
validation succeeds. This way, we will not perform expensive object
creations when we receive unauthenticated (via token) messages. In the
past, it was possible for unauthenticated requests such as TCP RST or
        requests with invalid tokens to be used to DoS the console proxy
service.
"""
# We will simulate a request with an invalid token and verify it
# will not trigger a ComputeAPI object creation.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
b'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
b''
]
client_addr = ('8.8.8.8', 54321)
mock_server = mock.MagicMock()
handler = websocketproxy.NovaProxyRequestHandler(
mock_req, client_addr, mock_server)
# Internal ComputeAPI reference should be None when the request handler
# is initially created.
self.assertIsNone(handler._compute_rpcapi)
# Set up a token validation to fail when the new_websocket_client
# is called to handle the request.
mock_validate.side_effect = exception.InvalidToken(token='123-456-789')
# We expect InvalidToken to be raised during handling.
self.assertRaises(exception.InvalidToken, handler.new_websocket_client)
# And our internal ComputeAPI reference should still be None.
self.assertIsNone(handler._compute_rpcapi)
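    # A minimal sketch (not the nova implementation; names are illustrative)
    # of the lazy-initialization pattern this test exercises: the expensive
    # RPC client is only constructed on first access, i.e. after token
    # validation has already succeeded.
    #
    #     @property
    #     def compute_rpcapi(self):
    #         if not self._compute_rpcapi:
    #             self._compute_rpcapi = compute_rpcapi.ComputeAPI()
    #         return self._compute_rpcapi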
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_ssl_min_version_is_not_set(self, mock_select_ssl):
websocketproxy.NovaWebSocketProxy()
self.assertFalse(mock_select_ssl.called)
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_ssl_min_version_not_set_by_default(self, mock_select_ssl):
websocketproxy.NovaWebSocketProxy(ssl_minimum_version='default')
self.assertFalse(mock_select_ssl.called)
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_non_default_ssl_min_version_is_set(self, mock_select_ssl):
minver = 'tlsv1_3'
websocketproxy.NovaWebSocketProxy(ssl_minimum_version=minver)
mock_select_ssl.assert_called_once_with(minver)
class NovaWebsocketSecurityProxyTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaWebsocketSecurityProxyTestCase, self).setUp()
self.flags(allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'],
group='console')
self.server = websocketproxy.NovaWebSocketProxy(
security_proxy=mock.MagicMock(
spec=base.SecurityProxy)
)
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = self.server
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def get_header(header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
self.wh.headers.get = get_header
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
@mock.patch('nova.console.websocketproxy.TenantSock.close')
@mock.patch('nova.console.websocketproxy.TenantSock.finish_up')
def test_proxy_connect_ok(self, mock_finish, mock_close,
mock_port_validate, mock_get,
mock_token_validate):
mock_token_validate.return_value = nova.objects.ConsoleAuthToken(
instance_uuid=uuids.instance, host='node1', port='10000',
console_type='novnc', access_url_base='https://example.net:6080')
# The token and id attributes are set by the validate() method.
mock_token_validate.return_value.token = '123-456-789'
mock_token_validate.return_value.id = 1
sock = mock.MagicMock(
spec=websocketproxy.TenantSock)
self.server.security_proxy.connect.return_value = sock
self.wh.new_websocket_client()
self.wh.do_proxy.assert_called_with(sock)
mock_finish.assert_called_with()
        self.assertEqual(mock_close.call_count, 0)
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
@mock.patch('nova.console.websocketproxy.TenantSock.close')
@mock.patch('nova.console.websocketproxy.TenantSock.finish_up')
def test_proxy_connect_err(self, mock_finish, mock_close,
mock_port_validate, mock_get,
mock_token_validate):
mock_token_validate.return_value = nova.objects.ConsoleAuthToken(
instance_uuid=uuids.instance, host='node1', port='10000',
console_type='novnc', access_url_base='https://example.net:6080')
# The token attribute is set by the validate() method.
mock_token_validate.return_value.token = '123-456-789'
mock_token_validate.return_value.id = 1
ex = exception.SecurityProxyNegotiationFailed("Wibble")
self.server.security_proxy.connect.side_effect = ex
self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.wh.new_websocket_client)
        self.assertEqual(self.wh.do_proxy.call_count, 0)
mock_close.assert_called_with()
        self.assertEqual(mock_finish.call_count, 0)
|
{
"content_hash": "bf915a7eacd48a2be609d21f1625a14c",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 79,
"avg_line_length": 39.86143790849673,
"alnum_prop": 0.5910015084934741,
"repo_name": "mahak/nova",
"id": "fc25bef2bc3f77b6bd83d000d05ab018c004172f",
"size": "31092",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/console/test_websocketproxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'protocoin'
copyright = u'2013, Christian S. Perone'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'protocoindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'protocoin.tex', u'protocoin Documentation',
u'Christian S. Perone', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'protocoin', u'protocoin Documentation',
[u'Christian S. Perone'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'protocoin', u'protocoin Documentation',
u'Christian S. Perone', 'protocoin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "05e8e013ca74ba4982a63d4487774cba",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.44541484716157,
"alnum_prop": 0.7032301480484522,
"repo_name": "perone/protocoin",
"id": "b5ac8d39e1a736396fc62416c9249b428d14e1b7",
"size": "7850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "42688"
}
],
"symlink_target": ""
}
|
import speech_recognition as sr
from glob import glob
from ctypes import *
from contextlib import contextmanager
from array import array
from struct import pack
import sys
import os
import select
import pyaudio
import audioop
import wave
# Need 16kHz 16bit mono .wav
FREQ=16000
CHUNK=256
# File management stuff
DIR=os.getcwd()
PATH="" # I feel better when explicitly initializing variables :)
FILENAME=""
args = sys.argv
# Default values if none are specified in argv
if len(args) == 1:
existingFiles=len(glob(DIR+'/'+'recording-*.wav'))
PATH=DIR
if DIR!="/":
PATH=DIR+"/"
FILENAME='recording-%d.wav'%(existingFiles+1)
elif len(args) == 3:
PATH=args[-2]
FILENAME=args[-1]
if PATH[-1] != '/':
PATH=PATH+'/'
if FILENAME[-4:] != '.wav':
FILENAME=FILENAME+".wav"
else:
print "Error: arguments are wrong!"
print "Make sure to run the script as either:"
print "\t python recorder.py"
print "\t python recorder.py PATH FILENAME"
sys.exit(-1)
# Error management
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
def py_error_handler(filename, line, function, err, fmt):
pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noalsaerr():
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
yield
asound.snd_lib_error_set_handler(None)
# Ok now we can start
with noalsaerr():
p = pyaudio.PyAudio()
r = sr.Recognizer()
stream = p.open(format=pyaudio.paInt16,channels=1,rate=FREQ,input=True,frames_per_buffer=CHUNK,input_device_index=2)
rawData=array('h')
print "Recording. Press the ENTER key to stop."
while True:
if sys.stdin in select.select([sys.stdin],[],[],0)[0]:
raw_input()
break
try:
buf = stream.read(CHUNK)
except IOError as ex:
if ex[1] != pyaudio.paInputOverflowed:
raise
if buf:
data = array('h', buf)
# We'll use rms as a switch later.
rms = audioop.rms(data,2)
rawData.extend(data)
#rawData.append(data)
print "Writing to "+PATH+FILENAME+"..."
data = pack('<'+ ('h'*len(rawData)), *rawData)
width = p.get_sample_size(pyaudio.paInt16) #16 bit...
wf = wave.open(PATH+FILENAME, 'wb')
wf.setnchannels(1) #Mono...
wf.setsampwidth(width)
wf.setframerate(FREQ) # 16 kHz
wf.writeframes(data)
wf.close()
with sr.WavFile(PATH+FILENAME) as source:
audio = r.record(source)
print "Done!"
|
{
"content_hash": "dc0d9d6a14d79c2622147282d2786bfc",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 116,
"avg_line_length": 24.349514563106798,
"alnum_prop": 0.6614832535885168,
"repo_name": "adewynter/Tools",
"id": "3489a57f3450484601edbb54ee160a0d8712fbdc",
"size": "2867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Docker/dltoolboxio/scripts/recorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "23803"
},
{
"name": "CSS",
"bytes": "5812"
},
{
"name": "HTML",
"bytes": "7653"
},
{
"name": "Java",
"bytes": "15335"
},
{
"name": "JavaScript",
"bytes": "65744"
},
{
"name": "Python",
"bytes": "231496"
},
{
"name": "Scala",
"bytes": "10700"
},
{
"name": "Shell",
"bytes": "7958"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from inkdisplay import views
urlpatterns = patterns('',
url(r'^$', views.index, name='inkdisplay'),
url(r'^(?P<inkdisplay_name>\w+)/$', views.display, name='inkdisplay_display'),
)
|
{
"content_hash": "a9bc91653924ea057a7ca713aeee909a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 82,
"avg_line_length": 26,
"alnum_prop": 0.6837606837606838,
"repo_name": "volzotan/django-howl",
"id": "115e9f7a213acb3f78faae2d2ef31937fc43e1f0",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "howl/inkdisplay/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5972"
},
{
"name": "CSS",
"bytes": "12606"
},
{
"name": "Gnuplot",
"bytes": "696"
},
{
"name": "JavaScript",
"bytes": "3733"
},
{
"name": "Python",
"bytes": "55992"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import Permission, Group
from .forms import CustomUserChangeForm, CustomUserCreationForm
from .models import IUser, MasterProfile, BachelorProfile, IpikureSubscriber
from utils.admin import iportalen_admin_site, iportalen_superadmin_site
from django.db import models
from django.core.urlresolvers import reverse
def persons(self):
return ', '.join(['<a href="{url}">{name}</a>'.format(
url=reverse('iportalenadmin:user_managements_iuser_change', args=(x.pk,)),
name=x.username) for x in self.user_set.all().order_by('username')])
persons.allow_tags = True
def groups(self):
return ', '.join(['<a href="{url}">{name}</a>'.format(
url=reverse('iportalenadmin:auth_group_change', args=(x.pk,)),
name=x.name) for x in self.group_set.all().order_by('name')])
groups.allow_tags = True
class CustomPermission(admin.ModelAdmin):
list_display = ['name', groups]
list_display_links = ['name']
class CustomGroup(GroupAdmin):
list_display = ['name', persons]
list_display_links = ['name']
formfield_overrides = {models.ManyToManyField: {'widget': FilteredSelectMultiple("Rättigheter", is_stacked=False)}, }
class IUserAdmin(UserAdmin):
@staticmethod
def show_kobra_url(obj):
return '<a href="{:}" target="_blank">Uppdatera från kobra</a>'.format(obj.update_from_kobra_url)
show_kobra_url.allow_tags = True
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = ('username', 'email', 'is_staff', 'is_superuser', "show_kobra_url")
list_filter = ('groups', 'is_staff', 'is_superuser', 'is_active')
fieldsets = (
(None, {'fields': (
'username',
'password',
'first_name',
'last_name',
'address',
'zip_code',
'city',
'gender',
'allergies',
'start_year',
'current_year',
'klass',
'bachelor_profile',
'master_profile',
'groups',
'rfid_number',
'date_gdpr_accepted',
)}),
('Permissions', {'fields': ('is_active', 'is_superuser', 'is_staff', 'is_member', 'must_edit')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'is_staff', 'is_superuser')}
),
)
search_fields = ('username',)
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
iportalen_admin_site.unregister(Group)
iportalen_admin_site.register(Group, CustomGroup)
iportalen_admin_site.register(IUser, IUserAdmin)
iportalen_admin_site.register(MasterProfile)
iportalen_admin_site.register(BachelorProfile)
iportalen_admin_site.register(Permission, CustomPermission)
iportalen_admin_site.register(IpikureSubscriber)
iportalen_superadmin_site.register(IUser)
iportalen_superadmin_site.register(MasterProfile)
iportalen_superadmin_site.register(BachelorProfile)
iportalen_superadmin_site.register(Permission)
iportalen_superadmin_site.register(IpikureSubscriber)
|
{
"content_hash": "e1dfb6a83a052143e8fac013ba383877",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 121,
"avg_line_length": 33.323232323232325,
"alnum_prop": 0.6562594725674447,
"repo_name": "I-sektionen/i-portalen",
"id": "beaaba34feeae33b2c55e07c6031d09ca90a95c1",
"size": "3301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi/iportalen_django/user_managements/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18420"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "355692"
},
{
"name": "JavaScript",
"bytes": "415020"
},
{
"name": "Python",
"bytes": "660556"
},
{
"name": "SCSS",
"bytes": "72077"
},
{
"name": "Sass",
"bytes": "23813"
},
{
"name": "Shell",
"bytes": "1190"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from setuptools import find_packages
from distutils.cmd import Command
from distutils.extension import Extension
import os
import sys
import io
import subprocess
import platform
import numpy as np
from Cython.Build import cythonize
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
if "--line_trace" in sys.argv:
line_trace = True
profile = True
print("Build with line trace enabled ...")
sys.argv.remove("--line_trace")
else:
line_trace = False
profile = False
PACKAGE = "PyFin"
NAME = "Finance-Python"
__version__ = "0.9.8"
DESCRIPTION = "PyFin " + __version__
AUTHOR = "cheng li"
AUTHOR_EMAIL = "wegamekinglc@hotmail.com"
URL = 'https://github.com/ChinaQuants/Finance-Python'
def git_version():
from subprocess import Popen, PIPE
gitproc = Popen(['git', 'rev-parse', 'HEAD'], stdout=PIPE)
(stdout, _) = gitproc.communicate()
return stdout.strip().decode("utf8")
class test(Command):
description = "test the distribution prior to install"
user_options = [
('test-dir=', None,
"directory that contains the test definitions"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if sys.platform == 'win32':
command = "coverage run PyFin/tests/testSuite.py& coverage report& coverage html"
else:
command = "coverage run PyFin/tests/testSuite.py; coverage report; coverage html"
process = subprocess.Popen(command, shell=True)
process.wait()
class version_build(Command):
description = "test the distribution prior to install"
user_options = [
('test-dir=', None,
"directory that contains the test definitions"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
git_ver = git_version()[:10]
configFiles = ['PyFin/__init__.py', 'setup.py']
for configFile in configFiles:
file_handle = open(configFile, 'r')
lines = file_handle.readlines()
newFiles = []
for line in lines:
if line.startswith('__version__'):
line = line.split('+')[0].rstrip()
line = line + " + \"-" + git_ver + "\"\n"
newFiles.append(line)
file_handle.close()
os.remove(configFile)
file_handle = open(configFile, 'w')
file_handle.writelines(newFiles)
file_handle.close()
requirements = "requirements.txt"
ext_modules = [
"PyFin/Analysis/SeriesValues.pyx",
"PyFin/Analysis/transformer.pyx",
"PyFin/Analysis/SecurityValueHolders.pyx",
"PyFin/Analysis/CrossSectionValueHolders.pyx",
"PyFin/Analysis/TechnicalAnalysis/StatefulTechnicalAnalysers.pyx",
"PyFin/Analysis/TechnicalAnalysis/StatelessTechnicalAnalysers.pyx",
"PyFin/Math/Accumulators/impl.pyx",
"PyFin/Math/Accumulators/IAccumulators.pyx",
"PyFin/Math/Accumulators/StatefulAccumulators.pyx",
"PyFin/Math/Accumulators/StatelessAccumulators.pyx",
"PyFin/Math/Distributions/NormalDistribution.pyx",
"PyFin/Math/Distributions/norm.pyx",
"PyFin/Math/ErrorFunction.pyx",
"PyFin/Math/MathConstants.pyx",
"PyFin/Math/udfs.pyx",
"PyFin/DateUtilities/Calendar.pyx",
"PyFin/DateUtilities/Date.pyx",
"PyFin/DateUtilities/Period.pyx",
"PyFin/DateUtilities/Schedule.pyx",
"PyFin/Utilities/Asserts.pyx",
"PyFin/Utilities/Tools.pyx",
"PyFin/PricingEngines/BlackFormula.pyx",
"PyFin/PricingEngines/SabrFormulaImpl.pyx",
"PyFin/PricingEngines/SVIInterpolationImpl.pyx",
"PyFin/Enums/TimeUnits.pyx",
"PyFin/Enums/BizDayConventions.pyx",
"PyFin/Enums/DateGeneration.pyx",
"PyFin/Enums/Months.pyx",
"PyFin/Enums/NormalizingType.pyx",
"PyFin/Enums/OptionType.pyx",
"PyFin/Enums/Weekdays.pyx"
]
def generate_extensions(ext_modules, line_trace=False):
extensions = []
if line_trace:
print("define cython trace to True ...")
define_macros = [('CYTHON_TRACE', 1), ('CYTHON_TRACE_NOGIL', 1)]
else:
define_macros = []
for pyxfile in ext_modules:
ext = Extension(name='.'.join(pyxfile.split('/'))[:-4],
sources=[pyxfile],
define_macros=define_macros)
extensions.append(ext)
return extensions
if platform.system() != "Windows":
import multiprocessing
n_cpu = multiprocessing.cpu_count()
else:
n_cpu = 0
ext_modules_settings = cythonize(generate_extensions(ext_modules, line_trace),
compiler_directives={"embedsignature": True,
"linetrace": line_trace,
"profile": profile,
"language_level": 3},
nthreads=n_cpu)
setup(
name=NAME,
version=__version__,
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
packages=find_packages(),
include_package_data=False,
install_requires=io.open(requirements, encoding='utf8').read(),
classifiers=[],
cmdclass={"test": test,
"version_build": version_build},
ext_modules=ext_modules_settings,
include_dirs=[np.get_include()],
)
|
{
"content_hash": "006bfbaf0fc56ca78bd07d7121679ad1",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 93,
"avg_line_length": 29.934426229508198,
"alnum_prop": 0.6217597663380796,
"repo_name": "ChinaQuants/Finance-Python",
"id": "8273b21efbe79e638b98a1d75210e9f8b01ff722",
"size": "5503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "365812"
}
],
"symlink_target": ""
}
|
from django import template
from xgds_core.models import Constant
register = template.Library()
@register.simple_tag(name='constant')
def constant(name):
try:
return Constant.objects.get(name=name)
except:
return None
@register.simple_tag(name='constant_value')
def constant_value(name):
try:
return Constant.objects.get(name=name).value
except:
return None
|
{
"content_hash": "86093b5e8a7e3ecb30bfccf8d9cad705",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 52,
"avg_line_length": 20.75,
"alnum_prop": 0.6843373493975904,
"repo_name": "xgds/xgds_core",
"id": "f65841e528c6c77c65adf37826d45f81a661e8ff",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xgds_core/templatetags/core_constant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9920"
},
{
"name": "HTML",
"bytes": "50428"
},
{
"name": "JavaScript",
"bytes": "57295"
},
{
"name": "Python",
"bytes": "313670"
}
],
"symlink_target": ""
}
|
"""Clear out matching diffs.
Revision ID: 1cf84ddb034c
Revises: 4d7019b218d4
Create Date: 2013-10-28 08:46:17.516023
"""
# revision identifiers, used by Alembic.
revision = '1cf84ddb034c'
down_revision = '4d7019b218d4'
from alembic import context, op
from collections import defaultdict
from sqlalchemy.sql import and_, table, column
import os
import pickle
import sqlalchemy as sa
file_ = table('file',
column('id', sa.Integer),
column('sha1', sa.String),
column('size', sa.Integer))
testcase = table('testcase',
column('id', sa.Integer),
column('output_type', sa.Enum('diff', 'image', 'text',
name='output_type')))
testcaseresult = table('testcaseresult',
column('diff_id', sa.Integer),
column('submission_id', sa.Integer),
column('test_case_id', sa.Integer))
def no_diff(sha1, directory):
path = os.path.join(directory, sha1[:2], sha1[2:4], sha1[4:])
diff = pickle.load(open(path))
return diff._diff is None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
directory = context.config.file_config.get('app:main', 'file_directory')
conn = op.get_bind()
# test cases whose results store diffs
diff_tcs = [tc_id for (tc_id, output) in conn.execute(testcase.select())
if output == 'diff']
diff_to_tcrs = defaultdict(list)
for (diff_id, s_id, tc_id) in conn.execute(
testcaseresult.select().where(testcaseresult.c.test_case_id
.in_(diff_tcs))):
if diff_id:
diff_to_tcrs[diff_id].append({'s_id': s_id, 'tc_id': tc_id})
# Prune diffs that are not significantly unique
diff_to_tcrs = {x: y for (x, y) in diff_to_tcrs.items() if len(y) > 64}
# Find test case results with no diff
empty_tcrs = []
for (f_id, sha1, size) in conn.execute(file_.select()
.where(file_.c.id
.in_(diff_to_tcrs.keys()))):
if size == 0:
print('Found unexpected zero byte diff on: {}'
.format(diff_to_tcrs[f_id]))
empty_tcrs.extend(diff_to_tcrs[f_id])
elif no_diff(sha1, directory):
empty_tcrs.extend(diff_to_tcrs[f_id])
if not empty_tcrs: # Exit early
return
# Update diff to None for appropriate TestCaseResults
conn.execute(testcaseresult.update().where(and_(
testcaseresult.c.submission_id == sa.bindparam('s_id'),
testcaseresult.c.test_case_id == sa.bindparam('tc_id')))
.values(diff_id=None), empty_tcrs)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
{
"content_hash": "82d31a69fca7ea3182cf89c9274dc644",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 33.45454545454545,
"alnum_prop": 0.5682744565217391,
"repo_name": "ucsb-cs/submit",
"id": "782a0a0d9a1cf581a40a66b042365c4d9c9ef144",
"size": "2944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submit/migrations/versions/1cf84ddb034c_clear_out_matching_d.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2234"
},
{
"name": "HTML",
"bytes": "995"
},
{
"name": "JavaScript",
"bytes": "36963"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "284298"
},
{
"name": "Shell",
"bytes": "390"
}
],
"symlink_target": ""
}
|
from distutils import log
from distutils.core import Command
from distutils.errors import DistutilsSetupError
import os
import re
from datetime import date
class release(Command):
description = "create and release a new version"
user_options = [
('keyid', None, "GPG key to sign with"),
('skip-tests', None, "skip running the tests"),
('pypi', None, "publish to pypi"),
]
boolean_options = ['skip-tests', 'pypi']
def initialize_options(self):
self.keyid = None
self.skip_tests = 0
self.pypi = 0
def finalize_options(self):
self.cwd = os.getcwd()
self.fullname = self.distribution.get_fullname()
self.name = self.distribution.get_name()
self.version = self.distribution.get_version()
def _verify_version(self):
with open('NEWS', 'r') as news_file:
line = news_file.readline()
now = date.today().strftime('%Y-%m-%d')
if not re.search(r'Version %s \(released %s\)' % (self.version, now),
line):
raise DistutilsSetupError("Incorrect date/version in NEWS!")
def _verify_tag(self):
if os.system('git tag | grep -q "^%s\$"' % self.fullname) == 0:
raise DistutilsSetupError(
"Tag '%s' already exists!" % self.fullname)
def _sign(self):
if os.path.isfile('dist/%s.tar.gz.asc' % self.fullname):
# Signature exists from upload, re-use it:
sign_opts = ['--output dist/%s.tar.gz.sig' % self.fullname,
'--dearmor dist/%s.tar.gz.asc' % self.fullname]
else:
# No signature, create it:
sign_opts = ['--detach-sign', 'dist/%s.tar.gz' % self.fullname]
if self.keyid:
sign_opts.insert(1, '--default-key ' + self.keyid)
self.execute(os.system, ('gpg ' + (' '.join(sign_opts)),))
if os.system('gpg --verify dist/%s.tar.gz.sig' % self.fullname) != 0:
raise DistutilsSetupError("Error verifying signature!")
def _tag(self):
tag_opts = ['-s', '-m ' + self.fullname, self.fullname]
if self.keyid:
tag_opts[0] = '-u ' + self.keyid
self.execute(os.system, ('git tag ' + (' '.join(tag_opts)),))
def _do_call_publish(self, cmd):
self._published = os.system(cmd) == 0
def _publish(self):
web_repo = os.getenv('YUBICO_GITHUB_REPO')
if web_repo and os.path.isdir(web_repo):
artifacts = [
'dist/%s.tar.gz' % self.fullname,
'dist/%s.tar.gz.sig' % self.fullname
]
cmd = '%s/publish %s %s %s' % (
web_repo, self.name, self.version, ' '.join(artifacts))
self.execute(self._do_call_publish, (cmd,))
if self._published:
self.announce("Release published! Don't forget to:", log.INFO)
self.announce("")
self.announce(" (cd %s && git push)" % web_repo, log.INFO)
self.announce("")
else:
self.warn("There was a problem publishing the release!")
else:
self.warn("YUBICO_GITHUB_REPO not set or invalid!")
self.warn("This release will not be published!")
def run(self):
if os.getcwd() != self.cwd:
raise DistutilsSetupError("Must be in package root!")
self._verify_version()
self._verify_tag()
self.execute(os.system, ('git2cl > ChangeLog',))
if not self.skip_tests:
self.run_command('check')
try:
self.run_command('test')
except SystemExit as e:
if e.code != 0:
raise DistutilsSetupError("There were test failures!")
self.run_command('sdist')
if self.pypi:
cmd_obj = self.distribution.get_command_obj('upload')
cmd_obj.sign = True
if self.keyid:
cmd_obj.identity = self.keyid
self.run_command('upload')
self._sign()
self._tag()
self._publish()
self.announce("Release complete! Don't forget to:", log.INFO)
self.announce("")
self.announce(" git push && git push --tags", log.INFO)
self.announce("")
|
{
"content_hash": "28a0c1b28c28f9cb5768ea1f368d5525",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 35.51639344262295,
"alnum_prop": 0.5407339026078929,
"repo_name": "Yubico/python-yubico-dpkg",
"id": "1f70b2256ed079c9b5949cefad15c880e3eba91e",
"size": "5705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "release.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groff",
"bytes": "3704"
},
{
"name": "Python",
"bytes": "121622"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
import time
from openerp import pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
class is_resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_columns = {
'partner_id': fields.many2one('res.partner', 'Company'),
'reason': fields.selection([('h_summer', 'Summer holiday'),
('h_winter', 'Winter holiday'),
('h_public', 'Public holiday'),
('others', 'Others')], 'Reason of close'),
}
def create(self, cr, uid, vals, context=None):
if vals['date_from']:
date = datetime.strptime(vals['date_from'], '%Y-%m-%d %H:%M:%S')
date_from = date.strftime('%Y-%m-%d 05:00:00')
vals.update({'date_from': date_from})
if vals['date_to']:
date = datetime.strptime(vals['date_to'], '%Y-%m-%d %H:%M:%S')
date_to = date.strftime('%Y-%m-%d 05:00:00')
vals.update({'date_to': date_to})
return super(is_resource_calendar_leaves, self).create(cr, uid, vals, context=context)
is_resource_calendar_leaves()
class is_res_partner_calendar(osv.osv):
_inherit = "res.partner"
_columns = {
'calendar_line': fields.one2many('resource.calendar.leaves', 'partner_id', 'Leaves'),
'close_monday': fields.boolean('Monday'),
'close_tuesday': fields.boolean('Tuesday'),
'close_wednesday': fields.boolean('Wednesday'),
'close_thursday': fields.boolean('Thursday'),
'close_friday': fields.boolean('Friday'),
'close_saturday': fields.boolean('Saturday'),
'close_sunday': fields.boolean('Sunday'),
}
_defaults = {
'close_saturday': True,
'close_sunday': True,
}
is_res_partner_calendar()
|
{
"content_hash": "cfa3ab4c90f3deaddaeb349af4c799aa",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 94,
"avg_line_length": 37.03846153846154,
"alnum_prop": 0.5597092419522326,
"repo_name": "tonygalmiche/is_plastigray",
"id": "694591dc222f9dfdb1df669d8fb9c18e1049f105",
"size": "1951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "is_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1702"
},
{
"name": "JavaScript",
"bytes": "22445"
},
{
"name": "Python",
"bytes": "1418292"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "subastas"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
|
{
"content_hash": "3e2cb2e4b50799e508fdecf836c87f30",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.5941558441558441,
"repo_name": "diegoduncan21/subastas",
"id": "e54478879f749f0873b5488d7479ec87145cda10",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subastas_repo/contrib/sites/migrations/0002_set_site_domain_and_name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1906"
},
{
"name": "HTML",
"bytes": "51394"
},
{
"name": "JavaScript",
"bytes": "2387"
},
{
"name": "Python",
"bytes": "66230"
}
],
"symlink_target": ""
}
|
"""Prepares a licenses.txt file for the package.
The license paths file is expected to contain a series of titles and paths to
output. The paths should be relative to the file itself. For example,
Title: path/to/LICENSE
The extra licenses file is expected to contain input similar to the output;
a series of licenses, each with a title on a separate line prefixed by '@'.
This file is used in the demo to display the various licenses for the
dependencies of the project.
"""
import argparse
import os
import sys
import json
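# Illustrative example of the inputs described in the module docstring (file
# names and paths below are hypothetical):
#
#   license_paths.txt:
#       zlib: third_party/zlib/LICENSE
#       curl: third_party/curl/COPYING
#
# A possible invocation:
#   python make_license_file.py --paths-file license_paths.txt \
#       --extras-file extra_licenses.txt --output gen/licenses.txt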
def _GenLicensesFile(out, paths, extras, base_path):
"""Reads the input files, and writes a licenses.txt file to the given output.
Args:
out: A file object for the output.
paths: A file object for the paths file.
extras: A file object for the extra licenses file.
base_path: The URL base used to resolve the relative URLs in the paths file.
"""
licenses = []
for line in paths:
name, path = line.split(': ', 1)
path = os.path.join(base_path, path.rstrip('\n'))
with open(path, 'r') as file:
licenses.append({'name': name, 'text': file.read()})
while True:
name = extras.readline()
if not name: break
text = extras.readline().replace('\\n', '\n')
licenses.append({'name': name, 'text': text})
out.write(json.dumps(licenses))
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--paths-file', required=True,
help='A file that contains paths to licenses.')
parser.add_argument('--extras-file', required=True,
help='A file that contains extra license text, ' +
'copied verbatim.')
parser.add_argument('--output', required=True,
help='The path to the file to generate.')
parsed_args = parser.parse_args(argv)
output_dir = os.path.dirname(parsed_args.output)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(parsed_args.output, 'w') as out:
with open(parsed_args.paths_file, 'r') as paths:
with open(parsed_args.extras_file, 'r') as extras:
base_path = os.path.dirname(parsed_args.paths_file)
        _GenLicensesFile(out, paths, extras, base_path)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "198331495267faad6b434bd89c4d9980",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 33.89705882352941,
"alnum_prop": 0.6637744034707158,
"repo_name": "shaka-project/shaka-player-embedded",
"id": "0a094e364b96f0bdade92bb8069fa183a6133644",
"size": "2900",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "shaka/tools/make_license_file.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4629"
},
{
"name": "C++",
"bytes": "1577496"
},
{
"name": "JavaScript",
"bytes": "96384"
},
{
"name": "Makefile",
"bytes": "1354"
},
{
"name": "Objective-C",
"bytes": "120570"
},
{
"name": "Objective-C++",
"bytes": "65339"
},
{
"name": "Python",
"bytes": "176793"
},
{
"name": "Shell",
"bytes": "1985"
}
],
"symlink_target": ""
}
|
import configparser
import os
import shutil
from copy import deepcopy
from unittest import mock
import pytest
from great_expectations.core.usage_statistics.usage_statistics import (
run_validation_operator_usage_statistics,
)
from great_expectations.data_context import (
BaseDataContext,
DataContext,
EphemeralDataContext,
FileDataContext,
)
from great_expectations.data_context.types.base import DataContextConfig
from great_expectations.data_context.util import file_relative_path
from tests.integration.usage_statistics.test_integration_usage_statistics import (
USAGE_STATISTICS_QA_URL,
)
@pytest.fixture
def in_memory_data_context_config_usage_stats_enabled():
return DataContextConfig(
**{
"commented_map": {},
"config_version": 2,
"plugins_directory": None,
"evaluation_parameter_store_name": "evaluation_parameter_store",
"validations_store_name": "validations_store",
"expectations_store_name": "expectations_store",
"config_variables_file_path": None,
"datasources": {},
"stores": {
"expectations_store": {
"class_name": "ExpectationsStore",
},
"validations_store": {
"class_name": "ValidationsStore",
},
"evaluation_parameter_store": {
"class_name": "EvaluationParameterStore",
},
},
"data_docs_sites": {},
"validation_operators": {
"default": {
"class_name": "ActionListValidationOperator",
"action_list": [],
}
},
"anonymous_usage_statistics": {
"enabled": True,
"data_context_id": "00000000-0000-0000-0000-000000000001",
"usage_statistics_url": USAGE_STATISTICS_QA_URL,
},
}
)
@pytest.mark.base_data_context
def test_consistent_name_anonymization(
in_memory_data_context_config_usage_stats_enabled, monkeypatch
):
monkeypatch.delenv(
"GE_USAGE_STATS", raising=False
) # Undo the project-wide test default
context = BaseDataContext(in_memory_data_context_config_usage_stats_enabled)
assert context.data_context_id == "00000000-0000-0000-0000-000000000001"
payload = run_validation_operator_usage_statistics(
context,
"action_list_operator",
assets_to_validate=[
({"__fake_batch_kwargs": "mydatasource"}, "__fake_expectation_suite_name")
],
run_id="foo",
)
# For a *specific* data_context_id, all names will be consistently anonymized
assert payload["anonymized_operator_name"] == "e079c942d946b823312054118b3b6ef4"
@pytest.mark.base_data_context
def test_global_override_environment_variable_base_data_context(
in_memory_data_context_config_usage_stats_enabled, monkeypatch
):
"""Set the env variable GE_USAGE_STATS value to any of the following: FALSE, False, false, 0"""
monkeypatch.setenv("GE_USAGE_STATS", "False")
assert (
in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.enabled
is True
)
context = BaseDataContext(in_memory_data_context_config_usage_stats_enabled)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
@pytest.mark.base_data_context
def test_global_override_from_config_file_in_etc(
in_memory_data_context_config_usage_stats_enabled, tmp_path_factory, monkeypatch
):
monkeypatch.delenv(
"GE_USAGE_STATS", raising=False
) # Undo the project-wide test default
home_config_dir = tmp_path_factory.mktemp("home_dir")
home_config_dir = str(home_config_dir)
etc_config_dir = tmp_path_factory.mktemp("etc")
etc_config_dir = str(etc_config_dir)
config_dirs = [home_config_dir, etc_config_dir]
config_dirs = [
os.path.join(config_dir, "great_expectations.conf")
for config_dir in config_dirs
]
for false_string in ["False", "false", "f", "FALSE"]:
disabled_config = configparser.ConfigParser()
disabled_config["anonymous_usage_statistics"] = {"enabled": false_string}
with open(
os.path.join(etc_config_dir, "great_expectations.conf"), "w"
) as configfile:
disabled_config.write(configfile)
with mock.patch(
"great_expectations.data_context.AbstractDataContext.GLOBAL_CONFIG_PATHS",
config_dirs,
):
assert (
in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.enabled
is True
)
context = BaseDataContext(
deepcopy(in_memory_data_context_config_usage_stats_enabled)
)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
@pytest.mark.base_data_context
def test_global_override_from_config_file_in_home_folder(
in_memory_data_context_config_usage_stats_enabled, tmp_path_factory, monkeypatch
):
monkeypatch.delenv(
"GE_USAGE_STATS", raising=False
) # Undo the project-wide test default
home_config_dir = tmp_path_factory.mktemp("home_dir")
home_config_dir = str(home_config_dir)
etc_config_dir = tmp_path_factory.mktemp("etc")
etc_config_dir = str(etc_config_dir)
config_dirs = [home_config_dir, etc_config_dir]
config_dirs = [
os.path.join(config_dir, "great_expectations.conf")
for config_dir in config_dirs
]
enabled_config = configparser.ConfigParser()
enabled_config["anonymous_usage_statistics"] = {"enabled": "True"}
for false_string in ["False", "false", "f", "FALSE"]:
disabled_config = configparser.ConfigParser()
disabled_config["anonymous_usage_statistics"] = {"enabled": false_string}
with open(
os.path.join(home_config_dir, "great_expectations.conf"), "w"
) as configfile:
disabled_config.write(configfile)
with mock.patch(
"great_expectations.data_context.AbstractDataContext.GLOBAL_CONFIG_PATHS",
config_dirs,
):
assert (
in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.enabled
is True
)
context = BaseDataContext(
deepcopy(in_memory_data_context_config_usage_stats_enabled)
)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
def test_global_override_in_yml(tmp_path_factory, monkeypatch):
monkeypatch.delenv(
"GE_USAGE_STATS", raising=False
) # Undo the project-wide test default
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(context_path, exist_ok=True)
fixture_dir = file_relative_path(__file__, "../../test_fixtures")
shutil.copy(
os.path.join(
fixture_dir, "great_expectations_basic_with_usage_stats_disabled.yml"
),
str(os.path.join(context_path, "great_expectations.yml")),
)
assert (
DataContext(
context_root_dir=context_path
)._project_config.anonymous_usage_statistics.enabled
is False
)
@pytest.mark.base_data_context
def test_global_override_conf_overrides_yml_and_env_variable(
tmp_path_factory, monkeypatch
):
"""
What does this test and why?
anonymous_usage_stats_enabled can be set in the following 3 places
- great_expectations.yml (YML)
- GE_USAGE_STATS environment variable (env)
- conf file (conf)
If it is set as `False` in *any* of the 3 places, the global value is set to `False`
This test tests the following scenario:
- `True` in YML
- `True` in env
- `False` in conf
Therefore the global value is set to `False`
"""
monkeypatch.setenv("GE_USAGE_STATS", "True")
home_config_dir = tmp_path_factory.mktemp("home_dir")
home_config_dir = str(home_config_dir)
etc_config_dir = tmp_path_factory.mktemp("etc")
etc_config_dir = str(etc_config_dir)
config_dirs = [home_config_dir, etc_config_dir]
config_dirs = [
os.path.join(config_dir, "great_expectations.conf")
for config_dir in config_dirs
]
disabled_config = configparser.ConfigParser()
disabled_config["anonymous_usage_statistics"] = {"enabled": "False"}
with open(
os.path.join(etc_config_dir, "great_expectations.conf"), "w"
) as configfile:
disabled_config.write(configfile)
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(context_path, exist_ok=True)
fixture_dir = file_relative_path(__file__, "../../test_fixtures")
shutil.copy(
os.path.join(
fixture_dir, "great_expectations_v013_basic_with_usage_stats_enabled.yml"
),
str(os.path.join(context_path, "great_expectations.yml")),
)
assert (
DataContext(
context_root_dir=context_path
)._project_config.anonymous_usage_statistics.enabled
is True
)
with mock.patch(
"great_expectations.data_context.AbstractDataContext.GLOBAL_CONFIG_PATHS",
config_dirs,
):
context = DataContext(context_root_dir=context_path)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
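# The precedence exercised by the test above and the two that follow reduces
# to a logical AND over the three switches. A minimal sketch of that rule
# (not the library code; names are illustrative only):
def _usage_stats_enabled_sketch(yml_enabled, env_enabled, conf_enabled):
    # Any single False disables anonymous usage statistics globally.
    return yml_enabled and env_enabled and conf_enabled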
@pytest.mark.base_data_context
def test_global_override_env_overrides_yml_and_conf(tmp_path_factory, monkeypatch):
"""
What does this test and why?
anonymous_usage_stats_enabled can be set in the following 3 places
- great_expectations.yml (YML)
- GE_USAGE_STATS environment variable (env)
- conf file (conf)
If it is set as `False` in *any* of the 3 places, the global value is set to `False`
    This test covers the following scenario:
- `True` in YML
- `False` in env
- `True` in conf
Therefore the global value is set to `False`
"""
monkeypatch.setenv("GE_USAGE_STATS", "False")
home_config_dir = tmp_path_factory.mktemp("home_dir")
home_config_dir = str(home_config_dir)
etc_config_dir = tmp_path_factory.mktemp("etc")
etc_config_dir = str(etc_config_dir)
config_dirs = [home_config_dir, etc_config_dir]
config_dirs = [
os.path.join(config_dir, "great_expectations.conf")
for config_dir in config_dirs
]
disabled_config = configparser.ConfigParser()
disabled_config["anonymous_usage_statistics"] = {"enabled": "True"}
with open(
os.path.join(etc_config_dir, "great_expectations.conf"), "w"
) as configfile:
disabled_config.write(configfile)
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(context_path, exist_ok=True)
fixture_dir = file_relative_path(__file__, "../../test_fixtures")
shutil.copy(
os.path.join(
fixture_dir, "great_expectations_v013_basic_with_usage_stats_enabled.yml"
),
str(os.path.join(context_path, "great_expectations.yml")),
)
assert (
DataContext(
context_root_dir=context_path
)._project_config.anonymous_usage_statistics.enabled
is False
)
with mock.patch(
"great_expectations.data_context.AbstractDataContext.GLOBAL_CONFIG_PATHS",
config_dirs,
):
context = DataContext(context_root_dir=context_path)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
@pytest.mark.base_data_context
def test_global_override_yml_overrides_env_and_conf(tmp_path_factory, monkeypatch):
"""
What does this test and why?
anonymous_usage_stats_enabled can be set in the following 3 places
- great_expectations.yml (YML)
- GE_USAGE_STATS environment variable (env)
- conf file (conf)
If it is set as `False` in *any* of the 3 places, the global value is set to `False`
    This test covers the following scenario:
- `False` in YML
- `True` in env
- `True` in conf
Therefore the global value is set to `False`
"""
monkeypatch.setenv("GE_USAGE_STATS", "True")
home_config_dir = tmp_path_factory.mktemp("home_dir")
home_config_dir = str(home_config_dir)
etc_config_dir = tmp_path_factory.mktemp("etc")
etc_config_dir = str(etc_config_dir)
config_dirs = [home_config_dir, etc_config_dir]
config_dirs = [
os.path.join(config_dir, "great_expectations.conf")
for config_dir in config_dirs
]
disabled_config = configparser.ConfigParser()
disabled_config["anonymous_usage_statistics"] = {"enabled": "True"}
with open(
os.path.join(etc_config_dir, "great_expectations.conf"), "w"
) as configfile:
disabled_config.write(configfile)
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(context_path, exist_ok=True)
fixture_dir = file_relative_path(__file__, "../../test_fixtures")
shutil.copy(
os.path.join(
fixture_dir, "great_expectations_basic_with_usage_stats_disabled.yml"
),
str(os.path.join(context_path, "great_expectations.yml")),
)
assert (
DataContext(
context_root_dir=context_path
)._project_config.anonymous_usage_statistics.enabled
is False
)
with mock.patch(
"great_expectations.data_context.AbstractDataContext.GLOBAL_CONFIG_PATHS",
config_dirs,
):
context = DataContext(context_root_dir=context_path)
project_config = context._project_config
assert project_config.anonymous_usage_statistics.enabled is False
|
{
"content_hash": "3d89110b95a4bf3b6e2fdfec5d0889c4",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 100,
"avg_line_length": 35.18472906403941,
"alnum_prop": 0.64025201260063,
"repo_name": "great-expectations/great_expectations",
"id": "65e1393fafc8d6a996666dce02bf447a001d3003",
"size": "14285",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/core/usage_statistics/test_usage_statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
}
|
"""
Unit Tests for :py:class:`ironic.conductor.rpcapi.ConductorAPI`.
"""
import copy
import mock
from oslo.config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import manager as conductor_manager
from ironic.conductor import rpcapi as conductor_rpcapi
from ironic.db import api as dbapi
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.db import base
from ironic.tests.db import utils as dbutils
CONF = cfg.CONF
class ConductorRPCAPITestCase(tests_base.TestCase):
def test_versions_in_sync(self):
self.assertEqual(
conductor_manager.ConductorManager.RPC_API_VERSION,
conductor_rpcapi.ConductorAPI.RPC_API_VERSION)
class RPCAPITestCase(base.DbTestCase):
def setUp(self):
super(RPCAPITestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.fake_node = dbutils.get_test_node(driver='fake-driver')
self.fake_node_obj = objects.Node._from_db_object(
objects.Node(self.context),
self.fake_node)
def test_serialized_instance_has_uuid(self):
self.assertTrue('uuid' in self.fake_node)
def test_get_topic_for_known_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['fake-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_topic = 'fake-topic.fake-host'
self.assertEqual(expected_topic,
rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_unknown_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['other-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
def test_get_topic_doesnt_cache(self):
CONF.set_override('host', 'fake-host')
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['fake-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_topic = 'fake-topic.fake-host'
self.assertEqual(expected_topic,
rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_driver_known_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['fake-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertEqual('fake-topic.fake-host',
rpcapi.get_topic_for_driver('fake-driver'))
def test_get_topic_for_driver_unknown_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['other-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.DriverNotFound,
rpcapi.get_topic_for_driver,
'fake-driver')
def test_get_topic_for_driver_doesnt_cache(self):
CONF.set_override('host', 'fake-host')
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.DriverNotFound,
rpcapi.get_topic_for_driver,
'fake-driver')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['fake-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertEqual('fake-topic.fake-host',
rpcapi.get_topic_for_driver('fake-driver'))
def _test_rpcapi(self, method, rpc_method, **kwargs):
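        # Descriptive comment (added for clarity): this helper patches the RPC
        # client's prepare() and the given rpc_method ('call' or 'cast'), invokes
        # the named ConductorAPI method, and checks that prepare() received the
        # expected topic/version and that the RPC call carried the expected
        # context, method name, and message body.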
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_retval = 'hello world' if rpc_method == 'call' else None
expected_topic = 'fake-topic'
if 'host' in kwargs:
expected_topic += ".%s" % kwargs['host']
target = {
"topic": expected_topic,
"version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
}
expected_msg = copy.deepcopy(kwargs)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwargs):
for kwd in kwargs:
self.assertEqual(kwargs[kwd], target[kwd])
return rpcapi.client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
mock_prepared.side_effect = _fake_prepare_method
with mock.patch.object(rpcapi.client, rpc_method) as mock_method:
mock_method.side_effect = _fake_rpc_method
retval = getattr(rpcapi, method)(self.context, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [self.context, method, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_update_node(self):
self._test_rpcapi('update_node',
'call',
version='1.1',
node_obj=self.fake_node)
def test_change_node_power_state(self):
self._test_rpcapi('change_node_power_state',
'call',
version='1.6',
node_id=self.fake_node['uuid'],
new_state=states.POWER_ON)
def test_pass_vendor_info(self):
self._test_rpcapi('vendor_passthru',
'call',
version='1.12',
node_id=self.fake_node['uuid'],
driver_method='test-driver-method',
info={"test_info": "test_value"})
def test_driver_vendor_passthru(self):
self._test_rpcapi('driver_vendor_passthru',
'call',
version='1.14',
driver_name='test-driver-name',
driver_method='test-driver-method',
info={'test_key': 'test_value'})
def test_do_node_deploy(self):
self._test_rpcapi('do_node_deploy',
'call',
version='1.15',
node_id=self.fake_node['uuid'],
rebuild=False)
def test_do_node_tear_down(self):
self._test_rpcapi('do_node_tear_down',
'call',
version='1.6',
node_id=self.fake_node['uuid'])
def test_validate_driver_interfaces(self):
self._test_rpcapi('validate_driver_interfaces',
'call',
version='1.5',
node_id=self.fake_node['uuid'])
def test_change_node_maintenance_mode(self):
self._test_rpcapi('change_node_maintenance_mode',
'call',
version='1.8',
node_id=self.fake_node['uuid'],
mode=True)
def test_destroy_node(self):
self._test_rpcapi('destroy_node',
'call',
version='1.9',
node_id=self.fake_node['uuid'])
def test_get_console_information(self):
self._test_rpcapi('get_console_information',
'call',
version='1.11',
node_id=self.fake_node['uuid'])
def test_set_console_mode(self):
self._test_rpcapi('set_console_mode',
'call',
version='1.11',
node_id=self.fake_node['uuid'],
enabled=True)
def test_update_port(self):
fake_port = dbutils.get_test_port()
self._test_rpcapi('update_port',
'call',
version='1.13',
port_obj=fake_port)
def test_get_driver_properties(self):
self._test_rpcapi('get_driver_properties',
'call',
version='1.16',
driver_name='fake-driver')
def test_set_boot_device(self):
self._test_rpcapi('set_boot_device',
'call',
version='1.17',
node_id=self.fake_node['uuid'],
device=boot_devices.DISK,
persistent=False)
def test_get_boot_device(self):
self._test_rpcapi('get_boot_device',
'call',
version='1.17',
node_id=self.fake_node['uuid'])
def test_get_supported_boot_devices(self):
self._test_rpcapi('get_supported_boot_devices',
'call',
version='1.17',
node_id=self.fake_node['uuid'])
|
{
"content_hash": "5a0fab0baf0fd7e88662ea0bb5067afa",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 38.12595419847328,
"alnum_prop": 0.5186705375913505,
"repo_name": "CiscoUcs/Ironic-UCS",
"id": "47a938c4767f874e7ad5e937ff13ce350b4884d0",
"size": "10661",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib.linux-x86_64-2.7/ironic/tests/conductor/test_rpcapi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "64257"
},
{
"name": "Python",
"bytes": "4547133"
},
{
"name": "Ruby",
"bytes": "1296"
},
{
"name": "Shell",
"bytes": "1"
}
],
"symlink_target": ""
}
|
import datetime
import io
import os
import textwrap
from enum import Enum
# keep below as absolute imports
from flopy.mf6.data import mfdatautil, mfstructure
from flopy.utils import datautil
"""
createpackages.py is a utility script that reads in the file definition
metadata in the .dfn files to create the package classes in the modflow folder.
Run this script any time changes are made to the .dfn files.
"""
class PackageLevel(Enum):
sim_level = 0
model_level = 1
def build_doc_string(param_name, param_type, param_desc, indent):
return f"{indent}{param_name} : {param_type}\n{indent * 2}* {param_desc}"
def generator_type(data_type):
if (
data_type == mfstructure.DataType.scalar_keyword
or data_type == mfstructure.DataType.scalar
):
# regular scalar
return "ScalarTemplateGenerator"
elif (
data_type == mfstructure.DataType.scalar_keyword_transient
or data_type == mfstructure.DataType.scalar_transient
):
# transient scalar
return "ScalarTemplateGenerator"
elif data_type == mfstructure.DataType.array:
# array
return "ArrayTemplateGenerator"
elif data_type == mfstructure.DataType.array_transient:
# transient array
return "ArrayTemplateGenerator"
elif data_type == mfstructure.DataType.list:
# list
return "ListTemplateGenerator"
elif (
data_type == mfstructure.DataType.list_transient
or data_type == mfstructure.DataType.list_multiple
):
# transient or multiple list
return "ListTemplateGenerator"
def clean_class_string(name):
if len(name) > 0:
clean_string = name.replace(" ", "_")
clean_string = clean_string.replace("-", "_")
version = mfstructure.MFStructure().get_version_string()
# FIX: remove all numbers
if clean_string[-1] == version:
clean_string = clean_string[:-1]
return clean_string
return name
def build_dfn_string(dfn_list, header):
dfn_string = " dfn = ["
line_length = len(dfn_string)
leading_spaces = " " * line_length
first_di = True
# process header
dfn_string = f'{dfn_string}\n{leading_spaces}["header", '
for key, value in header.items():
if key == "multi-package":
dfn_string = f'{dfn_string}\n{leading_spaces} "multi-package", '
dfn_string = f"{dfn_string}],\n{leading_spaces}"
# process all data items
for data_item in dfn_list:
line_length += 1
if not first_di:
dfn_string = f"{dfn_string},\n{leading_spaces}"
line_length = len(leading_spaces)
else:
first_di = False
dfn_string = f"{dfn_string}["
first_line = True
# process each line in a data item
for line in data_item:
line = line.strip()
            # do not include the description or longname lines
if not line.lower().startswith(
"description"
) and not line.lower().startswith("longname"):
line = line.replace('"', "'")
line_length += len(line) + 4
if not first_line:
dfn_string = f"{dfn_string},"
if line_length < 77:
# added text fits on the current line
if first_line:
dfn_string = f'{dfn_string}"{line}"'
else:
dfn_string = f'{dfn_string} "{line}"'
else:
# added text does not fit on the current line
line_length = len(line) + len(leading_spaces) + 2
if line_length > 79:
# added text too long to fit on a single line, wrap
# text as needed
line = f'"{line}"'
lines = textwrap.wrap(
line,
75 - len(leading_spaces),
drop_whitespace=True,
)
lines[0] = f"{leading_spaces} {lines[0]}"
line_join = f' "\n{leading_spaces} "'
dfn_string = f"{dfn_string}\n{line_join.join(lines)}"
else:
dfn_string = f'{dfn_string}\n{leading_spaces} "{line}"'
first_line = False
dfn_string = f"{dfn_string}]"
dfn_string = f"{dfn_string}]"
return dfn_string
def create_init_var(clean_ds_name, data_structure_name, init_val=None):
if init_val is None:
init_val = clean_ds_name
init_var = f" self.{clean_ds_name} = self.build_mfdata("
leading_spaces = " " * len(init_var)
if len(init_var) + len(data_structure_name) + 2 > 79:
second_line = f'\n "{data_structure_name}",'
if len(second_line) + len(clean_ds_name) + 2 > 79:
init_var = f"{init_var}{second_line}\n {init_val})"
else:
init_var = f"{init_var}{second_line} {init_val})"
else:
init_var = f'{init_var}"{data_structure_name}",'
if len(init_var) + len(clean_ds_name) + 2 > 79:
init_var = f"{init_var}\n{leading_spaces}{init_val})"
else:
init_var = f"{init_var} {init_val})"
return init_var
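# For example (hypothetical names), create_init_var("nper", "nper") produces an
# initialization line of the form:
#     self.nper = self.build_mfdata("nper", nper)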
def create_basic_init(clean_ds_name):
return f" self.{clean_ds_name} = {clean_ds_name}\n"
def create_property(clean_ds_name):
return f" {clean_ds_name} = property(get_{clean_ds_name}, set_{clean_ds_name})"
def format_var_list(base_string, var_list, is_tuple=False):
if is_tuple:
base_string = f"{base_string}("
extra_chars = 4
else:
extra_chars = 2
line_length = len(base_string)
leading_spaces = " " * line_length
# determine if any variable name is too long to fit
for item in var_list:
if line_length + len(item) + extra_chars > 80:
leading_spaces = " "
base_string = f"{base_string}\n{leading_spaces}"
line_length = len(leading_spaces)
break
for index, item in enumerate(var_list):
if is_tuple:
item = f"'{item}'"
if index == len(var_list) - 1:
next_var_str = item
else:
next_var_str = f"{item},"
line_length += len(item) + extra_chars
if line_length > 80:
base_string = f"{base_string}\n{leading_spaces}{next_var_str}"
else:
if base_string[-1] == ",":
base_string = f"{base_string} "
base_string = f"{base_string}{next_var_str}"
if is_tuple:
return f"{base_string}))"
else:
return f"{base_string})"
def create_package_init_var(parameter_name, package_abbr, data_name):
one_line = (
f" self._{package_abbr}_package = self.build_child_package("
)
one_line_b = f'"{package_abbr}", {parameter_name},'
leading_spaces = " " * len(one_line)
two_line = f'\n{leading_spaces}"{data_name}",'
three_line = f"\n{leading_spaces}self._{package_abbr}_filerecord)"
return f"{one_line}{one_line_b}{two_line}{three_line}"
def add_var(
init_vars,
class_vars,
init_param_list,
package_properties,
doc_string,
data_structure_dict,
default_value,
name,
python_name,
description,
path,
data_type,
basic_init=False,
construct_package=None,
construct_data=None,
parameter_name=None,
set_param_list=None,
):
if set_param_list is None:
set_param_list = []
clean_ds_name = datautil.clean_name(python_name)
if construct_package is None:
# add variable initialization lines
if basic_init:
init_vars.append(create_basic_init(clean_ds_name))
else:
init_vars.append(create_init_var(clean_ds_name, name))
# add to parameter list
if default_value is None:
default_value = "None"
init_param_list.append(f"{clean_ds_name}={default_value}")
# add to set parameter list
set_param_list.append(f"{clean_ds_name}={clean_ds_name}")
else:
clean_parameter_name = datautil.clean_name(parameter_name)
# init hidden variable
init_vars.append(create_init_var(f"_{clean_ds_name}", name, "None"))
# init child package
init_vars.append(
create_package_init_var(
clean_parameter_name, construct_package, construct_data
)
)
# add to parameter list
init_param_list.append(f"{clean_parameter_name}=None")
# add to set parameter list
set_param_list.append(f"{clean_parameter_name}={clean_parameter_name}")
package_properties.append(create_property(clean_ds_name))
doc_string.add_parameter(description, model_parameter=True)
data_structure_dict[python_name] = 0
if class_vars is not None:
gen_type = generator_type(data_type)
if gen_type != "ScalarTemplateGenerator":
new_class_var = f" {clean_ds_name} = {gen_type}("
class_vars.append(format_var_list(new_class_var, path, True))
return gen_type
return None
def build_init_string(
init_string, init_param_list, whitespace=" "
):
line_chars = len(init_string)
for index, param in enumerate(init_param_list):
if index + 1 < len(init_param_list):
line_chars += len(param) + 2
else:
line_chars += len(param) + 3
if line_chars > 79:
if len(param) + len(whitespace) + 1 > 79:
# try to break apart at = sign
param_list = param.split("=")
if len(param_list) == 2:
init_string = "{},\n{}{}=\n{}{}".format(
init_string,
whitespace,
param_list[0],
whitespace,
param_list[1],
)
line_chars = len(param_list[1]) + len(whitespace) + 1
continue
init_string = f"{init_string},\n{whitespace}{param}"
line_chars = len(param) + len(whitespace) + 1
else:
init_string = f"{init_string}, {param}"
return f"{init_string}):\n"
def build_model_load(model_type):
model_load_c = (
" Methods\n -------\n"
" load : (simulation : MFSimulationData, model_name : "
"string,\n namfile : string, "
"version : string, exe_name : string,\n model_ws : "
"string, strict : boolean) : MFSimulation\n"
" a class method that loads a model from files"
'\n """'
)
model_load = (
" @classmethod\n def load(cls, simulation, structure, "
"modelname='NewModel',\n "
"model_nam_file='modflowtest.nam', version='mf6',\n"
" exe_name='mf6.exe', strict=True, "
"model_rel_path='.',\n"
" load_only=None):\n "
"return mfmodel.MFModel.load_base(simulation, structure, "
"modelname,\n "
"model_nam_file, '{}', version,\n"
" exe_name, strict, "
"model_rel_path,\n"
" load_only)"
"\n".format(model_type)
)
return model_load, model_load_c
def build_model_init_vars(param_list):
init_var_list = []
for param in param_list:
param_parts = param.split("=")
init_var_list.append(
f" self.name_file.{param_parts[0]}.set_data({param_parts[0]})"
)
return "\n".join(init_var_list)
def create_packages():
indent = " "
init_string_def = " def __init__(self"
    # load the package definition metadata from the .dfn files
file_structure = mfstructure.MFStructure(load_from_dfn_files=True)
sim_struct = file_structure.sim_struct
# assemble package list of buildable packages
package_list = []
package_list.append(
(
sim_struct.name_file_struct_obj,
PackageLevel.sim_level,
"",
sim_struct.name_file_struct_obj.dfn_list,
sim_struct.name_file_struct_obj.file_type,
sim_struct.name_file_struct_obj.header,
)
)
for package in sim_struct.package_struct_objs.values():
# add simulation level package to list
package_list.append(
(
package,
PackageLevel.sim_level,
"",
package.dfn_list,
package.file_type,
package.header,
)
)
for package in sim_struct.utl_struct_objs.values():
# add utility packages to list
package_list.append(
(
package,
PackageLevel.model_level,
"utl",
package.dfn_list,
package.file_type,
package.header,
)
)
for model_key, model in sim_struct.model_struct_objs.items():
package_list.append(
(
model.name_file_struct_obj,
PackageLevel.model_level,
model_key,
model.name_file_struct_obj.dfn_list,
model.name_file_struct_obj.file_type,
model.name_file_struct_obj.header,
)
)
for package in model.package_struct_objs.values():
package_list.append(
(
package,
PackageLevel.model_level,
model_key,
package.dfn_list,
package.file_type,
package.header,
)
)
util_path, tail = os.path.split(os.path.realpath(__file__))
init_file = io.open(
os.path.join(util_path, "..", "modflow", "__init__.py"),
"w",
newline="\n",
)
init_file.write("from .mfsimulation import MFSimulation # isort:skip\n")
nam_import_string = (
"from .. import mfmodel\nfrom ..data.mfdatautil "
"import ArrayTemplateGenerator, ListTemplateGenerator"
)
# loop through packages list
init_file_imports = []
for package in package_list:
data_structure_dict = {}
package_properties = []
init_vars = []
init_param_list = []
set_param_list = []
class_vars = []
template_gens = []
dfn_string = build_dfn_string(package[3], package[5])
package_abbr = clean_class_string(
f"{clean_class_string(package[2])}{package[0].file_type}"
).lower()
package_name = clean_class_string(
"{}{}{}".format(
clean_class_string(package[2]),
package[0].file_prefix,
package[0].file_type,
)
).lower()
if package[0].description:
doc_string = mfdatautil.MFDocString(package[0].description)
else:
if package[2]:
package_container_text = f" within a {package[2]} model"
else:
package_container_text = ""
ds = "Modflow{} defines a {} package{}.".format(
package_name.title(),
package[0].file_type,
package_container_text,
)
if package[0].file_type == "mvr":
# mvr package warning
if package[2]:
ds = (
"{} This package\n can only be used to move "
"water between packages within a single model."
"\n To move water between models use ModflowMvr"
".".format(ds)
)
else:
ds = (
"{} This package can only be used to move\n "
"water between two different models. To move "
"water between two packages\n in the same "
'model use the "model level" mover package (ex. '
"ModflowGwfmvr).".format(ds)
)
doc_string = mfdatautil.MFDocString(ds)
if package[0].dfn_type == mfstructure.DfnType.exch_file:
add_var(
init_vars,
None,
init_param_list,
package_properties,
doc_string,
data_structure_dict,
None,
"exgtype",
"exgtype",
build_doc_string(
"exgtype",
"<string>",
"is the exchange type (GWF-GWF or GWF-GWT).",
indent,
),
None,
None,
True,
)
add_var(
init_vars,
None,
init_param_list,
package_properties,
doc_string,
data_structure_dict,
None,
"exgmnamea",
"exgmnamea",
build_doc_string(
"exgmnamea",
"<string>",
"is the name of the first model that is "
"part of this exchange.",
indent,
),
None,
None,
True,
)
add_var(
init_vars,
None,
init_param_list,
package_properties,
doc_string,
data_structure_dict,
None,
"exgmnameb",
"exgmnameb",
build_doc_string(
"exgmnameb",
"<string>",
"is the name of the second model that is "
"part of this exchange.",
indent,
),
None,
None,
True,
)
init_vars.append(
" simulation.register_exchange_file(self)\n"
)
# loop through all blocks
for block in package[0].blocks.values():
for data_structure in block.data_structures.values():
# only create one property for each unique data structure name
if data_structure.name not in data_structure_dict:
tg = add_var(
init_vars,
class_vars,
init_param_list,
package_properties,
doc_string,
data_structure_dict,
data_structure.default_value,
data_structure.name,
data_structure.python_name,
data_structure.get_doc_string(79, indent, indent),
data_structure.path,
data_structure.get_datatype(),
False,
data_structure.construct_package,
data_structure.construct_data,
data_structure.parameter_name,
set_param_list,
)
if tg is not None and tg not in template_gens:
template_gens.append(tg)
import_string = "from .. import mfpackage"
if template_gens:
import_string += "\nfrom ..data.mfdatautil import "
import_string += ", ".join(sorted(template_gens))
# add extra docstrings for additional variables
doc_string.add_parameter(
" filename : String\n File name for this package."
)
doc_string.add_parameter(
" pname : String\n Package name for this package."
)
doc_string.add_parameter(
" parent_file : MFPackage\n "
"Parent package file that references this "
"package. Only needed for\n utility "
"packages (mfutl*). For example, mfutllaktab "
"package must have \n a mfgwflak "
"package parent_file."
)
# build package builder class string
init_vars.append(" self._init_complete = True")
init_vars = "\n".join(init_vars)
package_short_name = clean_class_string(package[0].file_type).lower()
class_def_string = "class Modflow{}(mfpackage.MFPackage):\n".format(
package_name.title()
)
class_def_string = class_def_string.replace("-", "_")
class_var_string = (
'{}\n package_abbr = "{}"\n _package_type = '
'"{}"\n dfn_file_name = "{}"'
"\n".format(
"\n".join(class_vars),
package_abbr,
package[4],
package[0].dfn_file_name,
)
)
init_string_full = init_string_def
init_string_model = f"{init_string_def}, simulation"
# add variables to init string
doc_string.add_parameter(
" loading_package : bool\n "
"Do not set this parameter. It is intended "
"for debugging and internal\n "
"processing purposes only.",
beginning_of_list=True,
)
if package[1] == PackageLevel.sim_level:
doc_string.add_parameter(
" simulation : MFSimulation\n "
"Simulation that this package is a part "
"of. Package is automatically\n "
"added to simulation when it is "
"initialized.",
beginning_of_list=True,
)
init_string_full = (
f"{init_string_full}, simulation, loading_package=False"
)
else:
doc_string.add_parameter(
" model : MFModel\n "
"Model that this package is a part of. "
"Package is automatically\n added "
"to model when it is initialized.",
beginning_of_list=True,
)
init_string_full = (
f"{init_string_full}, model, loading_package=False"
)
init_param_list.append("filename=None")
init_param_list.append("pname=None")
init_param_list.append("parent_file=None")
init_string_full = build_init_string(init_string_full, init_param_list)
# build init code
if package[1] == PackageLevel.sim_level:
init_var = "simulation"
else:
init_var = "model"
parent_init_string = " super().__init__("
spaces = " " * len(parent_init_string)
parent_init_string = (
'{}{}, "{}", filename, pname,\n{}'
"loading_package, parent_file)\n\n"
" # set up variables".format(
parent_init_string, init_var, package_short_name, spaces
)
)
local_datetime = datetime.datetime.now(datetime.timezone.utc)
comment_string = (
"# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE "
"MUST BE CREATED BY\n# mf6/utils/createpackages.py\n"
"# FILE created on {} UTC".format(
local_datetime.strftime("%B %d, %Y %H:%M:%S")
)
)
# assemble full package string
package_string = "{}\n{}\n\n\n{}{}\n{}\n{}\n\n{}{}\n{}\n".format(
comment_string,
import_string,
class_def_string,
doc_string.get_doc_string(),
class_var_string,
dfn_string,
init_string_full,
parent_init_string,
init_vars,
)
# open new Packages file
pb_file = io.open(
os.path.join(util_path, "..", "modflow", f"mf{package_name}.py"),
"w",
newline="\n",
)
pb_file.write(package_string)
if package[2] == "utl" and package_abbr != "utltab":
set_param_list.append("filename=filename")
set_param_list.append("pname=pname")
set_param_list.append("parent_file=self._cpparent")
whsp_1 = " "
whsp_2 = " "
chld_doc_string = (
' """\n Utl{}Packages is a container '
"class for the ModflowUtl{} class.\n\n "
"Methods\n ----------"
"\n".format(package_short_name, package_short_name)
)
# write out child packages class
chld_cls = (
"\n\nclass Utl{}Packages(mfpackage.MFChildPackage"
"s):\n".format(package_short_name)
)
chld_var = (
f' package_abbr = "utl{package_short_name}packages"\n\n'
)
chld_init = " def initialize(self"
chld_init = build_init_string(
chld_init, init_param_list[:-1], whsp_1
)
init_pkg = "\n self._init_package(new_package, filename)"
params_init = f" new_package = ModflowUtl{package_short_name}(self._model"
params_init = build_init_string(
params_init, set_param_list, whsp_2
)
chld_doc_string = (
"{} initialize\n Initializes a new "
"ModflowUtl{} package removing any sibling "
"child\n packages attached to the same "
"parent package. See ModflowUtl{} init\n "
" documentation for definition of "
"parameters.\n".format(
chld_doc_string, package_short_name, package_short_name
)
)
chld_appn = ""
params_appn = ""
append_pkg = ""
if package_abbr != "utlobs": # Hard coded obs no multi-pkg support
chld_appn = "\n\n def append_package(self"
chld_appn = build_init_string(
chld_appn, init_param_list[:-1], whsp_1
)
append_pkg = (
"\n self._append_package(new_package, filename)"
)
params_appn = f" new_package = ModflowUtl{package_short_name}(self._model"
params_appn = build_init_string(
params_appn, set_param_list, whsp_2
)
chld_doc_string = (
"{} append_package\n Adds a "
"new ModflowUtl{} package to the container."
" See ModflowUtl{}\n init "
"documentation for definition of "
"parameters.\n".format(
chld_doc_string, package_short_name, package_short_name
)
)
chld_doc_string = f'{chld_doc_string} """\n'
packages_str = "{}{}{}{}{}{}{}{}{}\n".format(
chld_cls,
chld_doc_string,
chld_var,
chld_init,
params_init[:-2],
init_pkg,
chld_appn,
params_appn[:-2],
append_pkg,
)
pb_file.write(packages_str)
pb_file.close()
init_file_imports.append(
f"from .mf{package_name} import Modflow{package_name.title()}\n"
)
if package[0].dfn_type == mfstructure.DfnType.model_name_file:
# build model file
model_param_list = init_param_list[:-3]
init_vars = build_model_init_vars(model_param_list)
model_param_list.insert(0, "model_rel_path='.'")
model_param_list.insert(0, "exe_name='mf6.exe'")
model_param_list.insert(0, "version='mf6'")
model_param_list.insert(0, "model_nam_file=None")
model_param_list.insert(0, "modelname='model'")
model_param_list.append("**kwargs,")
init_string_model = build_init_string(
init_string_model, model_param_list
)
model_name = clean_class_string(package[2])
class_def_string = "class Modflow{}(mfmodel.MFModel):\n".format(
model_name.capitalize()
)
class_def_string = class_def_string.replace("-", "_")
doc_string.add_parameter(
" sim : MFSimulation\n "
"Simulation that this model is a part "
"of. Model is automatically\n "
"added to simulation when it is "
"initialized.",
beginning_of_list=True,
model_parameter=True,
)
doc_string.description = (
f"Modflow{model_name} defines a {model_name} model"
)
class_var_string = f" model_type = '{model_name}'\n"
mparent_init_string = " super().__init__("
spaces = " " * len(mparent_init_string)
mparent_init_string = (
"{}simulation, model_type='{}6',\n{}"
"modelname=modelname,\n{}"
"model_nam_file=model_nam_file,\n{}"
"version=version, exe_name=exe_name,\n{}"
"model_rel_path=model_rel_path,\n{}"
"**kwargs,"
")\n".format(
mparent_init_string,
model_name,
spaces,
spaces,
spaces,
spaces,
spaces,
)
)
load_txt, doc_text = build_model_load(model_name)
package_string = "{}\n{}\n\n\n{}{}\n{}\n{}\n{}{}\n{}\n\n{}".format(
comment_string,
nam_import_string,
class_def_string,
doc_string.get_doc_string(True),
doc_text,
class_var_string,
init_string_model,
mparent_init_string,
init_vars,
load_txt,
)
md_file = io.open(
os.path.join(util_path, "..", "modflow", f"mf{model_name}.py"),
"w",
newline="\n",
)
md_file.write(package_string)
md_file.close()
init_file_imports.append(
f"from .mf{model_name} import Modflow{model_name.capitalize()}\n"
)
# Sort the imports
for line in sorted(init_file_imports, key=lambda x: x.split()[3]):
init_file.write(line)
init_file.close()
if __name__ == "__main__":
create_packages()
|
{
"content_hash": "0024ef97dbdf7bed1097721afc26dc5b",
"timestamp": "",
"source": "github",
"line_count": 851,
"max_line_length": 97,
"avg_line_length": 36.71562867215041,
"alnum_prop": 0.4874699951992319,
"repo_name": "jentjr/flopy",
"id": "72721a78e1edfd46ce6fbcd4a989a2329fcd0088",
"size": "31245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flopy/mf6/utils/createpackages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "832"
},
{
"name": "CSS",
"bytes": "321"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6353118"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
}
|
from .CakeModel import CakeModel
class SourceAffiliate(CakeModel):
def __init__(self, **kwargs):
self.param_defaults = {
'affiliate_id': None,
'affiliate_name': None,
'third_party_name': None,
'tier': None,
'account_managers': None,
'account_status': None,
'inactive_reason': None,
'address': None,
'website': None,
'payment_type': None,
'contacts': None,
'tags': None,
'traffic_types': None,
'minimum_payment_threshold': None,
'auto_payment_fee': None,
'payment_to': None,
'tax_class': None,
'ssn_tax_id': None,
'pay_vat': None,
'swift_iban': None,
'referrals_enabled': None,
'referred_by_affiliate': None,
'referral_info': None,
'billing_cycle': None,
'currency_settings': None,
'quickbooks_id': None,
'online_signup': None,
'signup_ip_address': None,
'pay_for_conversions': None,
'review': None,
'review_new_subaffiliates': None,
'suppression': None,
'suppression_cap': None,
'pixel_info': None,
'fire_global_pixel': None,
'blacklists': None,
'redirect_domain_override': None,
'auto_approve_campaigns': None,
'auto_approve_pixels': None,
'hide_offers': None,
'api_key': None,
'date_created': None,
'date_last_accepted_terms': None,
'notes': None
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
|
{
"content_hash": "4f647727a0fa128d0bd29357d1ff82fd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 54,
"avg_line_length": 25.87037037037037,
"alnum_prop": 0.6320687186828919,
"repo_name": "heytimj/pycake",
"id": "49e37d084aa37925d7a2fe5e6e7ea64cea6b6105",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycake/models/SourceAffiliate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156997"
}
],
"symlink_target": ""
}
|
import networkx
from collections import defaultdict
from bingraphvis import *
from bingraphvis.angr import *
from bingraphvis.angr.x86 import *
def set_plot_style(c):
set_style(c)
def plot_common(graph, fname, format="png", type=True):
vis = AngrVisFactory().default_common_graph_pipeline(type=type)
vis.set_output(DotOutput(fname, format=format))
vis.process(graph)
def plot_cfg(cfg, fname, format="png", state=None, asminst=False, vexinst=False, func_addr=None, remove_imports=True, remove_path_terminator=True, remove_simprocedures=False, debug_info=False, comments=True, color_depth=False):
vis = AngrVisFactory().default_cfg_pipeline(cfg, asminst=asminst, vexinst=vexinst, comments=comments)
if remove_imports:
vis.add_transformer(AngrRemoveImports(cfg.project))
if remove_simprocedures:
vis.add_transformer(AngrRemoveSimProcedures())
if func_addr:
vis.add_transformer(AngrFilterNodes(lambda node: node.obj.function_address in func_addr and func_addr[node.obj.function_address]))
if debug_info:
vis.add_content(AngrCFGDebugInfo())
if state:
vis.add_edge_annotator(AngrPathAnnotator(state))
vis.add_node_annotator(AngrPathAnnotator(state))
if color_depth:
vis.add_clusterer(AngrCallstackKeyClusterer())
vis.add_clusterer(ColorDepthClusterer(palette='greens'))
vis.set_output(DotOutput(fname, format=format))
vis.process(cfg.graph)
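# Illustrative call (hypothetical file name and address): render a CFG restricted
# to a single function, with disassembly shown on each node:
#   plot_cfg(cfg, "main_cfg", asminst=True, vexinst=False,
#            func_addr={0x401000: True})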
def plot_func_graph(project, graph, fname, format="png", asminst=True, ailinst=True, vexinst=False, structure=None, color_depth=False):
vis = AngrVisFactory().default_func_graph_pipeline(project, asminst=asminst, ailinst=ailinst, vexinst=vexinst)
if structure:
vis.add_clusterer(AngrStructuredClusterer(structure))
if color_depth:
vis.add_clusterer(ColorDepthClusterer(palette='greens'))
vis.set_output(DotOutput(fname, format=format))
vis.process(graph)
#Note: method signature may be changed in the future
def plot_structured_graph(project, structure, fname, format="png", asminst=True, ailinst=True, vexinst=False, color_depth=False):
vis = AngrVisFactory().default_structured_graph_pipeline(project, asminst=asminst, ailinst=ailinst, vexinst=vexinst)
if color_depth:
vis.add_clusterer(ColorDepthClusterer(palette='greens'))
vis.set_output(DotOutput(fname, format=format))
vis.process(structure)
def plot_cg(kb, fname, format="png", verbose=False, filter=None):
vis = AngrVisFactory().default_cg_pipeline(kb, verbose=verbose)
vis.set_output(DotOutput(fname, format=format))
vis.process(kb, filter)
def plot_cdg(cfg, cdg, fname, format="png", pd_edges=False, cg_edges=True, remove_fakeret=True):
vis = AngrVisFactory().default_cfg_pipeline(cfg, asminst=True, vexinst=False, color_edges=False)
if remove_fakeret:
vis.add_transformer(AngrRemoveFakeretEdges())
if pd_edges:
vis.add_transformer(AngrAddEdges(cdg.get_post_dominators(), color="green", reverse=True))
if cg_edges:
vis.add_transformer(AngrAddEdges(cdg.graph, color="purple", reverse=False))
vis.set_output(DotOutput(fname, format=format))
vis.process(cfg.graph)
def plot_dfg(dfg, fname, format="png"):
vis = AngrVisFactory().default_common_graph_pipeline(type=True)
vis.set_output(DotOutput(fname, format=format))
vis.process(dfg)
#Note: method signature may change in the future
def plot_ddg_stmt(ddg_stmt, fname, format="png", project=None):
vis = AngrVisFactory().default_common_graph_pipeline()
if project:
vis.add_content(AngrAsm(project))
vis.add_content(AngrVex(project))
vis.add_edge_annotator(AngrColorDDGStmtEdges(project))
vis.set_output(DotOutput(fname, format=format))
vis.process(ddg_stmt)
#Note: method signature may change in the future
def plot_ddg_data(ddg_data, fname, format="png", project=None, asminst=False, vexinst=True):
vis = Vis()
vis.set_source(AngrCommonSource())
vis.add_content(AngrDDGLocationHead())
vis.add_content(AngrDDGVariableHead(project=project))
if project:
if asminst:
vis.add_content(AngrAsm(project))
if vexinst:
vis.add_content(AngrVex(project))
acd = AngrColorDDGData(project, labels=True)
vis.add_edge_annotator(acd)
vis.add_node_annotator(acd)
vis.set_output(DotOutput(fname, format=format))
vis.process(ddg_data)
|
{
"content_hash": "dc5964bcde8aa4ef82d0fdd2ce6c3a90",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 227,
"avg_line_length": 44.52,
"alnum_prop": 0.7196765498652291,
"repo_name": "axt/angr-utils",
"id": "3d26952670409b6cd8d1e8152c3d4b24075a36d4",
"size": "4452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angrutils/visualize.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15374"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import redis
import unittest
class PipelineTestCase(unittest.TestCase):
def setUp(self):
self.client = redis.Redis(host='localhost', port=6379, db=9)
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_pipeline(self):
with self.client.pipeline() as pipe:
pipe.set('a', 'a1').get('a').zadd('z', z1=1).zadd('z', z2=4)
pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True)
self.assertEquals(pipe.execute(),
[
True,
'a1',
True,
True,
2.0,
[('z1', 2.0), ('z2', 4)],
]
)
def test_transaction_cant_scatter_gather(self):
with self.client.pipeline(transaction=True) as pipe:
pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1')
self.assertRaises(redis.RedisError, pipe.execute, scatter_gather = True)
def test_pipeline_set_scatter_gather(self):
with self.client.pipeline(transaction=False) as pipe:
pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1')
gather_fn = pipe.execute(scatter_gather = True)
self.assert_(callable(gather_fn))
self.assertEquals(gather_fn(), [True, True, True])
self.assertEquals(self.client['a'], 'a1')
self.assertEquals(self.client['b'], 'b1')
self.assertEquals(self.client['c'], 'c1')
def test_pipeline_scatter_without_gather_reuse_pipeline(self):
p = self.client.pipeline(transaction = False)
p.set('hi', 'ok')
p.get('hi')
p.execute(scatter_gather = True)
self.assertRaises(redis.RedisError, p.zadd, 'hi', 'ok', 1)
def test_pipeline_scatter_with_gather_reuse_pipeline(self):
p = self.client.pipeline(transaction = False)
p.set('hi', 'ok')
p.get('hi')
gather_fn = p.execute(scatter_gather = True)
gather_fn()
self.assert_(p.zadd('hi', 'ok', 1))
def test_pipeline_scatter_with_gather_cant_gather_twice(self):
p = self.client.pipeline(transaction = False)
p.set('hi', 'ok')
p.get('hi')
gather_fn = p.execute(scatter_gather = True)
gather_fn()
self.assertRaises(redis.RedisError, gather_fn)
def test_pipeline_get_scatter_gather(self):
self.client.mset({'a': 'a1', 'b': 'b2'})
with self.client.pipeline(transaction=False) as pipe:
pipe.get('a')
pipe.get('b')
gather_fn = pipe.execute(scatter_gather = True)
self.assertEquals(gather_fn(), ['a1', 'b2'])
def test_pipeline_no_transaction(self):
with self.client.pipeline(transaction=False) as pipe:
pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1')
self.assertEquals(pipe.execute(), [True, True, True])
self.assertEquals(self.client['a'], 'a1')
self.assertEquals(self.client['b'], 'b1')
self.assertEquals(self.client['c'], 'c1')
def test_pipeline_no_transaction_watch(self):
self.client.set('a', 0)
with self.client.pipeline(transaction=False) as pipe:
pipe.watch('a')
a = pipe.get('a')
pipe.multi()
pipe.set('a', int(a) + 1)
result = pipe.execute()
self.assertEquals(result, [True])
def test_pipeline_no_transaction_watch_failure(self):
self.client.set('a', 0)
with self.client.pipeline(transaction=False) as pipe:
pipe.watch('a')
a = pipe.get('a')
self.client.set('a', 'bad')
pipe.multi()
pipe.set('a', int(a) + 1)
self.assertRaises(redis.WatchError, pipe.execute)
def test_invalid_command_in_pipeline(self):
        # all commands but the invalid one should be executed correctly
self.client['c'] = 'a'
with self.client.pipeline() as pipe:
pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4)
result = pipe.execute()
self.assertEquals(result[0], True)
self.assertEquals(self.client['a'], '1')
self.assertEquals(result[1], True)
self.assertEquals(self.client['b'], '2')
# we can't lpush to a key that's a string value, so this should
# be a ResponseError exception
self.assert_(isinstance(result[2], redis.ResponseError))
self.assertEquals(self.client['c'], 'a')
self.assertEquals(result[3], True)
self.assertEquals(self.client['d'], '4')
# make sure the pipe was restored to a working state
self.assertEquals(pipe.set('z', 'zzz').execute(), [True])
self.assertEquals(self.client['z'], 'zzz')
def test_watch_succeed(self):
self.client.set('a', 1)
self.client.set('b', 2)
with self.client.pipeline() as pipe:
pipe.watch('a', 'b')
self.assertEquals(pipe.watching, True)
a = pipe.get('a')
b = pipe.get('b')
self.assertEquals(a, '1')
self.assertEquals(b, '2')
pipe.multi()
pipe.set('c', 3)
self.assertEquals(pipe.execute(), [True])
self.assertEquals(pipe.watching, False)
def test_watch_failure(self):
self.client.set('a', 1)
self.client.set('b', 2)
with self.client.pipeline() as pipe:
pipe.watch('a', 'b')
self.client.set('b', 3)
pipe.multi()
pipe.get('a')
self.assertRaises(redis.WatchError, pipe.execute)
self.assertEquals(pipe.watching, False)
def test_unwatch(self):
self.client.set('a', 1)
self.client.set('b', 2)
with self.client.pipeline() as pipe:
pipe.watch('a', 'b')
self.client.set('b', 3)
pipe.unwatch()
self.assertEquals(pipe.watching, False)
pipe.get('a')
self.assertEquals(pipe.execute(), ['1'])
def test_transaction_callable(self):
self.client.set('a', 1)
self.client.set('b', 2)
has_run = []
def my_transaction(pipe):
a = pipe.get('a')
self.assert_(a in ('1', '2'))
b = pipe.get('b')
self.assertEquals(b, '2')
            # silly run-once code... incr's a so WatchError should be raised
# forcing this all to run again
if not has_run:
self.client.incr('a')
has_run.append('it has')
pipe.multi()
pipe.set('c', int(a)+int(b))
result = self.client.transaction(my_transaction, 'a', 'b')
self.assertEquals(result, [True])
self.assertEquals(self.client.get('c'), '4')
|
{
"content_hash": "99c3479f33d669c48470b291b16e94d3",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 84,
"avg_line_length": 36.03125,
"alnum_prop": 0.5355594102341718,
"repo_name": "raphaelfruneaux/redis-py",
"id": "e1322a3653d25233bf1a571c955b207f5dfc9785",
"size": "6918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139803"
}
],
"symlink_target": ""
}
|
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name="catalogue", packages=find_packages())
|
{
"content_hash": "37f4620771d0c4012911f3b2106d06a6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 53,
"avg_line_length": 32.5,
"alnum_prop": 0.6538461538461539,
"repo_name": "explosion/catalogue",
"id": "347ead66ab2a8b2ae2708a422f0b68eaa7368b5c",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36740"
},
{
"name": "Shell",
"bytes": "351"
}
],
"symlink_target": ""
}
|
"""Nginx Configuration"""
import logging
import os
import re
import shutil
import socket
import subprocess
import sys
import time
import OpenSSL
import zope.interface
from acme import challenges
from acme import crypto_util as acme_crypto_util
from letsencrypt import achallenges
from letsencrypt import constants as core_constants
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import reverter
from letsencrypt.plugins import common
from letsencrypt_nginx import constants
from letsencrypt_nginx import dvsni
from letsencrypt_nginx import obj
from letsencrypt_nginx import parser
logger = logging.getLogger(__name__)
class NginxConfigurator(common.Plugin):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Nginx configurator.
.. todo:: Add proper support for comments in the config. Currently,
config files modified by the configurator will lose all their comments.
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_nginx.parser`
:ivar str save_notes: Human-readable config change notes
:ivar reverter: saves and reverts checkpoints
:type reverter: :class:`letsencrypt.reverter.Reverter`
:ivar tup version: version of Nginx
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Nginx Web Server - currently doesn't work"
@classmethod
def add_parser_arguments(cls, add):
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Nginx server root directory.")
add("ctl", default=constants.CLI_DEFAULTS["ctl"], help="Path to the "
"'nginx' binary, used for 'configtest' and retrieving nginx "
"version number.")
@property
def nginx_conf(self):
"""Nginx config file path."""
return os.path.join(self.conf("server_root"), "nginx.conf")
def __init__(self, *args, **kwargs):
"""Initialize an Nginx Configurator.
:param tup version: version of Nginx as a tuple (1, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(NginxConfigurator, self).__init__(*args, **kwargs)
# Verify that all directories and files exist with proper permissions
self._verify_setup()
# Files to save
self.save_notes = ""
# Add number of outstanding challenges
self._chall_out = 0
# These will be set in the prepare function
self.parser = None
self.version = version
self._enhance_func = {} # TODO: Support at least redirects
# Set up reverter
self.reverter = reverter.Reverter(self.config)
self.reverter.recovery_routine()
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
# This is called in determine_authenticator and determine_installer
def prepare(self):
"""Prepare the authenticator/installer."""
self.parser = parser.NginxParser(
self.conf('server-root'), self.mod_ssl_conf)
# Set Version
if self.version is None:
self.version = self.get_version()
temp_install(self.mod_ssl_conf)
# Entry point in main.py for installing cert
def deploy_cert(self, domain, cert_path, key_path,
chain_path, fullchain_path):
# pylint: disable=unused-argument
"""Deploys certificate to specified virtual host.
.. note:: Aborts if the vhost is missing ssl_certificate or
ssl_certificate_key.
.. note:: Nginx doesn't have a cert chain directive.
It expects the cert file to have the concatenated chain.
However, we use the chain file as input to the
            ssl_trusted_certificate directive, used to verify OCSP responses.
.. note:: This doesn't save the config files!
"""
vhost = self.choose_vhost(domain)
cert_directives = [['ssl_certificate', fullchain_path],
['ssl_certificate_key', key_path]]
# OCSP stapling was introduced in Nginx 1.3.7. If we have that version
# or greater, add config settings for it.
stapling_directives = []
if self.version >= (1, 3, 7):
stapling_directives = [
['ssl_trusted_certificate', chain_path],
['ssl_stapling', 'on'],
['ssl_stapling_verify', 'on']]
try:
self.parser.add_server_directives(vhost.filep, vhost.names,
cert_directives, replace=True)
self.parser.add_server_directives(vhost.filep, vhost.names,
stapling_directives, replace=False)
logger.info("Deployed Certificate to VirtualHost %s for %s",
vhost.filep, vhost.names)
except errors.MisconfigurationError as error:
logger.debug(error)
logger.warn(
"Cannot find a cert or key directive in %s for %s. "
"VirtualHost was not modified.", vhost.filep, vhost.names)
# Presumably break here so that the virtualhost is not modified
return False
self.save_notes += ("Changed vhost at %s with addresses of %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs)))
self.save_notes += "\tssl_certificate %s\n" % cert_path
self.save_notes += "\tssl_certificate_key %s\n" % key_path
#######################
# Vhost parsing methods
#######################
def choose_vhost(self, target_name):
"""Chooses a virtual host based on the given domain name.
.. note:: This makes the vhost SSL-enabled if it isn't already. Follows
Nginx's server block selection rules preferring blocks that are
already SSL.
.. todo:: This should maybe return list if no obvious answer
is presented.
.. todo:: The special name "$hostname" corresponds to the machine's
hostname. Currently we just ignore this.
:param str target_name: domain name
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_nginx.obj.VirtualHost`
"""
vhost = None
matches = self._get_ranked_matches(target_name)
if not matches:
# No matches. Create a new vhost with this name in nginx.conf.
filep = self.parser.loc["root"]
new_block = [['server'], [['server_name', target_name]]]
self.parser.add_http_directives(filep, new_block)
vhost = obj.VirtualHost(filep, set([]), False, True,
set([target_name]), list(new_block[1]))
elif matches[0]['rank'] in xrange(2, 6):
# Wildcard match - need to find the longest one
rank = matches[0]['rank']
wildcards = [x for x in matches if x['rank'] == rank]
vhost = max(wildcards, key=lambda x: len(x['name']))['vhost']
else:
vhost = matches[0]['vhost']
if vhost is not None:
if not vhost.ssl:
self._make_server_ssl(vhost)
return vhost
def _get_ranked_matches(self, target_name):
"""Returns a ranked list of vhosts that match target_name.
:param str target_name: The name to match
:returns: list of dicts containing the vhost, the matching name, and
the numerical rank
:rtype: list
"""
# Nginx chooses a matching server name for a request with precedence:
# 1. exact name match
# 2. longest wildcard name starting with *
# 3. longest wildcard name ending with *
# 4. first matching regex in order of appearance in the file
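        # For example (hypothetical names), when matching "www.example.com":
        # an exact "www.example.com" ranks 0 (SSL vhost) or 1 (non-SSL),
        # "*.example.com" ranks 2/3, "www.example.*" ranks 4/5, and a regex
        # server name ranks 6/7, mirroring nginx's own selection order.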
matches = []
for vhost in self.parser.get_vhosts():
name_type, name = parser.get_best_match(target_name, vhost.names)
if name_type == 'exact':
matches.append({'vhost': vhost,
'name': name,
'rank': 0 if vhost.ssl else 1})
elif name_type == 'wildcard_start':
matches.append({'vhost': vhost,
'name': name,
'rank': 2 if vhost.ssl else 3})
elif name_type == 'wildcard_end':
matches.append({'vhost': vhost,
'name': name,
'rank': 4 if vhost.ssl else 5})
elif name_type == 'regex':
matches.append({'vhost': vhost,
'name': name,
'rank': 6 if vhost.ssl else 7})
return sorted(matches, key=lambda x: x['rank'])
def get_all_names(self):
"""Returns all names found in the Nginx Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
for vhost in self.parser.get_vhosts():
all_names.update(vhost.names)
for addr in vhost.addrs:
host = addr.get_addr()
if common.hostname_regex.match(host):
# If it's a hostname, add it to the names.
all_names.add(host)
elif not common.private_ips_regex.match(host):
# If it isn't a private IP, do a reverse DNS lookup
# TODO: IPv6 support
try:
socket.inet_aton(host)
all_names.add(socket.gethostbyaddr(host)[0])
except (socket.error, socket.herror, socket.timeout):
continue
return all_names
def _get_snakeoil_paths(self):
# TODO: generate only once
tmp_dir = os.path.join(self.config.work_dir, "snakeoil")
le_key = crypto_util.init_save_key(
key_size=1024, key_dir=tmp_dir, keyname="key.pem")
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, le_key.pem)
cert = acme_crypto_util.gen_ss_cert(key, domains=[socket.gethostname()])
cert_path = os.path.join(tmp_dir, "cert.pem")
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert)
with open(cert_path, 'w') as cert_file:
cert_file.write(cert_pem)
return cert_path, le_key.file
def _make_server_ssl(self, vhost):
"""Make a server SSL.
Make a server SSL based on server_name and filename by adding a
``listen IConfig.dvsni_port ssl`` directive to the server block.
.. todo:: Maybe this should create a new block instead of modifying
the existing one?
:param vhost: The vhost to add SSL to.
:type vhost: :class:`~letsencrypt_nginx.obj.VirtualHost`
"""
snakeoil_cert, snakeoil_key = self._get_snakeoil_paths()
ssl_block = [['listen', '{0} ssl'.format(self.config.dvsni_port)],
# access and error logs necessary for integration
# testing (non-root)
['access_log', os.path.join(
self.config.work_dir, 'access.log')],
['error_log', os.path.join(
self.config.work_dir, 'error.log')],
['ssl_certificate', snakeoil_cert],
['ssl_certificate_key', snakeoil_key],
['include', self.parser.loc["ssl_options"]]]
self.parser.add_server_directives(
vhost.filep, vhost.names, ssl_block)
vhost.ssl = True
vhost.raw.extend(ssl_block)
vhost.addrs.add(obj.Addr('', str(self.config.dvsni_port), True, False))
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
return self.parser.get_all_certs_keys()
##################################
# enhancement methods (IInstaller)
##################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return []
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
"""
try:
return self._enhance_func[enhancement](
self.choose_vhost(domain), options)
except (KeyError, ValueError):
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
######################################
# Nginx server management (IInstaller)
######################################
def restart(self):
"""Restarts nginx server.
:returns: Success
:rtype: bool
"""
return nginx_restart(self.conf('ctl'), self.nginx_conf)
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Nginx for errors.
:returns: Success
:rtype: bool
"""
try:
proc = subprocess.Popen(
[self.conf('ctl'), "-c", self.nginx_conf, "-t"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.fatal("Unable to run nginx config test")
sys.exit(1)
if proc.returncode != 0:
# Enter recovery routine...
logger.error("Config test failed\n%s\n%s", stdout, stderr)
return False
return True
def _verify_setup(self):
"""Verify the setup to ensure safe operating environment.
Make sure that files/directories are setup with appropriate permissions
Aim for defensive coding... make sure all input files
have permissions of root.
"""
uid = os.geteuid()
le_util.make_or_verify_dir(
self.config.work_dir, core_constants.CONFIG_DIRS_MODE, uid)
le_util.make_or_verify_dir(
self.config.backup_dir, core_constants.CONFIG_DIRS_MODE, uid)
le_util.make_or_verify_dir(
self.config.config_dir, core_constants.CONFIG_DIRS_MODE, uid)
def get_version(self):
"""Return version of Nginx Server.
Version is returned as a tuple (e.g. 2.4.7 = (2, 4, 7)).
:returns: version
:rtype: tuple
:raises .PluginError:
Unable to find Nginx version or version is unsupported
"""
try:
proc = subprocess.Popen(
[self.conf('ctl'), "-c", self.nginx_conf, "-V"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
text = proc.communicate()[1] # nginx prints output to stderr
except (OSError, ValueError) as error:
logging.debug(error, exc_info=True)
raise errors.PluginError(
"Unable to run %s -V" % self.conf('ctl'))
version_regex = re.compile(r"nginx/([0-9\.]*)", re.IGNORECASE)
version_matches = version_regex.findall(text)
sni_regex = re.compile(r"TLS SNI support enabled", re.IGNORECASE)
sni_matches = sni_regex.findall(text)
ssl_regex = re.compile(r" --with-http_ssl_module")
ssl_matches = ssl_regex.findall(text)
if not version_matches:
raise errors.PluginError("Unable to find Nginx version")
if not ssl_matches:
raise errors.PluginError(
"Nginx build is missing SSL module (--with-http_ssl_module).")
if not sni_matches:
raise errors.PluginError("Nginx build doesn't support SNI")
nginx_version = tuple([int(i) for i in version_matches[0].split(".")])
# nginx < 0.8.48 uses machine hostname as default server_name instead of
# the empty string
if nginx_version < (0, 8, 48):
raise errors.NotSupportedError("Nginx version must be 0.8.48+")
return nginx_version
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Nginx to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###################################################
# Wrapper functions for Reverter class (IInstaller)
###################################################
def save(self, title=None, temporary=False):
"""Saves all changes to the configuration files.
:param str title: The title of the save. If a title is given, the
configuration will be saved as a new checkpoint and put in a
timestamped directory.
:param bool temporary: Indicates whether the changes made will
be quickly reversed in the future (i.e. challenges)
"""
save_files = set(self.parser.parsed.keys())
# Create Checkpoint
if temporary:
self.reverter.add_to_temp_checkpoint(
save_files, self.save_notes)
else:
self.reverter.add_to_checkpoint(save_files,
self.save_notes)
# Change 'ext' to something else to not override existing conf files
self.parser.filedump(ext='')
if title and not temporary:
self.reverter.finalize_checkpoint(title)
return True
def recovery_routine(self):
"""Revert all previously modified files.
Reverts all modified files that have not been saved as a checkpoint
"""
self.reverter.recovery_routine()
self.parser.load()
def revert_challenge_config(self):
"""Used to cleanup challenge configurations."""
self.reverter.revert_temporary_config()
self.parser.load()
def rollback_checkpoints(self, rollback=1):
"""Rollback saved checkpoints.
:param int rollback: Number of checkpoints to revert
"""
self.reverter.rollback_checkpoints(rollback)
self.parser.load()
def view_config_changes(self):
"""Show all of the configuration changes that have taken place."""
self.reverter.view_config_changes()
###########################################################################
# Challenges Section for IAuthenticator
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.DVSNI]
# Entry point in main.py for performing challenges
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
If this turns out not to be the case in the future, cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out += len(achalls)
responses = [None] * len(achalls)
nginx_dvsni = dvsni.NginxDvsni(self)
for i, achall in enumerate(achalls):
if isinstance(achall, achallenges.DVSNI):
# Currently also have dvsni hold associated index
# of the challenge. This helps to put all of the responses back
# together when they are all complete.
nginx_dvsni.add_chall(achall, i)
sni_response = nginx_dvsni.perform()
# Must restart in order to activate the challenges.
# Handled here because we may be able to load up other challenge types
self.restart()
# Go through all of the challenges and assign them to the proper place
# in the responses return value. All responses must be in the same order
# as the original challenges.
for i, resp in enumerate(sni_response):
responses[nginx_dvsni.indices[i]] = resp
return responses
# called after challenges are performed
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out -= len(achalls)
# If all of the challenges have been finished, clean up everything
if self._chall_out <= 0:
self.revert_challenge_config()
self.restart()
def nginx_restart(nginx_ctl, nginx_conf="/etc/nginx.conf"):
"""Restarts the Nginx Server.
.. todo:: Nginx restart is fatal if the configuration references
non-existent SSL cert/key files. Remove references to /etc/letsencrypt
before restart.
:param str nginx_ctl: Path to the Nginx binary.
"""
try:
proc = subprocess.Popen([nginx_ctl, "-c", nginx_conf, "-s", "reload"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
# Maybe Nginx isn't running
nginx_proc = subprocess.Popen([nginx_ctl, "-c", nginx_conf],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = nginx_proc.communicate()
if nginx_proc.returncode != 0:
# Enter recovery routine...
logger.error("Nginx Restart Failed!\n%s\n%s", stdout, stderr)
return False
except (OSError, ValueError):
logger.fatal("Nginx Restart Failed - Please Check the Configuration")
sys.exit(1)
# Nginx can take a moment to recognize a newly added TLS SNI servername, so sleep
# for a second. TODO: Check for expected servername and loop until it
# appears or return an error if looping too long.
time.sleep(1)
return True
def temp_install(options_ssl):
"""Temporary install for convenience."""
# WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
# THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
# AND TAKEN OUT BEFORE RELEASE, INSTEAD
# SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
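# Illustrative sketch (appended for this document, not part of the upstream
# plugin): how the rank values assigned in _get_ranked_matches() decide which
# server block wins. The dicts below are hand-written stand-ins for real match
# entries; lower rank wins, and SSL-enabled blocks outrank plain ones of the
# same match type.
if __name__ == "__main__":
    example_matches = [
        {'vhost': 'plain exact match', 'name': 'example.com', 'rank': 1},
        {'vhost': 'ssl wildcard_start match', 'name': '*.example.com', 'rank': 2},
        {'vhost': 'ssl exact match', 'name': 'example.com', 'rank': 0},
    ]
    ranked = sorted(example_matches, key=lambda x: x['rank'])
    print(ranked[0]['vhost'])  # -> 'ssl exact match'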
|
{
"content_hash": "dc246139bb293ebadd01a11ebced0ec7",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 85,
"avg_line_length": 37.35703001579779,
"alnum_prop": 0.5807502008711465,
"repo_name": "hsduk/lets-encrypt-preview",
"id": "29e69e498ae183f5c05661bf90340ff342f56c77",
"size": "23647",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "letsencrypt-nginx/letsencrypt_nginx/configurator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1148972"
},
{
"name": "Shell",
"bytes": "20124"
}
],
"symlink_target": ""
}
|
from telemetry.page import page_benchmark_unittest_base
from perf_tools import image_decoding_benchmark
class ImageDecodingBenchmarkUnitTest(
page_benchmark_unittest_base.PageBenchmarkUnitTestBase):
def testImageDecodingMeasurement(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('image_decoding.html')
benchmark = image_decoding_benchmark.ImageDecoding()
all_results = self.RunBenchmark(benchmark, ps)
self.assertEqual(0, len(all_results.page_failures))
self.assertEqual(1, len(all_results.page_results))
results0 = all_results.page_results[0]
self.assertTrue('ImageDecoding_avg' in results0)
self.assertTrue(results0['ImageDecoding_avg'] > 0)
|
{
"content_hash": "3b79d942b8b7364a93bda3aa6d3a123e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 36.578947368421055,
"alnum_prop": 0.7784172661870503,
"repo_name": "codenote/chromium-test",
"id": "1bbe27dabb9b3cf1338e7e0f075d0b8a4e361f09",
"size": "862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/perf/perf_tools/image_decoding_benchmark_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Support for TPLink lights."""
import logging
import time
from pyHS100 import SmartBulb, SmartDeviceException
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
Light,
)
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from . import CONF_LIGHT, DOMAIN as TPLINK_DOMAIN
from .common import async_add_entities_retry
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_W = "current_power_w"
ATTR_DAILY_ENERGY_KWH = "daily_energy_kwh"
ATTR_MONTHLY_ENERGY_KWH = "monthly_energy_kwh"
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform.
Deprecated.
"""
_LOGGER.warning(
"Loading as a platform is no longer supported, "
"convert to use the tplink component."
)
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
"""Set up switches."""
await async_add_entities_retry(
hass, async_add_entities, hass.data[TPLINK_DOMAIN][CONF_LIGHT], add_entity
)
return True
def add_entity(device: SmartBulb, async_add_entities):
"""Check if device is online and add the entity."""
# Attempt to get the sysinfo. If it fails, it will raise an
# exception that is caught by async_add_entities_retry which
# will try again later.
device.get_sysinfo()
async_add_entities([TPLinkSmartBulb(device)], update_before_add=True)
def brightness_to_percentage(byt):
"""Convert brightness from absolute 0..255 to percentage."""
return int((byt * 100.0) / 255.0)
def brightness_from_percentage(percent):
"""Convert percentage to absolute value 0..255."""
return (percent * 255.0) / 100.0
class TPLinkSmartBulb(Light):
"""Representation of a TPLink Smart Bulb."""
def __init__(self, smartbulb: SmartBulb) -> None:
"""Initialize the bulb."""
self.smartbulb = smartbulb
self._sysinfo = None
self._state = None
self._available = False
self._color_temp = None
self._brightness = None
self._hs = None
self._supported_features = None
self._min_mireds = None
self._max_mireds = None
self._emeter_params = {}
self._mac = None
self._alias = None
self._model = None
@property
def unique_id(self):
"""Return a unique ID."""
return self._mac
@property
def name(self):
"""Return the name of the Smart Bulb."""
return self._alias
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self._alias,
"model": self._model,
"manufacturer": "TP-Link",
"connections": {(dr.CONNECTION_NETWORK_MAC, self._mac)},
"sw_version": self._sysinfo["sw_ver"],
}
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def turn_on(self, **kwargs):
"""Turn the light on."""
self._state = True
self.smartbulb.state = SmartBulb.BULB_STATE_ON
if ATTR_COLOR_TEMP in kwargs:
self._color_temp = kwargs.get(ATTR_COLOR_TEMP)
self.smartbulb.color_temp = mired_to_kelvin(self._color_temp)
brightness_value = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
brightness_pct = brightness_to_percentage(brightness_value)
if ATTR_HS_COLOR in kwargs:
self._hs = kwargs.get(ATTR_HS_COLOR)
hue, sat = self._hs
hsv = (int(hue), int(sat), brightness_pct)
self.smartbulb.hsv = hsv
elif ATTR_BRIGHTNESS in kwargs:
self._brightness = brightness_value
self.smartbulb.brightness = brightness_pct
def turn_off(self, **kwargs):
"""Turn the light off."""
self._state = False
self.smartbulb.state = SmartBulb.BULB_STATE_OFF
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
return self._min_mireds
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
return self._max_mireds
@property
def color_temp(self):
"""Return the color temperature of this light in mireds for HA."""
return self._color_temp
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self):
"""Return the color."""
return self._hs
@property
def is_on(self):
"""Return True if device is on."""
return self._state
def update(self):
"""Update the TP-Link Bulb's state."""
if self._supported_features is None:
# First run, update by blocking.
self.do_update()
else:
# Not first run, update in the background.
self.hass.add_job(self.do_update)
def do_update(self):
"""Update states."""
try:
if self._supported_features is None:
self.get_features()
self._state = self.smartbulb.state == SmartBulb.BULB_STATE_ON
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = brightness_from_percentage(self.smartbulb.brightness)
if self._supported_features & SUPPORT_COLOR_TEMP:
if (
self.smartbulb.color_temp is not None
and self.smartbulb.color_temp != 0
):
self._color_temp = kelvin_to_mired(self.smartbulb.color_temp)
if self._supported_features & SUPPORT_COLOR:
hue, sat, _ = self.smartbulb.hsv
self._hs = (hue, sat)
if self.smartbulb.has_emeter:
self._emeter_params[ATTR_CURRENT_POWER_W] = "{:.1f}".format(
self.smartbulb.current_consumption()
)
daily_statistics = self.smartbulb.get_emeter_daily()
monthly_statistics = self.smartbulb.get_emeter_monthly()
try:
self._emeter_params[ATTR_DAILY_ENERGY_KWH] = "{:.3f}".format(
daily_statistics[int(time.strftime("%d"))]
)
self._emeter_params[ATTR_MONTHLY_ENERGY_KWH] = "{:.3f}".format(
monthly_statistics[int(time.strftime("%m"))]
)
except KeyError:
# device returned no daily/monthly history
pass
self._available = True
except (SmartDeviceException, OSError) as ex:
if self._available:
_LOGGER.warning(
"Could not read state for %s: %s", self.smartbulb.host, ex
)
self._available = False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def get_features(self):
"""Determine all supported features in one go."""
self._sysinfo = self.smartbulb.sys_info
self._supported_features = 0
self._mac = self.smartbulb.mac
self._alias = self.smartbulb.alias
self._model = self.smartbulb.model
if self.smartbulb.is_dimmable:
self._supported_features += SUPPORT_BRIGHTNESS
if getattr(self.smartbulb, "is_variable_color_temp", False):
self._supported_features += SUPPORT_COLOR_TEMP
self._min_mireds = kelvin_to_mired(
self.smartbulb.valid_temperature_range[1]
)
self._max_mireds = kelvin_to_mired(
self.smartbulb.valid_temperature_range[0]
)
if getattr(self.smartbulb, "is_color", False):
self._supported_features += SUPPORT_COLOR
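# Illustrative sketch (appended for this document, not part of the upstream
# component; running it requires the module's pyHS100/homeassistant imports to
# resolve): the two helpers above convert between Home Assistant's 0..255
# brightness scale and the bulb's 0..100 percentage scale.
if __name__ == "__main__":
    for byt in (0, 128, 255):
        pct = brightness_to_percentage(byt)
        print(byt, "->", pct, "->", round(brightness_from_percentage(pct)))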
|
{
"content_hash": "7bba0dbfd89a5e97c71ce9269fe05d47",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 88,
"avg_line_length": 32,
"alnum_prop": 0.5917300380228137,
"repo_name": "leppa/home-assistant",
"id": "ec3307fc87e3f03f4384d2af3ce8e44dfac50f95",
"size": "8416",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tplink/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from .base import *
DEBUG = False
TESTING = get_env_variable_bool('TESTING')
if get_env_variable_bool('SSL'):
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ALLOWED_HOSTS = [get_env_variable('ALLOWED_HOSTS'), ]
DOMAIN = get_env_variable('DOMAIN')
DATABASE = DOMAIN.replace('.', '_')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': DATABASE,
'USER': DATABASE,
'PASSWORD': get_env_variable('DB_PASS'),
'HOST': get_env_variable('DB_IP'),
'PORT': '',
}
}
# Celery
from kombu import Exchange, Queue
# transport
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
# number of worker processes (will be 3 == controller, worker and beat)
CELERYD_CONCURRENCY = 1
# rate limits
CELERY_DISABLE_RATE_LIMITS = True
# serializer
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
# queue
CELERY_DEFAULT_QUEUE = DATABASE
CELERY_QUEUES = (
Queue(DATABASE, Exchange(DATABASE), routing_key=DATABASE),
)
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
'mail_time_summary': {
'task': 'invoice.tasks.mail_time_summary',
'schedule': crontab(minute='30', hour='5'),
},
'process_mail': {
'task': 'mail.tasks.process_mail',
'schedule': crontab(minute='1', hour='*/1'),
},
'time_summary_by_user': {
'task': 'invoice.tasks.time_summary_by_user',
'schedule': crontab(minute='30', hour='4'),
},
'update_search_index': {
'task': 'search.tasks.update_search_index',
'schedule': crontab(minute='15', hour='*/1'),
},
}
FTP_STATIC_DIR = None
FTP_STATIC_URL = None
HAYSTACK_CONNECTIONS = {
'default': {
'BATCH_SIZE': 100,
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'INDEX_NAME': '{}'.format(SITE_NAME),
'TIMEOUT': 60 * 5,
'URL': 'http://127.0.0.1:9200/',
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = get_env_variable("MEDIA_ROOT")
# https://github.com/johnsensible/django-sendfile
SENDFILE_BACKEND = 'sendfile.backends.nginx'
SENDFILE_ROOT = get_env_variable("SENDFILE_ROOT")
SENDFILE_URL = '/private'
# Django debug toolbar (this is the address of the client not the server)
# INTERNAL_IPS = ('87.115.141.255',)
THUMBNAIL_DEBUG = DEBUG
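# Illustrative note (appended for this document, not part of the deployed
# settings): the Postgres database/user name and the Celery queue are all
# derived from DOMAIN by replacing dots, e.g.
#   "demo.example.com".replace(".", "_")  ->  "demo_example_com"
# so each deployed site gets its own database and its own task queue.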
|
{
"content_hash": "cf33f563e90a9c9eaa486c13193a9d3b",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 86,
"avg_line_length": 28.0989010989011,
"alnum_prop": 0.650371529135706,
"repo_name": "pkimber/kbsoftware_couk",
"id": "39e7b8a741d49dffcdc1b00693040edb51b3e793",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/production.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15153"
},
{
"name": "Python",
"bytes": "29718"
},
{
"name": "Shell",
"bytes": "1242"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Sequence
from sqlalchemy import Column
from sqlalchemy import BigInteger, Integer, SmallInteger
from sqlalchemy import DateTime, Float, String, Unicode
from datetime import datetime
from mabolab.database.dbsession import Base
from mabolab.database.model import ColumnMixin
class CertificatePrintm(Base, ColumnMixin):
__tablename__ = 'mt_t_certificate_printm'
def __init__(self, createdby):
"""init"""
self.createdby = createdby
self.createdon = datetime.now()
self.lastupdateon = datetime.now()
self.lastupdatedby = createdby
self.active = 1
self.rowversionstamp = 1
def __repr__(self):
return "<CertificatePrintm(%s, )>" \
% (self.id, )
|
{
"content_hash": "67e546a715414491a26e66a285ec5388",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 56,
"avg_line_length": 22.71794871794872,
"alnum_prop": 0.6613995485327314,
"repo_name": "mabotech/maboss.py",
"id": "097828045e59487a07fc52bcccd6cd888ddca698",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maboss/webx/tools/output/models/certificate_printm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14864"
},
{
"name": "JavaScript",
"bytes": "4950"
},
{
"name": "Lua",
"bytes": "683"
},
{
"name": "Python",
"bytes": "433923"
},
{
"name": "Shell",
"bytes": "667"
}
],
"symlink_target": ""
}
|
from neomodel.core import StructuredNode
from neomodel.exceptions import InflateConflict, DeflateConflict
from neomodel.util import _get_node_properties
class SemiStructuredNode(StructuredNode):
"""
A base class allowing properties to be stored on a node that aren't
specified in its definition. Conflicting properties are signaled with the
:class:`DeflateConflict` exception::
class Person(SemiStructuredNode):
name = StringProperty()
age = IntegerProperty()
def hello(self):
print("Hi my names " + self.name)
tim = Person(name='Tim', age=8, weight=11).save()
tim.hello = "Hi"
tim.save() # DeflateConflict
"""
__abstract_node__ = True
def __init__(self, *args, **kwargs):
super(SemiStructuredNode, self).__init__(*args, **kwargs)
@classmethod
def inflate(cls, node):
# support lazy loading
if isinstance(node, int):
snode = cls()
snode.id = node
else:
props = {}
for key, prop in cls.__all_properties__:
node_properties = _get_node_properties(node)
if key in node_properties:
props[key] = prop.inflate(node_properties[key], node)
elif prop.has_default:
props[key] = prop.default_value()
else:
props[key] = None
# handle properties not defined on the class
for free_key in (x for x in node_properties if x not in props):
if hasattr(cls, free_key):
raise InflateConflict(cls, free_key,
node_properties[free_key], node.id)
props[free_key] = node_properties[free_key]
snode = cls(**props)
snode.id = node.id
return snode
@classmethod
def deflate(cls, node_props, obj=None, skip_empty=False):
deflated = super(SemiStructuredNode, cls).deflate(node_props, obj,
skip_empty=skip_empty)
for key in [k for k in node_props if k not in deflated]:
if hasattr(cls, key):
raise DeflateConflict(cls, key, node_props[key], obj.id)
node_props.update(deflated)
return node_props
|
{
"content_hash": "b63dda6453daece19574698800ec4a0c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 37.015625,
"alnum_prop": 0.5571971295905446,
"repo_name": "robinedwards/neomodel",
"id": "883a5895991c7ccc21ccbccd246fb4802705a3a0",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neomodel/contrib/semi_structured.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140274"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
}
|
__author__ = 'Jan-Piet Mens <jpmens()gmail.com>'
__copyright__ = 'Copyright 2014 Jan-Piet Mens'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
import socket
import time
def plugin(srv, item):
srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
# item.config is brought in from the configuration file
config = item.config
# addrs is a list[] associated with a particular target.
try:
carbon_host, carbon_port = item.addrs
carbon_port = int(carbon_port)
except:
srv.logging.error("Configuration for target `carbon' is incorrect")
return False
# If the incoming payload has been transformed, use that,
# else the original payload
text = item.message
try:
parts = text.split()
except:
srv.logging.error("target `carbon': cannot split string")
return False
if len(parts) == 1:
metric_name = item.data.get('topic', 'ohno').replace('/', '.')
value = parts[0]
tics = int(time.time())
else:
if len(parts) == 2:
metric_name = parts[0]
value = parts[1]
tics = int(time.time())
else:
if len(parts) == 3:
metric_name = parts[0]
value = parts[1]
tics = int(parts[2])
carbon_msg = "%s %s %d" % (metric_name, value, tics)
srv.logging.debug("Sending to carbon: %s" % (carbon_msg))
carbon_msg = carbon_msg + "\n"
try:
sock = socket.socket()
sock.connect((carbon_host, carbon_port))
sock.sendall(carbon_msg)
sock.close()
except Exception, e:
srv.logging.warning("Cannot send to carbon service %s:%d: %s" % (carbon_host, carbon_port, str(e)))
return False
return True
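# Illustrative sketch (appended for this document, not part of the original
# service): the three payload shapes plugin() understands and the carbon line
# each produces. Topic and timestamp values below are made up.
if __name__ == "__main__":
    sample_topic = "home/kitchen/temperature"
    for payload in ("21.7",
                    "kitchen.temperature 21.7",
                    "kitchen.temperature 21.7 1400000000"):
        parts = payload.split()
        if len(parts) == 1:
            line = "%s %s %d" % (sample_topic.replace('/', '.'), parts[0], int(time.time()))
        elif len(parts) == 2:
            line = "%s %s %d" % (parts[0], parts[1], int(time.time()))
        else:
            line = "%s %s %d" % (parts[0], parts[1], int(parts[2]))
        print(line)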
|
{
"content_hash": "5c557ae9cbdd04d776c53e82a4ff07d1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 107,
"avg_line_length": 30.704918032786885,
"alnum_prop": 0.5750133475707421,
"repo_name": "sourceperl/docker.mqttwarn",
"id": "72395f56e68913d783a09ebb07d775d723c8bec2",
"size": "1920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "services/carbon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101494"
}
],
"symlink_target": ""
}
|
from atelier.invlib import setup_from_tasks
ns = setup_from_tasks(
globals(), "atelier",
blogref_url="http://luc.lino-framework.org",
revision_control_system='git',
# tolerate_sphinx_warnings=True,
cleanable_files=['docs/api/atelier.*'])
|
{
"content_hash": "b88e1adc84abbdd594003d061eb236c1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.689922480620155,
"repo_name": "lsaffre/atelier",
"id": "d5fc82d7d3f0d2427086745854131314afd36ca9",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "163112"
}
],
"symlink_target": ""
}
|
import decimal
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from django.contrib.auth import authenticate, login, logout
from mock import Mock
from ..middleware import ActiveSubscriptionMiddleware
from ..models import Customer, CurrentSubscription
from ..utils import get_user_model
class DummySession(dict):
def cycle_key(self):
return
def flush(self):
return
class ActiveSubscriptionMiddlewareTests(TestCase):
urls = 'payments.tests.test_urls'
def setUp(self):
self.middleware = ActiveSubscriptionMiddleware()
self.request = Mock()
self.request.META = {}
self.request.session = DummySession()
self.old_urls = settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS
settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS += (
'signup',
'password_reset'
)
user = get_user_model().objects.create_user(username="patrick")
user.set_password("eldarion")
user.save()
user = authenticate(username="patrick", password="eldarion")
login(self.request, user)
def tearDown(self):
settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS = self.old_urls
def test_authed_user_with_no_customer_redirects_on_non_exempt_url(self):
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response._headers["location"][1], # pylint: disable=W0212
reverse(settings.SUBSCRIPTION_REQUIRED_REDIRECT)
)
def test_authed_user_with_no_customer_passes_with_exempt_url(self):
self.request.path = "/accounts/signup/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_customer_passes_with_exempt_url_containing_pattern(self):
self.request.path = "/password/reset/confirm/test-token/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_active_subscription_passes_with_exempt_url(self):
Customer.objects.create(stripe_id="cus_1", user=self.request.user)
self.request.path = "/accounts/signup/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_active_subscription_redirects_on_non_exempt_url(self):
Customer.objects.create(stripe_id="cus_1", user=self.request.user)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response._headers["location"][1], # pylint: disable=W0212
reverse(settings.SUBSCRIPTION_REQUIRED_REDIRECT)
)
def test_authed_user_with_active_subscription_redirects_on_non_exempt_url(self):
customer = Customer.objects.create(
stripe_id="cus_1",
user=self.request.user
)
CurrentSubscription.objects.create(
customer=customer,
plan="pro",
quantity=1,
start=timezone.now(),
status="active",
cancel_at_period_end=False,
amount=decimal.Decimal("19.99"),
currency="usd"
)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_unauthed_user_passes(self):
logout(self.request)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_staff_user_passes(self):
self.request.user.is_staff = True
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
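# Illustrative settings sketch (appended for this document; the values are
# made up, the setting names are the ones the tests above read): a project
# using ActiveSubscriptionMiddleware would configure something like
#
#   SUBSCRIPTION_REQUIRED_EXCEPTION_URLS = ("signup", "password_reset")
#   SUBSCRIPTION_REQUIRED_REDIRECT = "subscribe"
#
# so signup and password-reset pages stay reachable while every other view
# redirects users without an active subscription.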
|
{
"content_hash": "b305efd8e40b35006236163242d9cf47",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 90,
"avg_line_length": 35.60526315789474,
"alnum_prop": 0.6612466124661247,
"repo_name": "alexhayes/django-stripe-payments",
"id": "f4076ee815d11c1d824a8443024374a48de2233e",
"size": "4083",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "payments/tests/test_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9439"
},
{
"name": "Python",
"bytes": "138961"
}
],
"symlink_target": ""
}
|
import sys
import webob
from prestans import exception
class ErrorResponse(webob.Response):
"""
ErrorResponse is a specialised webob.Response, it is responsible for writing
out a message in the following format; using the currently selected serializer
{
"code": 404,
"message": "This is an error message",
"trace": [
{
"key": "value"
}
]
}
"""
def __init__(self, raised_exception, serializer):
super(ErrorResponse, self).__init__()
self._exception = raised_exception
self._serializer = serializer
self._message = raised_exception.message
self._stack_trace = raised_exception.stack_trace
# self._trace = None
# IETF hash dropped the X- prefix for custom headers
# http://stackoverflow.com/q/3561381
# http://tools.ietf.org/html/draft-saintandre-xdash-00
from prestans import __version__ as version
if not isinstance(version, str):
version = version.encode("latin1")
self.headers.add('Prestans-Version', version)
self.content_type = self._serializer.content_type()
self.status = raised_exception.http_status
# @property
# def trace(self):
# return self._trace
#
# def append_to_trace(self, trace_entry):
# """
# Use this to append to the stack trace
# """
# self._trace.append(trace_entry)
def __call__(self, environ, start_response):
# we have received a custom error response model, use it instead
if isinstance(self._exception, exception.ResponseException) and self._exception.response_model:
body_as_string = self._serializer.dumps(self._exception.response_model.as_serializable())
# pack into default format for error response
else:
error_dict = {
"code": self.status_int,
"message": self._message,
"trace": self._stack_trace
}
body_as_string = self._serializer.dumps(error_dict)
self.content_length = len(body_as_string)
start_response(self.status, self.headerlist)
return [body_as_string.encode("utf-8")]
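# Illustrative sketch (appended for this document, not part of the upstream
# package; it needs webob and prestans importable): a stub serializer and an
# exception-like object that exercise ErrorResponse's default error body.
# _JSONSerializer, _FakeException and _start_response are made-up names.
if __name__ == "__main__":
    import json

    class _JSONSerializer(object):
        def content_type(self):
            return "application/json"

        def dumps(self, data):
            return json.dumps(data)

    class _FakeException(object):
        message = "This is an error message"
        stack_trace = [{"key": "value"}]
        http_status = 404
        response_model = None

    def _start_response(status, headers):
        print(status)  # e.g. "404 Not Found"

    response = ErrorResponse(_FakeException(), _JSONSerializer())
    print(response(environ={}, start_response=_start_response))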
|
{
"content_hash": "785c312cc23f5e9706339a445bb41a49",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 103,
"avg_line_length": 30.743243243243242,
"alnum_prop": 0.596043956043956,
"repo_name": "anomaly/prestans",
"id": "72138128f8e72cf8abf6dd1a2af7f343f1ededa7",
"size": "2275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prestans/rest/error_response.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "44024"
},
{
"name": "Makefile",
"bytes": "1131"
},
{
"name": "Python",
"bytes": "531098"
}
],
"symlink_target": ""
}
|
from experiment import Experiment
from episodic import EpisodicExperiment
from continuous import ContinuousExperiment
from queued import QueuedExperiment
from tournament import Tournament
|
{
"content_hash": "2aff8daec8c905fd2ffef3817319df0e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 37.8,
"alnum_prop": 0.8888888888888888,
"repo_name": "daanwierstra/pybrain",
"id": "a8fe5a492a81e9a230177defe162792b406043c6",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybrain/rl/experiments/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "380415"
},
{
"name": "Python",
"bytes": "1279804"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
import os
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# END EMAIL CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'default.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# END DATABASE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# END CACHE CONFIGURATION
# TOOLBAR CONFIGURATION
# See:
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'password_reset',
)
MIDDLEWARE_CLASSES += (
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
# END TOOLBAR CONFIGURATION
AUTH_PROFILE_MODULE = 'users.UserProfile'
support_email = "survey@coliving.org"
|
{
"content_hash": "91e80a1ba4f06c8917f79191aab512fe",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 108,
"avg_line_length": 27.956521739130434,
"alnum_prop": 0.6666666666666666,
"repo_name": "dmallcott/C-Nutra",
"id": "ddd492a69cc89725e049fe010ffc0c18f834b74d",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c_nutra/config/settings/local.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54060"
},
{
"name": "JavaScript",
"bytes": "141536"
},
{
"name": "Python",
"bytes": "82103"
},
{
"name": "Ruby",
"bytes": "853"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
"""
WSGI config for brickset_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brickset_app.settings")
application = get_wsgi_application()
|
{
"content_hash": "b9152874918f1b34c54dd00cff681949",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.125,
"alnum_prop": 0.7711442786069652,
"repo_name": "pyfirst/samplecode",
"id": "40a319d67e475230da8a9acee24a0785b47667a7",
"size": "402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "6_web/brickset_app/brickset_app/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "805"
},
{
"name": "HTML",
"bytes": "8191"
},
{
"name": "Jupyter Notebook",
"bytes": "309155"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "34757"
}
],
"symlink_target": ""
}
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import create_app
from app.models import db
from app.models.user import User
from app.models.skill import Skill
from app.models.role import Role
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
@manager.shell
def make_shell_context():
return dict(
app=app,
db=db,
# add models
User=User,
Skill=Skill,
Role=Role
)
if __name__ == '__main__':
manager.run()
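# Typical invocations (illustrative, appended for this document; assumes this
# file is saved as manager.py at the project root):
#
#   python manager.py db init      # create the migrations/ directory once
#   python manager.py db migrate   # autogenerate a revision from the models
#   python manager.py db upgrade   # apply pending revisions
#   python manager.py shell        # shell with app, db and the models preloaded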
|
{
"content_hash": "b46810269059754581ea0af2999eff1d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 49,
"avg_line_length": 15.447368421052632,
"alnum_prop": 0.6541737649063032,
"repo_name": "AthelasPeru/laborapp",
"id": "15940874134cfa56a1b55ae85e5015a7b2bcfc98",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "346803"
},
{
"name": "HTML",
"bytes": "25611"
},
{
"name": "JavaScript",
"bytes": "101056"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "17308"
},
{
"name": "Ruby",
"bytes": "976"
},
{
"name": "Shell",
"bytes": "260"
}
],
"symlink_target": ""
}
|
import logging as loggers
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from deepy.utils import FLOATX, dim_to_var, EPSILON
from deepy.trainers.util import wrap_core, multiple_l2_norm
from deepy.conf import TrainerConfig
logging = loggers.getLogger(__name__)
def optimize_updates(params, gradients, config=None, shapes=None):
"""
General optimization function for Theano.
Parameters:
params - parameters
gradients - gradients
config - training config
Returns:
Theano updates
:type config: deepy.TrainerConfig or dict
"""
if config and isinstance(config, dict):
config = TrainerConfig(config)
# Clipping
if config:
clip_value = config.get("gradient_clipping", None)
if clip_value:
clip_constant = T.constant(clip_value, dtype=FLOATX)
grad_norm = multiple_l2_norm(gradients)
multiplier = ifelse(grad_norm < clip_constant,
T.constant(1., dtype=FLOATX), clip_constant / (grad_norm + EPSILON))
clipped_gradients = []
for g in gradients:
if config.gradient_tolerance:
g = ifelse(grad_norm > config.gradient_tolerance, T.zeros_like(g) + EPSILON, g)
g = multiplier * g
clipped_gradients.append(g)
gradients = clipped_gradients
# Regularization
if config and config.weight_l2:
regularized_gradients = []
for param, grad in zip(params, gradients):
grad = grad + (2 * config.weight_l2 * param)
regularized_gradients.append(grad)
gradients = regularized_gradients
# Avoid nan
if config and config.avoid_nan:
logging.info("avoid NaN gradients")
new_gradients = []
for grad in gradients:
new_grad = ifelse(T.isnan(grad).any(), T.zeros_like(grad) + EPSILON, grad)
new_gradients.append(new_grad)
gradients = new_gradients
# Find method
method = "SGD"
if config:
method = config.get("method", method).upper()
# Get Function
func = None
if method in ["SGD", "ADAGRAD", "ADADELTA", "FINETUNING_ADAGRAD"]:
from cores.ada_family import ada_family_core
func = ada_family_core
elif method == "ADAM":
from cores.adam import adam_core
func = adam_core
elif method == "RMSPROP":
from cores.rmsprop import rmsprop_core
func = rmsprop_core
elif method == "MOMENTUM":
from cores.momentum import momentum_core
func = momentum_core
if not func:
raise NotImplementedError("method '%s' is not supported" % method)
logging.info("optimize method=%s parameters=%s" % (method, str(params)))
free_parameters = []
return_vals = wrap_core(func, config, params, gradients)
if type(return_vals) == list and type(return_vals[0]) == list:
updates, free_parameters = return_vals
else:
updates = return_vals
# Weight bound
if config and config.weight_bound:
logging.info("apply weight bound of %.2f" % config.weight_bound)
new_updates = []
for param, update_value in updates:
bounded_value = (update_value * (T.abs_(update_value) <= config.weight_bound) +
config.weight_bound * (update_value > config.weight_bound) +
-config.weight_bound * (update_value < -config.weight_bound))
new_updates.append((param, bounded_value))
updates = new_updates
return updates, free_parameters
def optimize_function(params, config=None):
"""
Create a optimizing function receives gradients.
Parameters:
params - parameters
config - training configuration
Returns:
updating function receives gradients
"""
gs = [dim_to_var(p.ndim) for p in params]
updates, _ = optimize_updates(params, gs, config)
return theano.function(gs, [], updates=updates)
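# Illustrative sketch (appended for this document, not part of the upstream
# module): a plain-numpy analogue of the clipping rule built symbolically in
# optimize_updates() -- when the joint L2 norm of all gradients exceeds
# `gradient_clipping`, every gradient is rescaled by clip_value / norm.
if __name__ == "__main__":
    import numpy as np
    clip_value = 1.0
    grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]
    norm = np.sqrt(sum((g ** 2).sum() for g in grads))  # 13.0 for this example
    multiplier = 1.0 if norm < clip_value else clip_value / (norm + 1e-7)
    print([multiplier * g for g in grads])  # rescaled so the joint norm is ~1.0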
|
{
"content_hash": "d8636aa84ed220d03e6d92fedbe717ce",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 104,
"avg_line_length": 35.00869565217391,
"alnum_prop": 0.6159960258320915,
"repo_name": "wolet/deepy",
"id": "e8a40b7b3a59fc1750fd2c20eda43903dc3c3c7c",
"size": "4074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deepy/trainers/optimize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15998"
},
{
"name": "Python",
"bytes": "222029"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
}
|
"""
SiteAlert, what are you waiting for?
Copyright (c) 2015, Matteo Pietro Dazzi <---> ilteoood
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
========================================================================================================================
"""
__author__ = 'iLTeoooD'
import hashlib
import os
import platform
import re
import smtplib
import socket
import sqlite3
import sys
import time
import urllib.request
from os.path import expanduser
import telebot
from bs4 import BeautifulSoup
class SiteAlert:
def __init__(self):
self.__db = expanduser("~") + os.sep + "SiteAlert.db"
if not os.path.isfile(self.__db):
print("[WARNING]: No db found, creating a new one.")
connection = sqlite3.connect(self.__db)
connection.execute(
"CREATE TABLE `SiteAlert` (`name` TEXT NOT NULL UNIQUE,`link` TEXT NOT NULL,`hash` TEXT NOT NULL,PRIMARY KEY(link));")
connection.execute(
"CREATE TABLE 'Registered'('name' TEXT NOT NULL,'mail' TEXT NOT NULL, PRIMARY KEY(name, mail));")
connection.execute(
"CREATE TABLE Users ('mail' TEXT NOT NULL, 'telegram' TEXT NOT NULL UNIQUE, 'mailnotification' BOOLEAN NOT NULL DEFAULT TRUE, 'telegramnotification' BOOLEAN NOT NULL DEFAULT TRUE, PRIMARY KEY (mail));")
connection.close()
self.__connection = sqlite3.connect(self.__db, check_same_thread=False)
self.__header = [('User-Agent',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
('Accept-Encoding', 'none'),
('Accept-Language', 'en-US,en;q=0.8'),
('Connection', 'keep-alive')]
self.__TOKEN = os.environ['SITE_ALERT_TOKEN']
self.__MAIL = os.environ['SITE_ALERT_MAIL']
self.__PSW = os.environ['SITE_ALERT_PASSWORD']
self.__tb = telebot.TeleBot(self.__TOKEN)
self.saved_on_db()
def display_sites(self):
self.saved_on_db()
leng = len(self.__sites)
if leng != 0:
i = 1
for site in self.__sites:
print(str(i) + ") " + site[0])
i += 1
else:
print("You haven't checked any site!")
def clean_db(self):
nameSite = self.execute_fetch_all(
"SELECT name FROM SiteAlert EXCEPT SELECT name FROM Registered GROUP BY name", ())
for name in nameSite:
print("Removing \"%s\"..." % (name))
self.execute_query("DELETE FROM SiteAlert WHERE name = ?", (name[0],))
def __std_url(self, site):
if not site.startswith("http://") and not site.startswith("https://"):
site = "http://" + site
return site
def __url_encode(self, read):
read = BeautifulSoup(read, "html.parser")
read = re.sub("<!--[\s\S]*?-->", "", read.get_text())
read = re.sub("(?s)/\\*.*?\\*/", "", read)
return hashlib.md5(bytes(read, 'utf-8')).hexdigest()
def __save_file(self, nameSite, link, mail, telegram, hash):
try:
self.execute_query("INSERT INTO SiteAlert (name,link,hash) VALUES (?,?,?)", (
nameSite, link, hash))
mail = mail.split(";")
for m in mail:
self.execute_query("INSERT INTO Registered (name, mail) VALUES (?,?)", (
nameSite, m))
print("Site saved correctly!")
except sqlite3.IntegrityError:
self.execute_query("UPDATE SiteAlert SET hash=? WHERE name=?", (hash, nameSite))
print("Already exist a site with this credentials.")
def add_site(self, nameSite, link, mail='', telegram=''):
if link == "" or nameSite == "":
link = input("Insert the link for the site: ")
nameSite = input("Insert a name for the site: ")
mail = input(
"Insert the email where you want to be informed (if you want to add other mail, separate them with \";\"): ")
try:
link = self.__std_url(link)
urli = urllib.request.build_opener()
urli.addheaders = self.__header
urli = urli.open(link, timeout=10.0)
responseCode = urli.getcode()
if responseCode == 200:
self.__save_file(nameSite, link, mail, telegram, self.__url_encode(urli.read()))
elif responseCode == 404:
print("This page doesn't exist!")
else:
print("Generic error.")
except urllib.request.URLError:
print("There is an error with the link.")
except ConnectionResetError:
print("[ERROR]: Connection reset by peer: ")
except socket.timeout:
print("[ERROR]: Connection timeout")
def __send_mail(self, nameSite, link):
server = smtplib.SMTP("smtp.gmail.com:587")
server.starttls()
try:
server.login(self.__MAIL, self.__PSW)
subj = "The site \"" + nameSite + "\" has been changed!"
msg = "Subject: " + subj + "\n" + subj + "\nLink: " + link
mail = self.execute_fetch_all(
"SELECT Registered.mail FROM Users, Registered WHERE Registered.mail = Users.mail AND mailnotification = 'True' AND name=?",
(nameSite,))
for address in mail:
try:
server.sendmail(self.__MAIL, address, msg)
except smtplib.SMTPRecipientsRefused:
print("Error with this e-mail destination address: " + address)
server.close()
telegram = self.execute_fetch_all(
"SELECT telegram FROM Users, Registered WHERE telegramnotification = 'True' AND name=? AND Users.mail = Registered.mail",
(nameSite,))
for t in telegram:
try:
self.__tb.send_message(t[0], subj + "\nLink: " + link)
except telebot.apihelper.ApiException:
print("Bot kicked from " + t[0], ", removing from DB...")
self.execute_query(
"DELETE FROM Registered WHERE mail = (SELECT mail FROM Users WHERE telegram = ?)",
(t[0],))
self.execute_query("DELETE FROM Users WHERE telegram = ?", (t[0],))
except smtplib.SMTPAuthenticationError:
print("Error in the login process")
def check_site(self):
self.saved_on_db()
if len(self.__sites) > 0:
for site in self.__sites:
site = site[0]
query = self.execute_fetch_all("SELECT hash,link FROM SiteAlert WHERE name=?", (site,))[0]
hash = query[0]
link = query[1]
urli = urllib.request.build_opener()
urli.addheaders = self.__header
try:
urli = urli.open(link, timeout=10.0)
if hash == self.__url_encode(urli.read()):
print("The site \"" + site + "\" hasn't been changed!")
else:
print("The site \"" + site + "\" has been changed!")
self.add_site(site, link, "")
self.__send_mail(site, link)
except urllib.error.URLError:
print("[ERROR]: Network error: " + site)
except ConnectionResetError:
print("[ERROR]: Connection reset by peer: " + site)
except socket.timeout:
print("[ERROR]: Connection timeout: " + site)
else:
print("You haven't checked any site.")
return True
return False
def execute_query(self, query, parameters):
self.__connection.execute(query, parameters)
self.__connection.commit()
def execute_fetch_all(self, query, parameters):
saved_sites = self.__connection.execute(query, parameters).fetchall()
self.__connection.commit()
return saved_sites
def saved_on_db(self):
self.__sites = self.execute_fetch_all("SELECT name FROM SiteAlert", ())
return self.__sites
def number_req(self):
s = -1
self.display_sites()
while s <= 0 or s > len(self.__sites):
print("Number of the site: ", )
try:
s = int(input())
except ValueError:
s = -1
return s
def close_connection(self):
self.__connection.close()
def delete_site(self, name):
self.execute_query("DELETE FROM SiteAlert WHERE name=?", (name,))
self.execute_query("DELETE FROM Registered WHERE name=?", (name,))
def clear_screen():
if platform.system() == "Windows":
os.system("cls")
else:
os.system("clear")
def display_menu():
clear_screen()
print(
"What do you want to do?\n1) Display sites\n2) Add new site to check\n3) Fetch site\n4) Check sites\n5) Add e-mail to notification\n6) Remove e-mail from notification\n7) Delete a site\n8) Clean database\n9) Exit")
def choice():
clear_screen()
try:
x = -1
while not 1 <= x <= 9:
if x != 9:
display_menu()
x = int(input())
return x
except ValueError:
return 9
def main():
c = 1
n = len(sys.argv)
site_alert = SiteAlert()
while True:
s = ""
if c < n:
arg = sys.argv[c]
x = {"-a": 2, "-am": 5, "-b": 4, "-c": 4, "-cl": 8, "-d": 7, "-e": 9, "-f": 3,
"-h": 0, "-r": 6,
"-s": 1}.get(arg)
s = {"-b": "y", "-c": "n"}.get(arg)
c += 1
else:
x = choice()
clear_screen()
if x == 0:
print(
"Usage:\n-a -> add a new site\n-am -> add new e-mail address\n-b -> continuous check\n-c -> check once\n-cl -> clean database\n-d -> delete a site\n-e -> exit\n-h -> print this help\n-r -> remove e-mail address\n-s -> show the list of the sites")
elif x == 1:
site_alert.display_sites()
elif x == 2:
site_alert.add_site("", "")
elif x == 3:
saved = site_alert.saved_on_db()
if len(saved) != 0:
print("Write the number of the site that you want to fetch.")
nameSite = saved[site_alert.number_req() - 1][0]
query = site_alert.execute_fetch_all("SELECT link FROM SiteAlert WHERE name=?", (nameSite,))[0]
link = query[0]
site_alert.add_site(nameSite, link)
else:
print("You haven't checked any site.")
elif x == 4:
if s == "":
s = input("Do you want to check it continually? (Y/n)")
while len(s) == 0 or (s[0] != 'n' and s[0] != 'y'):
if len(s) == 0:
s = "y"
break
else:
s = input("Wrong input, do you want to check it continually? (Y/n)")
while True:
if site_alert.check_site() or s != "y":
break
else:
time.sleep(30)
site_alert.clean_db()
elif x == 5 or x == 6:
saved = site_alert.saved_on_db()
if len(saved) != 0:
print("Write the number of the site.")
nameSite = saved[site_alert.number_req() - 1][0]
mail = input("Insert e-mail: ")
if x == 5:
site_alert.execute_query("INSERT INTO Registered VALUES(?, ?)", (nameSite, mail))
else:
site_alert.execute_query("DELETE FROM Registered WHERE mail=? AND name=?", (mail, nameSite))
print("Action completed successfully!")
else:
print("You haven't checked any site.")
elif x == 7:
saved = site_alert.saved_on_db()
if len(saved) != 0:
print("Write the number of the site that you want to delete.")
index = site_alert.number_req() - 1
site_alert.delete_site(saved[index][0])
print("Site deleted successfully!")
else:
print("You haven't checked any site!")
elif x == 8:
site_alert.clean_db()
elif x != 9:
print("Unknown command: \"" + arg + "\"")
if x == 9:
site_alert.close_connection()
sys.exit(0)
input("Press enter to continue...")
if __name__ == "__main__":
main()
|
{
"content_hash": "944694db65e25f77ae98485a04eda88b",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 262,
"avg_line_length": 43.07988165680474,
"alnum_prop": 0.5199505528466452,
"repo_name": "ilteoood/SiteAlert-Python",
"id": "f42c0c13227f6c2619a9b24366679ea305e11828",
"size": "14561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SiteAlert.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25071"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: a4cb3e538622
Revises: None
Create Date: 2016-11-17 17:17:37.946675
"""
# revision identifiers, used by Alembic.
revision = 'a4cb3e538622'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), server_default='', nullable=False),
sa.Column('label', sa.Unicode(length=255), server_default='', nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.Unicode(length=255), server_default='', nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.Column('password', sa.String(length=255), server_default='', nullable=False),
sa.Column('reset_password_token', sa.String(length=100), server_default='', nullable=False),
sa.Column('is_active', sa.Boolean(), server_default='0', nullable=False),
sa.Column('first_name', sa.Unicode(length=50), server_default='', nullable=False),
sa.Column('last_name', sa.Unicode(length=50), server_default='', nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('users_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('users_roles')
op.drop_table('users')
op.drop_table('roles')
### end Alembic commands ###
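# Typical invocations for this revision (illustrative, appended for this
# document; assumes a standard Alembic setup with alembic.ini pointing at the
# target database):
#
#   alembic upgrade a4cb3e538622   # create roles, users and users_roles
#   alembic downgrade -1           # drop the three tables again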
|
{
"content_hash": "abd77384235781d8459a73d8714e4d2a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 96,
"avg_line_length": 37.27272727272727,
"alnum_prop": 0.6658536585365854,
"repo_name": "cynicalanlz/flask-este-code-sample",
"id": "2fe12ad50626de67e33b4752477b15bf60ed7f8e",
"size": "2050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/a4cb3e538622_.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6027"
},
{
"name": "Groff",
"bytes": "361"
},
{
"name": "HTML",
"bytes": "9606"
},
{
"name": "JavaScript",
"bytes": "760"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "21160"
},
{
"name": "Shell",
"bytes": "310"
}
],
"symlink_target": ""
}
|
import logging
import tempfile
import json
from chrome_remote_control import adb_commands
from chrome_remote_control import browser_backend
from chrome_remote_control import browser_gone_exception
class AndroidBrowserBackend(browser_backend.BrowserBackend):
"""The backend for controlling a browser instance running on Android.
"""
def __init__(self, options, adb, package,
is_content_shell, cmdline_file, activity, devtools_remote_port):
super(AndroidBrowserBackend, self).__init__(is_content_shell, options)
# Initialize fields so that an explosion during init doesn't break in Close.
self._options = options
self._adb = adb
self._package = package
self._cmdline_file = cmdline_file
self._activity = activity
self._port = 9222
self._devtools_remote_port = devtools_remote_port
# Kill old browser.
self._adb.KillAll(self._package)
self._adb.KillAll('device_forwarder')
self._adb.Forward('tcp:9222', self._devtools_remote_port)
# Chrome Android doesn't listen to --user-data-dir.
# TODO: symlink the app's Default, files and cache dir
# to somewhere safe.
if not is_content_shell and not options.dont_override_profile:
# Set up the temp dir
# self._tmpdir = '/sdcard/chrome_remote_control_data'
# self._adb.RunShellCommand('rm -r %s' % self._tmpdir)
# args.append('--user-data-dir=%s' % self._tmpdir)
pass
# Set up the command line.
if is_content_shell:
pseudo_exec_name = 'content_shell'
else:
pseudo_exec_name = 'chrome'
args = [pseudo_exec_name]
args.extend(self.GetBrowserStartupArgs())
with tempfile.NamedTemporaryFile() as f:
def EscapeIfNeeded(arg):
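        # The pushed command-line file is parsed as whitespace-separated arguments,
        # so wrap embedded spaces in quotes to keep such an argument in one piece.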
return arg.replace(' ', '" "')
f.write(' '.join([EscapeIfNeeded(arg) for arg in args]))
f.flush()
self._adb.Push(f.name, cmdline_file)
# Force devtools protocol on, if not already done.
if not is_content_shell:
      # Make sure we can find the app's prefs file
app_data_dir = '/data/data/%s' % self._package
prefs_file = (app_data_dir +
'/app_chrome/Default/Preferences')
if not self._adb.FileExistsOnDevice(prefs_file):
logging.critical(
'android_browser_backend: Could not find preferences file ' +
'%s for %s' % (prefs_file, self._package))
raise browser_gone_exception.BrowserGoneException(
'Missing preferences file.')
with tempfile.NamedTemporaryFile() as raw_f:
self._adb.Pull(prefs_file, raw_f.name)
with open(raw_f.name, 'r') as f:
txt_in = f.read()
preferences = json.loads(txt_in)
changed = False
if 'devtools' not in preferences:
preferences['devtools'] = {}
changed = True
if 'remote_enabled' not in preferences['devtools']:
preferences['devtools']['remote_enabled'] = True
changed = True
if preferences['devtools']['remote_enabled'] != True:
preferences['devtools']['remote_enabled'] = True
changed = True
if changed:
logging.warning('Manually enabled devtools protocol on %s' %
self._package)
with open(raw_f.name, 'w') as f:
txt = json.dumps(preferences, indent=2)
f.write(txt)
self._adb.Push(raw_f.name, prefs_file)
# Start it up!
self._adb.StartActivity(self._package,
self._activity,
True,
None,
'chrome://newtab/')
try:
self._WaitForBrowserToComeUp()
except:
import traceback
traceback.print_exc()
self.Close()
raise
def GetBrowserStartupArgs(self):
args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
args.append('--disable-fre')
return args
def __del__(self):
self.Close()
def Close(self):
self._adb.RunShellCommand('rm %s' % self._cmdline_file)
self._adb.KillAll(self._package)
def IsBrowserRunning(self):
pids = self._adb.ExtractPid(self._package)
return len(pids) != 0
def CreateForwarder(self, host_port):
return adb_commands.Forwarder(self._adb, host_port)
|
{
"content_hash": "1ede3944b3937088518b36a84e668c11",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 80,
"avg_line_length": 34.54032258064516,
"alnum_prop": 0.619892598645809,
"repo_name": "junmin-zhu/chromium-rivertrail",
"id": "223b8c8f81b41d2d94e19be422d15efcaf3c15b2",
"size": "4449",
"binary": false,
"copies": "1",
"ref": "refs/heads/v8-binding",
"path": "tools/chrome_remote_control/chrome_remote_control/android_browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75806807"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "145161929"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "1546515"
},
{
"name": "JavaScript",
"bytes": "18675242"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "6981387"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "926245"
},
{
"name": "Python",
"bytes": "8088373"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3239"
},
{
"name": "Shell",
"bytes": "1513486"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
}
|
import numpy as np
import alsaaudio as aa
from struct import unpack
import time
# Will read small read_size packets from the mic and append until "chunk" data has been read
def read_mic(chunk, input):
# Create an empty NumPy array that will store the mic data
data = np.array([])
amount_read = 0
while amount_read <= chunk:
l, temp = input.read()
        # The stream was set up in NONBLOCKING mode, so if no data is ready yet the read will return 0.
        # Don't attempt to unpack zero-length data; instead sleep briefly to wait for more data.
if l > 0:
temp = unpack("%dh"%(len(temp)/2),temp)
temp = np.array(temp, dtype='h')
data = np.append(data, temp).astype('i2')
amount_read += l
else:
time.sleep(0.0001)
    # We may have read more than "chunk" samples; truncate the return array to only the samples
    # that will be processed. If too much data was read, this will drop samples, but a perfect
    # recreation of the input stream is not required to display the audio spectrum.
return data[0:chunk]
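# A minimal capture-setup sketch for read_mic (illustrative only; the actual device
# configuration lives elsewhere in this project). It assumes the usual alsaaudio
# pattern of a non-blocking, 16-bit little-endian mono capture PCM:
#
#   input = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NONBLOCK)
#   input.setchannels(1)
#   input.setrate(44100)
#   input.setformat(aa.PCM_FORMAT_S16_LE)
#   input.setperiodsize(512)
#   data = read_mic(1024, input)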
# Moved from main.py, still needs to be fully tested
def read_wavfile(chunk, input, output):
    # Read the next chunk of frames (assumes `input` is a wave.Wave_read object,
    # as it was when this lived in main.py)
    data = input.readframes(chunk)
    # Before processing samples in FFT, write raw data to speakers
    output.write(data)
    # Replace the %d in the format string with length of data chunk.
    # Will not error if fewer than chunk samples are read at end of file
    data = unpack("%dh"%(len(data)/2),data)
    data = np.array(data, dtype='h')
    return data
|
{
"content_hash": "78ac9c452208f12fae523e4d17e7e475",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 127,
"avg_line_length": 41.69444444444444,
"alnum_prop": 0.6728847435043305,
"repo_name": "parrisha/raspi-visualizer",
"id": "7dcef1f36b9e2181284d7dbdde9470184ef4ec06",
"size": "1696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mic/mic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20045"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from mudu import urls, views
urlpatterns = [
url(r'^$', views.IndexPage.as_view(), name='index'),
url(r'^api/', include(urls)),
url(r'^gagou/', include(admin.site.urls)),
]
admin.site.site_title = _('MuDu45 Admin')
admin.site.site_header = _('MuDu45 Administration')
admin.site.index_title = _('MuDu45 Administration')
|
{
"content_hash": "cb291ddc3d85154110aeae741e43ec51",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 56,
"avg_line_length": 31,
"alnum_prop": 0.7075268817204301,
"repo_name": "TheBlackDude/mudu45",
"id": "3947db338fe71713983238e3e33b1132b2748d04",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "907"
},
{
"name": "HTML",
"bytes": "4619"
},
{
"name": "JavaScript",
"bytes": "270187"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "17239"
}
],
"symlink_target": ""
}
|
"""The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Represents the select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "Hello Name",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
|
{
"content_hash": "9200d23cddf43f8a38131470fb9b5b3a",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 132,
"avg_line_length": 32.6061320754717,
"alnum_prop": 0.4716817359855335,
"repo_name": "rohitranjan1991/home-assistant",
"id": "66f67d93754d26c6f31f7eb569da2eb5e815629e",
"size": "13825",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/template/test_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='drf_ujson',
version='1.2',
description='Django Rest Framework UJSON Renderer',
author='Gizmag',
author_email='tech@gizmag.com',
url='https://github.com/gizmag/drf-ujson-renderer',
packages=find_packages(),
install_requires=['django', 'ujson', 'djangorestframework']
)
|
{
"content_hash": "06454689fcc54c8064576508ee8036b8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6871508379888268,
"repo_name": "pombredanne/drf-ujson-renderer",
"id": "ba4e69365c634d7b26bda04535c24c57c29298d3",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3976"
}
],
"symlink_target": ""
}
|
"""Simplified chat demo for websockets.
Authentication, error handling, etc are left as an exercise for the reader :)
"""
import os.path
import uuid
import sys
import time
from collections import defaultdict
from twisted.python import log
from twisted.internet import reactor, task
import cyclone.escape
import cyclone.web
import cyclone.websocket
class Application(cyclone.web.Application):
def __init__(self):
stats = Stats()
handlers = [
(r"/", MainHandler, dict(stats=stats)),
(r"/stats", StatsPageHandler),
(r"/statssocket", StatsSocketHandler, dict(stats=stats)),
(r"/chatsocket", ChatSocketHandler, dict(stats=stats)),
]
settings = dict(
cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
autoescape=None,
)
cyclone.web.Application.__init__(self, handlers, **settings)
class MainHandler(cyclone.web.RequestHandler):
def initialize(self, stats):
self.stats = stats
def get(self):
self.stats.newVisit()
self.render("index.html", messages=ChatSocketHandler.cache)
class ChatSocketHandler(cyclone.websocket.WebSocketHandler):
waiters = set()
cache = []
cache_size = 200
def initialize(self, stats):
self.stats = stats
def connectionMade(self):
ChatSocketHandler.waiters.add(self)
self.stats.newChatter()
def connectionLost(self, reason):
ChatSocketHandler.waiters.remove(self)
self.stats.lostChatter()
@classmethod
def update_cache(cls, chat):
cls.cache.append(chat)
if len(cls.cache) > cls.cache_size:
cls.cache = cls.cache[-cls.cache_size:]
@classmethod
def send_updates(cls, chat):
log.msg("sending message to %d waiters" % len(cls.waiters))
for waiter in cls.waiters:
try:
waiter.sendMessage(chat)
except Exception as e:
log.err("Error sending message. %s" % str(e))
def messageReceived(self, message):
log.msg("got message %s" % message)
parsed = cyclone.escape.json_decode(message)
chat = {
"id": str(uuid.uuid4()),
"body": parsed["body"],
}
chat["html"] = self.render_string("message.html", message=chat)
ChatSocketHandler.update_cache(chat)
ChatSocketHandler.send_updates(chat)
class StatsSocketHandler(cyclone.websocket.WebSocketHandler):
def initialize(self, stats):
self.stats = stats
self._updater = task.LoopingCall(self._sendData)
def connectionMade(self):
self._updater.start(2)
def connectionLost(self, reason):
self._updater.stop()
def _sendData(self):
data = dict(visits=self.stats.todaysVisits(),
chatters=self.stats.chatters)
self.sendMessage(cyclone.escape.json_encode(data))
class Stats(object):
def __init__(self):
self.visits = defaultdict(int)
self.chatters = 0
def todaysVisits(self):
today = time.localtime()
key = time.strftime('%Y%m%d', today)
return self.visits[key]
def newChatter(self):
self.chatters += 1
def lostChatter(self):
self.chatters -= 1
def newVisit(self):
today = time.localtime()
key = time.strftime('%Y%m%d', today)
self.visits[key] += 1
class StatsPageHandler(cyclone.web.RequestHandler):
def get(self):
self.render("stats.html")
def main():
reactor.listenTCP(8888, Application())
reactor.run()
if __name__ == "__main__":
log.startLogging(sys.stdout)
main()
|
{
"content_hash": "19861ff878c557be2ae9d71b9db36a09",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 26.575342465753426,
"alnum_prop": 0.6180412371134021,
"repo_name": "fiorix/cyclone",
"id": "c8e9d2667a56798bfcb9e4c57ef2b9edf4c12488",
"size": "4476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/websocket/chat/chatdemo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2386"
},
{
"name": "HTML",
"bytes": "32384"
},
{
"name": "Makefile",
"bytes": "642"
},
{
"name": "Python",
"bytes": "518718"
},
{
"name": "Shell",
"bytes": "9517"
}
],
"symlink_target": ""
}
|
'''
:author: Robin Flume
:contact: robin.flume@rub.de
'''
import os
class Job(object):
''' This class represents a job 'to do' for the Password Guessing Framework.
It includes the necessary configurations parsed from the file 'run.ini'.
'''
def __init__(self, logger):
''' Constructor.
'''
self.logger = logger
# Initiate job attributes with 'None' ...
self.label = None # [JT MyMy] without []
self.sh_guess = None # JTR_MARKOV.sh
self.sh_content = None # content of sh_file
        self.training_file = None               # /opt/pgf/leaks/rockyou_training.txt
self.pw_file = None # /opt/pgf/leaks/myspace_guess.txt
self.pw_format = None # plaintext_pure
self.filetype = None # plaintext / hashvalues
self.analysis_interval = None # 1000
self.terminate_guessing = None # 100000000000
self.output_file = None # jtr_markov_myspace_myspace.txt
self.progress_file = None #_progress.csv
self.plot_file = None # [timestamp_plot_[uuid].csv
        self.jtr_dir = None                     # /opt/pgf/john-hash/
self.jtr_input_format = None # raw-md5 --> john-hash will be used with parameter '--format=raw-md5'
self.jtr_session = None # PGF - will be set in the 'setup_jtr' method
self.jtr_log_file = None # PGF.log - will be set in the 'setup_jtr' method
self.jtr_pot_file = None # PGF.pot - will be set in the 'setup_jtr' method
self.jtr_command = None # ./john --stdin pw_file --session=pcfg_manager --pot=PGF --> will be set in the 'setup_jtr' method
self.analysis_process = None
def prepare_for_json(self):
''' Prepares a dictionary containing the Job attributes in order to parse it into the 'jobs.json' file for the visualization module.
:return Dictionary containing job attributes.
'''
        self_as_dict = dict(self.__dict__)      # Copying into a new dict is necessary; otherwise the next line would also remove the logger from the Job object itself!
# adapt dict for easier processing
self_as_dict.pop('logger')
self_as_dict.pop('plot_file')
self_as_dict.pop('jtr_pot_file')
self_as_dict['runtime'] = 'Pending'
self_as_dict['output_file'] = os.path.basename(self_as_dict['output_file'])
self_as_dict['progress_file'] = os.path.basename(self_as_dict['progress_file'])
return self_as_dict
# **** SETTER METHODS ****
def set_label(self, label):
self.label = label
def set_sh_guess(self, sh_guess):
self.sh_guess = sh_guess
def set_sh_content(self, sh_content):
self.sh_content = sh_content
def set_pw_file(self, pw_file):
''' Sets the path to the password file.
If just a filename is given in the 'run.ini' file, the path is modified to point to the default path which is
'/opt/pgf/leaks'.
'''
if not '/' in pw_file: # pw_file is just a filename
pw_file = '/opt/pgf/leaks/%s' % pw_file # read the file from the default '/opt/pgf/leaks' directory
self.pw_file = os.path.abspath(pw_file) # resolve relative paths automatically
if not os.path.isfile(self.pw_file):
raise IOError("Password file not found!")
exit(-1) #TODO: correct/good exit method?
def set_training_file(self, training_file):
''' Sets the path to the password file.
If just a filename is given in the 'run.ini' file, the path is modified to point to the default path which is
'/opt/pgf/leaks'.
'''
if '/' not in training_file: # training_file is just a filename
training_file = '/opt/pgf/leaks/%s' % training_file # read the file from the default '/opt/pgf/leaks' directory
self.training_file = os.path.abspath(training_file) # resolve relative paths automatically
if not os.path.isfile(self.training_file):
raise IOError("Training file not found!")
exit(-1) #TODO: correct/good exit method?
def set_pw_format(self, pw_format):
self.pw_format = pw_format
def set_filetype(self, filetype):
self.filetype = filetype
def set_analysis_interval(self, analysis_interval):
self.analysis_interval = analysis_interval
def set_terminate_guessing(self, terminate_guessing):
self.terminate_guessing = terminate_guessing
def set_max_guesses(self, max_guesses):
self.max_guesses = max_guesses
def set_output_file(self, output_file):
''' Sets the path to the output file (csv).
        If only a filename is given, the file will be created in the default 'results' folder of the PGF.
        If the path is given as a relative path, it is automatically resolved into the absolute path, as
        needed by, among others, the C-written analysis module for plaintext passwords.
'''
if output_file is None:
            output_file = self.label                # use the job label as name for the output file if not specified
if not '/' in output_file: # output_file is just a filename
output_file = './results/%s' % output_file # create the file in the 'results' folder of the PGF
if not output_file.endswith('.csv') and not output_file.endswith('.txt'):
output_file = '%s.csv' % output_file # append file ending if not present
self.output_file = os.path.abspath(output_file) # resolve relative paths automatically
def set_progress_file(self, progress_file):
self.progress_file = progress_file
def set_plot_file(self, plot_file):
self.plot_file = plot_file
def set_jtr_input_format(self, jtr_input_format):
self.jtr_input_format = jtr_input_format
def setup_jtr(self, jtr_dir, jtr_session_name):
''' Constructs paths needed to process the JtR output files.
        :param jtr_dir: Installation directory of JtR to hash the received password candidates. A check is performed to ensure the jtr_dir string meets the requirements of the PGF to process it correctly.
:param jtr_session_name: Name of the session (DEFAULT: 'PGF') for JtR. The pot-file created by john will be deleted by the framework to not mix up the results of different guesser executions. Using a session solves the problem of deleting files that also include results of other, independent cracking attempts.
'''
# Check if 'jtr_dir' is a correct string to process by the PGF
if not jtr_dir.endswith('/'):
jtr_dir = "%s%s" % (jtr_dir, '/')
if not os.path.isdir(jtr_dir):
raise IOError("John the Ripper directory not found or is not a directory!")
exit(-1) #TODO: correct/good exit method?
else:
self.jtr_dir = jtr_dir
# create the paths of the session files of JtR
self.jtr_session = "%s%s" % (jtr_dir, jtr_session_name)
self.jtr_pot_file = "%s.pot" % self.jtr_session
self.jtr_log_file = "%s.log" % self.jtr_session
if self.jtr_input_format is None:
self.jtr_command = './john --external=AutoStatus --stdin "%s" --session="%s" --pot="%s"' % (self.pw_file, self.jtr_session, self.jtr_pot_file)
else:
self.jtr_command = './john --external=AutoStatus --stdin "%s" --format="%s" --session="%s" --pot="%s"' % (self.pw_file, self.jtr_input_format, self.jtr_session, self.jtr_pot_file)
def clear_jtr_pot_rec(self):
''' Clears the '.pot' and deletes the '.rec' file in the JtR directory to guarantee that every job starts cracking at 'point 0',
meaning no previous cracked hash values have been saved and would be skipped.
'''
if 'hash' in self.pw_format:
try:
f = open(self.jtr_pot_file, 'w') # clear the file instead of removing which led to "No such file"-Error
f.close()
except OSError, e:
self.logger.debug("The JtR '.pot' file cannot be deleted. <%s>" % str(e))
try:
os.remove(self.jtr_log_file.replace('.pot', '.rec')) # delete the rec file
except OSError, e:
self.logger.debug("The JtR '.rec' file could not be deleted. <%s>" % str(e))
def clear_jtr_log(self):
''' Clears the '.log' file in the JtR directory to not let it grow too much with the entries of cracked hashes.
'''
if 'hash' in self.pw_format:
try:
f = open(self.jtr_log_file, 'w') # clear the file instead of removing which led to "No such file"-Error
f.close()
except OSError, e:
self.logger.debug("The JtR '.log' file could not be cleared. <%s>" % str(e))
|
{
"content_hash": "e655bd314461780f02fc0ff2e218bf80",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 319,
"avg_line_length": 52.971098265895954,
"alnum_prop": 0.5967917939764295,
"repo_name": "RUB-SysSec/Password-Guessing-Framework",
"id": "861bba9946be169cd9e2169d1e5755dc249d407f",
"size": "9164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pgf/initiation/job.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1656"
},
{
"name": "HTML",
"bytes": "5268"
},
{
"name": "JavaScript",
"bytes": "6606"
},
{
"name": "Python",
"bytes": "129456"
},
{
"name": "Shell",
"bytes": "3078"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from assets.models import PersonInfoSerializer
from controk_webservice.suppliers.models import Supplier
class SupplierSerializer(serializers.ModelSerializer):
class Meta:
model = Supplier
fields = ['id', 'email', 'cnpj', 'trading_name']
class SupplierInfoSerializer(PersonInfoSerializer):
class Meta(PersonInfoSerializer.Meta):
model = Supplier
|
{
"content_hash": "56b05bcc9ea9e56e707f381b7a35bc62",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 56,
"avg_line_length": 28.066666666666666,
"alnum_prop": 0.7553444180522565,
"repo_name": "jourdanrodrigues/controk-webservice",
"id": "958f943d9be1484b7e3b7d3a9ec262a03da227a1",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controk_webservice/suppliers/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39986"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
}
|
import time
SUBJECT_FILENAME = "subjects.txt"
VALUE, WORK = 0, 1
#
# Problem 1: Building A Subject Dictionary
#
def loadSubjects(filename):
"""
Returns a dictionary mapping subject name to (value, work), where the name
is a string and the value and work are integers. The subject information is
read from the file named by the string filename. Each line of the file
contains a string of the form "name,value,work".
returns: dictionary mapping subject name to (value, work)
"""
subjects = {}
inputFile = open(filename)
for line in inputFile:
        course, value, work = line.strip().split(',')
        subjects[course] = (int(value), int(work))
return subjects
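# For reference, each line of the subjects file looks like (hypothetical values):
#   6.00,16,8
#   6.01,5,3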
# TODO: Instead of printing each line, modify the above to parse the name,
# value, and work of each subject and create a dictionary mapping the name
# to the (value, work).
def printSubjects(subjects):
"""
Prints a string containing name, value, and work of each subject in
the dictionary of subjects and total value and work of all subjects
"""
totalVal, totalWork = 0,0
if len(subjects) == 0:
return 'Empty SubjectList'
res = 'Course\tValue\tWork\n======\t====\t=====\n'
subNames = list(subjects.keys())
subNames.sort()
for s in subNames:
val = subjects[s][VALUE]
work = subjects[s][WORK]
res = res + s + '\t' + str(val) + '\t' + str(work) + '\n'
totalVal += val
totalWork += work
res = res + '\nTotal Value:\t' + str(totalVal) +'\n'
res = res + 'Total Work:\t' + str(totalWork) + '\n'
print(res)
def cmpValue(subInfo1, subInfo2):
"""
Returns True if value in (value, work) tuple subInfo1 is GREATER than
value in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
return val1 > val2
def cmpWork(subInfo1, subInfo2):
"""
Returns True if work in (value, work) tuple subInfo1 is LESS than than work
in (value, work) tuple in subInfo2
"""
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return work1 < work2
def cmpRatio(subInfo1, subInfo2):
"""
Returns True if value/work in (value, work) tuple subInfo1 is
GREATER than value/work in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return float(val1) / work1 > float(val2) / work2
#
# Problem 2: Subject Selection By Greedy Optimization
#
def greedyAdvisor(subjects, maxWork, comparator):
"""
Returns a dictionary mapping subject name to (value, work) which includes
subjects selected by the algorithm, such that the total work of subjects in
the dictionary is not greater than maxWork. The subjects are chosen using
a greedy algorithm. The subjects dictionary should not be mutated.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
comparator: function taking two tuples and returning a bool
returns: dictionary mapping subject name to (value, work)
"""
selected = {}
changed = True
while changed:
changed = False
best = None
for key in subjects.keys():
#print("key =", key)
#print("best =", best)
if key in selected:
continue
elif subjects[key][WORK] <= maxWork and (best == None or comparator(subjects[key], subjects[best])):
best = key
changed = True
#print("found better: ", best, subjects[best])
if changed:
maxWork -= subjects[best][WORK]
selected[best] = subjects[best]
return selected
# Tests
##smallCatalog = {'6.00': (16, 8), '1.00': (7, 7), '6.01': (5, 3), '15.01': (9, 6)}
##print("cmpValue")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpValue))
##print("cmpWork")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpWork))
##print("cmpRatio")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpRatio))
##
##subjects = loadSubjects(SUBJECT_FILENAME)
##print("cmpValue")
##printSubjects(greedyAdvisor(subjects, 15, cmpValue))
##print("cmpWork")
##printSubjects(greedyAdvisor(subjects, 15, cmpWork))
##print("cmpRatio")
##printSubjects(greedyAdvisor(subjects, 15, cmpRatio))
def bruteForceAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work), which
represents the globally optimal selection of subjects using a brute force
algorithm.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
nameList = list(subjects.keys())
tupleList = list(subjects.values())
bestSubset, bestSubsetValue = \
bruteForceAdvisorHelper(tupleList, maxWork, 0, None, None, [], 0, 0)
outputSubjects = {}
for i in bestSubset:
outputSubjects[nameList[i]] = tupleList[i]
return outputSubjects
def bruteForceAdvisorHelper(subjects, maxWork, i, bestSubset, bestSubsetValue,
subset, subsetValue, subsetWork):
global num_calls
num_calls += 1
# Hit the end of the list.
if i >= len(subjects):
if bestSubset == None or subsetValue > bestSubsetValue:
# Found a new best.
return subset[:], subsetValue
else:
# Keep the current best.
return bestSubset, bestSubsetValue
else:
s = subjects[i]
# Try including subjects[i] in the current working subset.
if subsetWork + s[WORK] <= maxWork:
subset.append(i)
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects,
maxWork, i+1, bestSubset, bestSubsetValue, subset,
subsetValue + s[VALUE], subsetWork + s[WORK])
subset.pop()
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects,
maxWork, i+1, bestSubset, bestSubsetValue, subset,
subsetValue, subsetWork)
return bestSubset, bestSubsetValue
#
# Problem 3: Subject Selection By Brute Force
#
def bruteForceTime():
"""
Runs tests on bruteForceAdvisor and measures the time required to compute
an answer.
"""
subjects = loadSubjects(SUBJECT_FILENAME)
for work in range(1, 10):
start = time.time()
bruteForceAdvisor(subjects, work)
elapsed = time.time() - start
print("Elapsed time for work =", work, " was =", elapsed, "seconds")
# Problem 3 Observations
# ======================
#
# TODO: write here your observations regarding bruteForceTime's performance
#bruteForceTime()
##Elapsed time for work = 1 was = 0.016000032424926758 seconds
##Elapsed time for work = 2 was = 0.03099989891052246 seconds
##Elapsed time for work = 3 was = 0.12400007247924805 seconds
##Elapsed time for work = 4 was = 0.42100000381469727 seconds
##Elapsed time for work = 5 was = 1.2639999389648438 seconds
##Elapsed time for work = 6 was = 3.5879998207092285 seconds
##Elapsed time for work = 7 was = 12.869999885559082 seconds
##Elapsed time for work = 8 was = 34.37399983406067 seconds
##Elapsed time for work = 9 was = 92.40900015830994 seconds
#
# Problem 4: Subject Selection By Dynamic Programming
#
def dpAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work) that contains a
set of subjects that provides the maximum value without exceeding maxWork.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
courses = []
works = []
values = []
    for key in subjects.keys():
        courses.append(key)
        works.append(subjects[key][WORK])
        values.append(subjects[key][VALUE])
memo = {}
winners = dpAdvisorHelper(works, values, len(values) - 1, maxWork, memo)
results = {}
for i in winners:
results[courses[i]] = (values[i], works[i])
return results
# TODO: This implementation is incomplete
# The result is not optimal
def dpAdvisorHelper(works, values, i, available_work, memo):
global num_calls
num_calls += 1
try:
return memo[(i, available_work)]
except KeyError:
pass
if i == 0:
if works[i] <= available_work:
memo[(i, available_work)] = [i]
return [i]
else:
return []
without_i = dpAdvisorHelper(works, values, i - 1, available_work, memo)
if works[i] > available_work:
memo[(i, available_work)] = without_i
return without_i
else:
with_i = [i] + dpAdvisorHelper(works, values, i - 1, available_work - works[i], memo)
if branch_value(with_i, values) >= branch_value(without_i, values):
winners = with_i
else:
winners = without_i
memo[(i, available_work)] = winners
return winners
def branch_value(branch, value):
total = 0
for i in branch:
total += value[i]
return total
##subjects = {'a1': (16, 8), 'b1': (7, 7), 'c1': (5, 3), 'd1': (9, 6)}
##work = 20
##subjects = loadSubjects(SUBJECT_FILENAME)
##work = 5
##print("\n>>> dpAdvisor <<< \n")
##num_calls = 0
##printSubjects(dpAdvisor(subjects, work))
##print("number of calls =", num_calls)
##
##print("\n>>> bruteForceAdvisor <<< \n")
##num_calls = 0
##printSubjects(bruteForceAdvisor(subjects, work))
##print("number of calls =", num_calls)
num_calls = 0
#
# Problem 5: Performance Comparison
#
def dpTime():
"""
Runs tests on dpAdvisor and measures the time required to compute an
answer.
"""
global num_calls
subjects = loadSubjects(SUBJECT_FILENAME)
for work in range(5, 100, 10):
start = time.time()
num_calls = 0
result = dpAdvisor(subjects, work)
#printSubjects(result)
elapsed = time.time() - start
print("Elapsed time for work =", work, " was =", elapsed, "seconds")
# Problem 5 Observations
# ======================
#
# TODO: write here your observations regarding dpAdvisor's performance and
# how its performance compares to that of bruteForceAdvisor.
##dpTime()
####Elapsed time for work = 5 was = 0.019999980926513672 seconds
####Elapsed time for work = 15 was = 0.08999991416931152 seconds
####Elapsed time for work = 25 was = 0.15999984741210938 seconds
####Elapsed time for work = 35 was = 0.25999999046325684 seconds
####Elapsed time for work = 45 was = 0.3710000514984131 seconds
####Elapsed time for work = 55 was = 0.49899983406066895 seconds
####Elapsed time for work = 65 was = 0.35899996757507324 seconds
####Elapsed time for work = 75 was = 0.7799999713897705 seconds
####Elapsed time for work = 85 was = 0.9200000762939453 seconds
####Elapsed time for work = 95 was = 1.1349999904632568 seconds
|
{
"content_hash": "0a03f81127d7c3956f7364f360cea095",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 112,
"avg_line_length": 32.93674698795181,
"alnum_prop": 0.6402377686328303,
"repo_name": "feliposz/learning-stuff",
"id": "20f55b57a547ba42cd3561c38a5791f44ba811a7",
"size": "11093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ps8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "43479"
},
{
"name": "Batchfile",
"bytes": "805"
},
{
"name": "C",
"bytes": "272611"
},
{
"name": "C#",
"bytes": "45057"
},
{
"name": "C++",
"bytes": "1955"
},
{
"name": "CSS",
"bytes": "37278"
},
{
"name": "Groff",
"bytes": "27095"
},
{
"name": "HTML",
"bytes": "1213734"
},
{
"name": "Haskell",
"bytes": "129361"
},
{
"name": "Java",
"bytes": "467704"
},
{
"name": "JavaScript",
"bytes": "132019"
},
{
"name": "Makefile",
"bytes": "5618"
},
{
"name": "NewLisp",
"bytes": "24"
},
{
"name": "PHP",
"bytes": "57486"
},
{
"name": "Pascal",
"bytes": "19005"
},
{
"name": "Perl",
"bytes": "39"
},
{
"name": "Python",
"bytes": "105058"
},
{
"name": "Racket",
"bytes": "499244"
},
{
"name": "Ruby",
"bytes": "5920"
},
{
"name": "Shell",
"bytes": "248"
},
{
"name": "Visual Basic",
"bytes": "7290"
},
{
"name": "XSLT",
"bytes": "791"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="heatmapgl", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "c00fbe3a30ef802feeeecda353ec67ef",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 84,
"avg_line_length": 36.81818181818182,
"alnum_prop": 0.6296296296296297,
"repo_name": "plotly/plotly.py",
"id": "e98413fe3a635fd720730a4d93a0001b60d8e03d",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmapgl/_uirevision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django.contrib.localflavor.es.forms import (ESPostalCodeField, ESPhoneNumberField,
ESIdentityCardNumberField, ESCCCField, ESRegionSelect, ESProvinceSelect)
from django.test import SimpleTestCase
class ESLocalFlavorTests(SimpleTestCase):
def test_ESRegionSelect(self):
f = ESRegionSelect()
out = u'''<select name="regions">
<option value="AN">Andalusia</option>
<option value="AR">Aragon</option>
<option value="O">Principality of Asturias</option>
<option value="IB">Balearic Islands</option>
<option value="PV">Basque Country</option>
<option value="CN">Canary Islands</option>
<option value="S">Cantabria</option>
<option value="CM">Castile-La Mancha</option>
<option value="CL">Castile and Leon</option>
<option value="CT" selected="selected">Catalonia</option>
<option value="EX">Extremadura</option>
<option value="GA">Galicia</option>
<option value="LO">La Rioja</option>
<option value="M">Madrid</option>
<option value="MU">Region of Murcia</option>
<option value="NA">Foral Community of Navarre</option>
<option value="VC">Valencian Community</option>
</select>'''
self.assertEqual(f.render('regions', 'CT'), out)
def test_ESProvinceSelect(self):
f = ESProvinceSelect()
out = u'''<select name="provinces">
<option value="01">Arava</option>
<option value="02">Albacete</option>
<option value="03">Alacant</option>
<option value="04">Almeria</option>
<option value="05">Avila</option>
<option value="06">Badajoz</option>
<option value="07">Illes Balears</option>
<option value="08" selected="selected">Barcelona</option>
<option value="09">Burgos</option>
<option value="10">Caceres</option>
<option value="11">Cadiz</option>
<option value="12">Castello</option>
<option value="13">Ciudad Real</option>
<option value="14">Cordoba</option>
<option value="15">A Coruna</option>
<option value="16">Cuenca</option>
<option value="17">Girona</option>
<option value="18">Granada</option>
<option value="19">Guadalajara</option>
<option value="20">Guipuzkoa</option>
<option value="21">Huelva</option>
<option value="22">Huesca</option>
<option value="23">Jaen</option>
<option value="24">Leon</option>
<option value="25">Lleida</option>
<option value="26">La Rioja</option>
<option value="27">Lugo</option>
<option value="28">Madrid</option>
<option value="29">Malaga</option>
<option value="30">Murcia</option>
<option value="31">Navarre</option>
<option value="32">Ourense</option>
<option value="33">Asturias</option>
<option value="34">Palencia</option>
<option value="35">Las Palmas</option>
<option value="36">Pontevedra</option>
<option value="37">Salamanca</option>
<option value="38">Santa Cruz de Tenerife</option>
<option value="39">Cantabria</option>
<option value="40">Segovia</option>
<option value="41">Seville</option>
<option value="42">Soria</option>
<option value="43">Tarragona</option>
<option value="44">Teruel</option>
<option value="45">Toledo</option>
<option value="46">Valencia</option>
<option value="47">Valladolid</option>
<option value="48">Bizkaia</option>
<option value="49">Zamora</option>
<option value="50">Zaragoza</option>
<option value="51">Ceuta</option>
<option value="52">Melilla</option>
</select>'''
self.assertEqual(f.render('provinces', '08'), out)
def test_ESPostalCodeField(self):
error_invalid = [u'Enter a valid postal code in the range and format 01XXX - 52XXX.']
valid = {
'08028': '08028',
'28080': '28080',
}
invalid = {
'53001': error_invalid,
'0801': error_invalid,
'080001': error_invalid,
'00999': error_invalid,
'08 01': error_invalid,
'08A01': error_invalid,
}
self.assertFieldOutput(ESPostalCodeField, valid, invalid)
def test_ESPhoneNumberField(self):
error_invalid = [u'Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.']
valid = {
'650010101': '650010101',
'931234567': '931234567',
'800123123': '800123123',
'789789789': '789789789',
}
invalid = {
'555555555': error_invalid,
'489489489': error_invalid,
'99123123': error_invalid,
'9999123123': error_invalid,
}
self.assertFieldOutput(ESPhoneNumberField, valid, invalid)
def test_ESIdentityCardNumberField(self):
error_invalid = [u'Please enter a valid NIF, NIE, or CIF.']
error_checksum_nif = [u'Invalid checksum for NIF.']
error_checksum_nie = [u'Invalid checksum for NIE.']
error_checksum_cif = [u'Invalid checksum for CIF.']
valid = {
'78699688J': '78699688J',
'78699688-J': '78699688J',
'78699688 J': '78699688J',
'78699688 j': '78699688J',
'X0901797J': 'X0901797J',
'X-6124387-Q': 'X6124387Q',
'X 0012953 G': 'X0012953G',
'x-3287690-r': 'X3287690R',
't-03287690r': 'T03287690R',
'P2907500I': 'P2907500I',
'B38790911': 'B38790911',
'B31234560': 'B31234560',
'B-3879091A': 'B3879091A',
'B 38790911': 'B38790911',
'P-3900800-H': 'P3900800H',
'P 39008008': 'P39008008',
'C-28795565': 'C28795565',
'C 2879556E': 'C2879556E',
}
invalid = {
'78699688T': error_checksum_nif,
'X-03287690': error_invalid,
'X-03287690-T': error_checksum_nie,
'B 38790917': error_checksum_cif,
'C28795567': error_checksum_cif,
'I38790911': error_invalid,
'78699688-2': error_invalid,
}
self.assertFieldOutput(ESIdentityCardNumberField, valid, invalid)
def test_ESCCCField(self):
error_invalid = [u'Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.']
error_checksum = [u'Invalid checksum for bank account number.']
valid = {
'20770338793100254321': '20770338793100254321',
'2077 0338 79 3100254321': '2077 0338 79 3100254321',
'2077-0338-79-3100254321': '2077-0338-79-3100254321',
}
invalid = {
'2077.0338.79.3100254321': error_invalid,
'2077-0338-78-3100254321': error_checksum,
'2077-0338-89-3100254321': error_checksum,
'2077-03-3879-3100254321': error_invalid,
}
self.assertFieldOutput(ESCCCField, valid, invalid)
|
{
"content_hash": "f7a8e9d5b71a94d6693d293aa2045c58",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 112,
"avg_line_length": 38.35672514619883,
"alnum_prop": 0.6339381003201707,
"repo_name": "mixman/djangodev",
"id": "ccd08b168d0265330a74717f7d2ffc81d08b77d8",
"size": "6559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/localflavor/es/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
}
|
"""
Basic chain parser tool for SmartChain functions.
Author: Tim M. (TM2013)
Co-Author: Bitkapp (aka alaniz)
Organization: Project Radon
Date: 2/17/2016
Requirements:
BitcoinRPC
An RPC-enabled client
"""
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import logging
import time
try:
import cPickle as pickle
except:
import pickle
# Debug settings
debug = True
if debug:
logging.basicConfig()
logging.getLogger("BitcoinRPC").setLevel(logging.DEBUG)
# RPC Configuration
rpc_user = "user"
rpc_pass = "pass"
rpc_port = "port"
class DataBase():
def __init__(self):
# Initialise database and RPC connection
self.loadSync()
self.rpc = AuthServiceProxy(("http://%s:%s@127.0.0.1:%s/") % (rpc_user, rpc_pass, rpc_port))
def saveSync(self):
# Dump database into a pickle
pickle.dump(self.block_data, open('block_data.p','wb'))
def loadSync(self):
# Load database from pickle
try:
self.block_data = pickle.load(open('block_data.p','rb'))
except IOError as e:
# If no pickle exists initialise a new database
self.block_data = {}
def syncFromLastBlock(self):
block_height = self.rpc.getblockcount()
# Sync from last block of existing database
try:
if self.block_data:
last_block = max(self.block_data.keys())
for block in range(last_block+1, block_height):
self.block_data[block] = self.rpc.getblockbynumber(block)["tx"]
# Start new sync process if new database has been initialised
else:
for block in range(0, block_height):
self.block_data[block] = self.rpc.getblockbynumber(block)["tx"]
except KeyboardInterrupt as e:
self.saveSync()
def returnBlock(self, blocknumber):
# Returns block data from database for a particular block
try:
block = self.block_data[blocknumber]
return block
except KeyError as e:
raise KeyError('Local database is not synced to required block height.')
def continuousSync(self):
while True:
self.syncFromLastBlock()
self.saveSync()
time.sleep(60)
#d = DataBase()
#d.syncFromLastBlock()
#d.saveSync()
#d.continuousSync()
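# Illustrative read-back of a synced block (values are placeholders; assumes the
# local pickle already covers this block height):
#d = DataBase()
#d.syncFromLastBlock()
#print(d.returnBlock(1000))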
|
{
"content_hash": "e44122f478f9228ea4868aec5f560517",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 100,
"avg_line_length": 29.9390243902439,
"alnum_prop": 0.6036659877800408,
"repo_name": "tm2013/SmartChain-OP_RETURN",
"id": "1a736d5d62c0777ed8971b64fd87310ebf9ce8c6",
"size": "2455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SmartChain_chain_parser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18918"
}
],
"symlink_target": ""
}
|
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.cluster import models as cluster_models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster import base as cluster_base
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy):
@property
def cluster_class(self):
return GaleraCommonCluster
@property
def cluster_view_class(self):
return GaleraCommonClusterView
@property
def mgmt_cluster_view_class(self):
return GaleraCommonMgmtClusterView
class GaleraCommonCluster(cluster_models.Cluster):
@staticmethod
def _validate_cluster_instances(context, instances, datastore,
datastore_version):
"""Validate the flavor and volume"""
ds_conf = CONF.get(datastore_version.manager)
num_instances = len(instances)
# Check number of instances is at least min_cluster_member_count
if num_instances < ds_conf.min_cluster_member_count:
raise exception.ClusterNumInstancesNotLargeEnough(
num_instances=ds_conf.min_cluster_member_count)
# Checking flavors and get delta for quota check
flavor_ids = [instance['flavor_id'] for instance in instances]
if len(set(flavor_ids)) != 1:
raise exception.ClusterFlavorsNotEqual()
flavor_id = flavor_ids[0]
nova_client = remote.create_nova_client(context)
try:
flavor = nova_client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
deltas = {'instances': num_instances}
# Checking volumes and get delta for quota check
volume_sizes = [instance['volume_size'] for instance in instances
if instance.get('volume_size', None)]
volume_size = None
if ds_conf.volume_support:
if len(volume_sizes) != num_instances:
raise exception.ClusterVolumeSizeRequired()
if len(set(volume_sizes)) != 1:
raise exception.ClusterVolumeSizesNotEqual()
volume_size = volume_sizes[0]
cluster_models.validate_volume_size(volume_size)
deltas['volumes'] = volume_size * num_instances
else:
if len(volume_sizes) > 0:
raise exception.VolumeNotSupported()
ephemeral_support = ds_conf.device_path
if ephemeral_support and flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
# quota check
check_quotas(context.tenant, deltas)
# Checking networks are same for the cluster
instance_nics = []
for instance in instances:
nics = instance.get('nics')
if nics:
instance_nics.append(nics[0].get('net-id'))
if len(set(instance_nics)) > 1:
raise exception.ClusterNetworksNotEqual()
if not instance_nics:
return
instance_nic = instance_nics[0]
try:
nova_client.networks.get(instance_nic)
except nova_exceptions.NotFound:
raise exception.NetworkNotFound(uuid=instance_nic)
@staticmethod
def _create_instances(context, db_info, datastore, datastore_version,
instances):
member_config = {"id": db_info.id,
"instance_type": "member"}
name_index = 1
for instance in instances:
if not instance.get("name"):
instance['name'] = "%s-member-%s" % (db_info.name,
str(name_index))
name_index += 1
        # Use a list comprehension instead of map() so the instances are
        # created eagerly even if the caller never iterates the result
        # (map() is lazy under Python 3).
        return [Instance.create(context,
                                instance['name'],
                                instance['flavor_id'],
                                datastore_version.image_id,
                                [], [],
                                datastore, datastore_version,
                                instance.get('volume_size', None),
                                None,
                                availability_zone=instance.get(
                                    'availability_zone', None),
                                nics=instance.get('nics', None),
                                configuration_id=None,
                                cluster_config=member_config)
                for instance in instances]
@classmethod
def create(cls, context, name, datastore, datastore_version,
instances, extended_properties):
LOG.debug("Initiating Galera cluster creation.")
cls._validate_cluster_instances(context, instances, datastore,
datastore_version)
        # Create the cluster DB record with its initial task status
db_info = cluster_models.DBCluster.create(
name=name, tenant_id=context.tenant,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)
cls._create_instances(context, db_info, datastore, datastore_version,
instances)
        # Hand off to the task manager to continue the cluster configuration
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
return cls(context, db_info, datastore, datastore_version)
def _get_cluster_network_interfaces(self):
nova_client = remote.create_nova_client(self.context)
nova_instance_id = self.db_instances[0].compute_instance_id
interfaces = nova_client.virtual_interfaces.list(nova_instance_id)
ret = [{"net-id": getattr(interface, 'net_id')}
for interface in interfaces]
return ret
def grow(self, instances):
LOG.debug("Growing cluster %s." % self.id)
self.validate_cluster_available()
context = self.context
db_info = self.db_info
datastore = self.ds
datastore_version = self.ds_version
db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
try:
# Get the network of the existing cluster instances.
interface_ids = self._get_cluster_network_interfaces()
for instance in instances:
instance["nics"] = interface_ids
new_instances = self._create_instances(
context, db_info, datastore, datastore_version, instances)
task_api.load(context, datastore_version.manager).grow_cluster(
db_info.id, [instance.id for instance in new_instances])
        except Exception:
            db_info.update(task_status=ClusterTasks.NONE)
            raise
return self.__class__(context, db_info,
datastore, datastore_version)
def shrink(self, instances):
"""Removes instances from a cluster."""
LOG.debug("Shrinking cluster %s." % self.id)
self.validate_cluster_available()
removal_instances = [Instance.load(self.context, inst_id)
for inst_id in instances]
db_instances = DBInstance.find_all(cluster_id=self.db_info.id).all()
if len(db_instances) - len(removal_instances) < 1:
raise exception.ClusterShrinkMustNotLeaveClusterEmpty()
self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
try:
task_api.load(self.context, self.ds_version.manager
).shrink_cluster(self.db_info.id,
[instance.id
for instance in removal_instances])
        except Exception:
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise
return self.__class__(self.context, self.db_info,
self.ds, self.ds_version)
class GaleraCommonClusterView(ClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
class GaleraCommonMgmtClusterView(MgmtClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
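A minimal usage sketch (not part of the module above; the flavor and network
IDs below are hypothetical). The instances payload must carry at least
min_cluster_member_count members with identical flavor_id and volume_size
values, which is what _validate_cluster_instances() enforces before the
cluster DB record is created:
# Hypothetical request payload for GaleraCommonCluster.create().
members = [{'flavor_id': 'flavor-uuid',            # identical across members
            'volume_size': 10,                     # GB, identical across members
            'nics': [{'net-id': 'network-uuid'}],  # same network for all
            'availability_zone': 'nova'}
           for _ in range(3)]
# cluster = GaleraCommonCluster.create(context, 'galera-1', datastore,
#                                      datastore_version, members,
#                                      extended_properties={})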
|
{
"content_hash": "b5a257e61e4d5d474c62b05835b67f6f",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 39.55203619909502,
"alnum_prop": 0.5914655073790184,
"repo_name": "mmasaki/trove",
"id": "c4bd58b27ceb6ee6e7fcf6f6c0ef5c4ec08c2959",
"size": "9372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/common/strategies/cluster/experimental/galera_common/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60539"
},
{
"name": "Python",
"bytes": "4204079"
},
{
"name": "Shell",
"bytes": "19186"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
import time
while True:
print "I need to make sure I'm still alive."
time.sleep(1)
|
{
"content_hash": "56b59690ff325462d84121ba1ab8ac44",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 46,
"avg_line_length": 17.6,
"alnum_prop": 0.6931818181818182,
"repo_name": "adcade/puppet-upstart",
"id": "4a5a662c2206e23c9df96be3539bfc6720331cf7",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "349"
},
{
"name": "JavaScript",
"bytes": "236"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "88"
},
{
"name": "Ruby",
"bytes": "335"
}
],
"symlink_target": ""
}
|
#
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile, ImagePalette
from ._binary import i32be as i32
__version__ = "0.3"
def _accept(prefix):
return len(prefix) >= 4 and i32(prefix) == 0x59a66a95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
format = "SUN"
format_description = "Sun Raster File"
def _open(self):
# The Sun Raster file header is 32 bytes in length
# and has the following format:
# typedef struct _SunRaster
# {
# DWORD MagicNumber; /* Magic (identification) number */
# DWORD Width; /* Width of image in pixels */
# DWORD Height; /* Height of image in pixels */
# DWORD Depth; /* Number of bits per pixel */
# DWORD Length; /* Size of image data in bytes */
# DWORD Type; /* Type of raster file */
# DWORD ColorMapType; /* Type of color map */
# DWORD ColorMapLength; /* Size of the color map in bytes */
# } SUNRASTER;
# HEAD
s = self.fp.read(32)
if i32(s) != 0x59a66a95:
raise SyntaxError("not an SUN raster file")
offset = 32
self.size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
data_length = i32(s[16:20]) # unreliable, ignore.
file_type = i32(s[20:24])
palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary
palette_length = i32(s[28:32])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 4:
self.mode, rawmode = "L", "L;4"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
if file_type == 3:
self.mode, rawmode = "RGB", "RGB"
else:
self.mode, rawmode = "RGB", "BGR"
elif depth == 32:
if file_type == 3:
self.mode, rawmode = 'RGB', 'RGBX'
else:
self.mode, rawmode = 'RGB', 'BGRX'
else:
raise SyntaxError("Unsupported Mode/Bit Depth")
if palette_length:
if palette_length > 1024:
raise SyntaxError("Unsupported Color Palette Length")
if palette_type != 1:
raise SyntaxError("Unsupported Palette Type")
offset = offset + palette_length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
if self.mode == "L":
self.mode = "P"
rawmode = rawmode.replace('L', 'P')
# 16 bit boundaries on stride
stride = ((self.size[0] * depth + 15) // 16) * 2
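        # Worked example (hypothetical sizes): a 100-pixel-wide, 8-bit image
        # gives ((100 * 8 + 15) // 16) * 2 = 100 bytes per row, while a
        # 100-pixel-wide 1-bit image gives ((100 * 1 + 15) // 16) * 2 = 14
        # bytes, i.e. 100 bits of pixel data padded to a 16-bit boundary.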
# file type: Type is the version (or flavor) of the bitmap
# file. The following values are typically found in the Type
# field:
# 0000h Old
# 0001h Standard
# 0002h Byte-encoded
# 0003h RGB format
# 0004h TIFF format
# 0005h IFF format
# FFFFh Experimental
# Old and standard are the same, except for the length tag.
# byte-encoded is run-length-encoded
# RGB looks similar to standard, but RGB byte order
# TIFF and IFF mean that they were converted from T/IFF
# Experimental means that it's something else.
# (https://www.fileformat.info/format/sunraster/egff.htm)
if file_type in (0, 1, 3, 4, 5):
self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))]
elif file_type == 2:
self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)]
else:
raise SyntaxError('Unsupported Sun Raster file type')
#
# registry
Image.register_open(SunImageFile.format, SunImageFile, _accept)
Image.register_extension(SunImageFile.format, ".ras")
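A minimal usage sketch (the .ras file name is hypothetical): once the plugin
is registered above, Sun raster files are handled through the normal PIL
entry points.
from PIL import Image
im = Image.open("example.ras")         # _accept() matches the 0x59a66a95 magic
print(im.format, im.mode, im.size)     # e.g. "SUN", "RGB", (640, 480)
im.convert("RGB").save("example.png")  # decode and re-encode via another plugin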
|
{
"content_hash": "d6bac22d85cb73c411889359a5cbd9af",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 82,
"avg_line_length": 31.764705882352942,
"alnum_prop": 0.5483796296296296,
"repo_name": "isabernardes/Heriga",
"id": "fd5e82724df892a0b8fc00bf28a22cf4b593aa28",
"size": "4320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Herigaenv/lib/python2.7/site-packages/PIL/SunImagePlugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "662999"
},
{
"name": "HTML",
"bytes": "116009"
},
{
"name": "JavaScript",
"bytes": "848298"
},
{
"name": "Python",
"bytes": "5703559"
},
{
"name": "Shell",
"bytes": "3711"
}
],
"symlink_target": ""
}
|