repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cleverhans-lab/cleverhans | cleverhans/devtools/list_files.py | 2 | 2157 | """Code for listing files that belong to the library."""
import os
import cleverhans
def list_files(suffix=""):
"""
Returns a list of all files in CleverHans with the given suffix.
Parameters
----------
suffix : str
Returns
-------
file_list : list
A list of all files in CleverHans whose filepath ends with `suffix`.
"""
cleverhans_path = os.path.abspath(cleverhans.__path__[0])
# In some environments cleverhans_path does not point to a real directory.
# In such case return empty list.
if not os.path.isdir(cleverhans_path):
return []
repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir))
file_list = _list_files(cleverhans_path, suffix)
extra_dirs = [
"cleverhans_tutorials",
"examples",
"scripts",
"tests_tf",
"tests_pytorch",
]
for extra_dir in extra_dirs:
extra_path = os.path.join(repo_path, extra_dir)
if os.path.isdir(extra_path):
extra_files = _list_files(extra_path, suffix)
extra_files = [os.path.join(os.pardir, path) for path in extra_files]
file_list = file_list + extra_files
return file_list
def _list_files(path, suffix=""):
"""
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
"""
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return []
| mit | 7df4b7fe39fc4d5b5c12112d4d5b5caa | 27.012987 | 81 | 0.589708 | 3.810954 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans/utils_tf.py | 1 | 32235 | """Utility functions for writing TensorFlow code"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import math
import os
import time
import warnings
import numpy as np
import six
from six.moves import xrange
import tensorflow as tf
from cleverhans.compat import device_lib
from cleverhans.compat import reduce_sum, reduce_mean
from cleverhans.compat import reduce_max
from cleverhans.compat import softmax_cross_entropy_with_logits
from cleverhans.utils import batch_indices, _ArgsWrapper, create_logger
_logger = create_logger("cleverhans.utils.tf")
_logger.setLevel(logging.INFO)
def model_loss(y, model, mean=True):
"""
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
"""
warnings.warn(
"This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train."
)
op = model.op
if op.type == "Softmax":
(logits,) = op.inputs
else:
logits = model
out = softmax_cross_entropy_with_logits(logits=logits, labels=y)
if mean:
out = reduce_mean(out)
return out
def initialize_uninitialized_global_variables(sess):
"""
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
"""
# List all global variables
global_vars = tf.global_variables()
# Find initialized status for all variables
is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
is_initialized = sess.run(is_var_init)
# List all variables that were not initialized previously
not_initialized_vars = [
var for (var, init) in zip(global_vars, is_initialized) if not init
]
# Initialize all uninitialized variables found, if any
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
def train(
sess,
loss,
x,
y,
X_train,
Y_train,
save=False,
init_all=False,
evaluate=None,
feed=None,
args=None,
rng=None,
var_list=None,
fprop_args=None,
optimizer=None,
):
"""
Train a TF graph.
This function is deprecated. Prefer cleverhans.train.train when possible.
cleverhans.train.train supports multiple GPUs but this function is still
needed to support legacy models that do not support calling fprop more
than once.
:param sess: TF session to use when training the graph
:param loss: tensor, the model training loss.
:param x: input placeholder
:param y: output placeholder (for labels)
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:return: True if model trained
"""
warnings.warn(
"This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train."
)
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
if optimizer is None:
assert args.learning_rate is not None, (
"Learning rate was not given " "in args dict"
)
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Define optimizer
loss_value = loss.fprop(x, y, **fprop_args)
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError(
"optimizer object must be from a child class of " "tf.train.Optimizer"
)
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.minimize(loss_value, var_list=var_list)
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn(
"Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version."
)
sess.run(tf.initialize_all_variables())
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {
x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
}
if feed is not None:
feed_dict.update(feed)
train_step.run(feed_dict=feed_dict)
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info(
"Epoch " + str(epoch) + " took " + str(cur - prev) + " seconds"
)
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
_logger.info("Completed model training and saved at: " + str(save_path))
else:
_logger.info("Completed model training.")
return True
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None, feed=None, args=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
global _model_eval_cache
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument " "must be supplied.")
# Define accuracy symbolically
key = (y, predictions)
if key in _model_eval_cache:
correct_preds = _model_eval_cache[key]
else:
correct_preds = tf.equal(tf.argmax(y, axis=-1), tf.argmax(predictions, axis=-1))
_model_eval_cache[key] = correct_preds
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:], dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:], dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuarcy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
_model_eval_cache = {}
def tf_model_load(sess, file_path=None):
"""
:param sess: the session object to restore
:param file_path: path to the restored session, if None is
taken from FLAGS.train_dir and FLAGS.filename
:return:
"""
with sess.as_default():
saver = tf.train.Saver()
if file_path is None:
error = "file_path argument is missing."
raise ValueError(error)
saver.restore(sess, file_path)
return True
def batch_eval(*args, **kwargs):
"""
Wrapper around deprecated function.
"""
# Inside function to avoid circular import
from cleverhans.evaluation import batch_eval as new_batch_eval
warnings.warn(
"batch_eval has moved to cleverhans.evaluation. "
"batch_eval will be removed from utils_tf on or after "
"2019-03-09."
)
return new_batch_eval(*args, **kwargs)
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
"""
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
"""
with tf.name_scope(scope, "l2_batch_normalize") as name_scope:
x_shape = tf.shape(x)
x = tf.contrib.layers.flatten(x)
x /= epsilon + reduce_max(tf.abs(x), 1, keepdims=True)
square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
x_norm = tf.multiply(x, x_inv_norm)
return tf.reshape(x_norm, x_shape, name_scope)
def kl_with_logits(
p_logits, q_logits, scope=None, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES
):
"""Helper function to compute kl-divergence KL(p || q)"""
with tf.name_scope(scope, "kl_divergence") as name:
p = tf.nn.softmax(p_logits)
p_log = tf.nn.log_softmax(p_logits)
q_log = tf.nn.log_softmax(q_logits)
loss = reduce_mean(reduce_sum(p * (p_log - q_log), axis=1), name=name)
tf.losses.add_loss(loss, loss_collection)
return loss
def clip_eta(eta, ord, eps):
"""
Helper function to clip the perturbation to epsilon norm ball.
:param eta: A tensor with the current perturbation.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, bound of the perturbation.
"""
# Clipping perturbation eta to self.ord norm ball
if ord not in [np.inf, 1, 2]:
raise ValueError("ord must be np.inf, 1, or 2.")
reduc_ind = list(xrange(1, len(eta.get_shape())))
avoid_zero_div = 1e-12
if ord == np.inf:
eta = clip_by_value(eta, -eps, eps)
elif ord == 1:
# Implements a projection algorithm onto the l1-ball from
# (Duchi et al. 2008) that runs in time O(d*log(d)) where d is the
# input dimension.
# Paper link (Duchi et al. 2008): https://dl.acm.org/citation.cfm?id=1390191
eps = tf.cast(eps, eta.dtype)
dim = tf.reduce_prod(tf.shape(eta)[1:])
eta_flat = tf.reshape(eta, (-1, dim))
abs_eta = tf.abs(eta_flat)
if "sort" in dir(tf):
mu = -tf.sort(-abs_eta, axis=-1)
else:
# `tf.sort` is only available in TF 1.13 onwards
mu = tf.nn.top_k(abs_eta, k=dim, sorted=True)[0]
cumsums = tf.cumsum(mu, axis=-1)
js = tf.cast(tf.divide(1, tf.range(1, dim + 1)), eta.dtype)
t = tf.cast(tf.greater(mu - js * (cumsums - eps), 0), eta.dtype)
rho = tf.argmax(t * cumsums, axis=-1)
rho_val = tf.reduce_max(t * cumsums, axis=-1)
theta = tf.divide(rho_val - eps, tf.cast(1 + rho, eta.dtype))
eta_sgn = tf.sign(eta_flat)
eta_proj = eta_sgn * tf.maximum(abs_eta - theta[:, tf.newaxis], 0)
eta_proj = tf.reshape(eta_proj, tf.shape(eta))
norm = tf.reduce_sum(tf.abs(eta), reduc_ind)
eta = tf.where(tf.greater(norm, eps), eta_proj, eta)
elif ord == 2:
# avoid_zero_div must go inside sqrt to avoid a divide by zero
# in the gradient through this operation
norm = tf.sqrt(
tf.maximum(
avoid_zero_div, reduce_sum(tf.square(eta), reduc_ind, keepdims=True)
)
)
# We must *clip* to within the norm ball, not *normalize* onto the
# surface of the ball
factor = tf.minimum(1.0, div(eps, norm))
eta = eta * factor
return eta
def zero_out_clipped_grads(grad, x, clip_min, clip_max):
"""
Helper function to erase entries in the gradient where the update would be
clipped.
:param grad: The gradient
:param x: The current input
:param clip_min: Minimum input component value
:param clip_max: Maximum input component value
"""
signed_grad = tf.sign(grad)
# Find input components that lie at the boundary of the input range, and
# where the gradient points in the wrong direction.
clip_low = tf.logical_and(
tf.less_equal(x, tf.cast(clip_min, x.dtype)), tf.less(signed_grad, 0)
)
clip_high = tf.logical_and(
tf.greater_equal(x, tf.cast(clip_max, x.dtype)), tf.greater(signed_grad, 0)
)
clip = tf.logical_or(clip_low, clip_high)
grad = tf.where(clip, mul(grad, 0), grad)
return grad
def random_exponential(shape, rate=1.0, dtype=tf.float32, seed=None):
"""
Helper function to sample from the exponential distribution, which is not
included in core TensorFlow.
:shape: shape of the sampled tensor.
:rate: (optional) rate parameter of the exponential distribution, defaults to 1.0.
:dtype: (optional) data type of the sempled tensor, defaults to tf.float32.
:seed: (optional) custom seed to be used for sampling.
"""
return tf.random_gamma(shape, alpha=1, beta=1.0 / rate, dtype=dtype, seed=seed)
def random_laplace(shape, loc=0.0, scale=1.0, dtype=tf.float32, seed=None):
"""
Helper function to sample from the Laplace distribution, which is not
included in core TensorFlow.
:shape: shape of the sampled tensor.
:loc: (optional) mean of the laplace distribution, defaults to 0.0.
:scale: (optional) scale parameter of the laplace diustribution, defaults to 1.0.
:dtype: (optional) data type of the sempled tensor, defaults to tf.float32.
:seed: (optional) custom seed to be used for sampling.
"""
z1 = random_exponential(shape, 1.0 / scale, dtype=dtype, seed=seed)
z2 = random_exponential(shape, 1.0 / scale, dtype=dtype, seed=seed)
return z1 - z2 + loc
def random_lp_vector(shape, ord, eps, dtype=tf.float32, seed=None):
"""
Helper function to generate uniformly random vectors from a norm ball of
radius epsilon.
:param shape: Output shape of the random sample. The shape is expected to be
of the form `(n, d1, d2, ..., dn)` where `n` is the number of
i.i.d. samples that will be drawn from a norm ball of dimension
`d1*d1*...*dn`.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, radius of the norm ball.
"""
if ord not in [np.inf, 1, 2]:
raise ValueError("ord must be np.inf, 1, or 2.")
if ord == np.inf:
r = tf.random_uniform(shape, -eps, eps, dtype=dtype, seed=seed)
else:
# For ord=1 and ord=2, we use the generic technique from
# (Calafiore et al. 1998) to sample uniformly from a norm ball.
# Paper link (Calafiore et al. 1998):
# https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=758215&tag=1
# We first sample from the surface of the norm ball, and then scale by
# a factor `w^(1/d)` where `w~U[0,1]` is a standard uniform random variable
# and `d` is the dimension of the ball. In high dimensions, this is roughly
# equivalent to sampling from the surface of the ball.
dim = tf.reduce_prod(shape[1:])
if ord == 1:
x = random_laplace(
(shape[0], dim), loc=1.0, scale=1.0, dtype=dtype, seed=seed
)
norm = tf.reduce_sum(tf.abs(x), axis=-1, keepdims=True)
elif ord == 2:
x = tf.random_normal((shape[0], dim), dtype=dtype, seed=seed)
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=-1, keepdims=True))
else:
raise ValueError("ord must be np.inf, 1, or 2.")
w = tf.pow(
tf.random.uniform((shape[0], 1), dtype=dtype, seed=seed),
1.0 / tf.cast(dim, dtype),
)
r = eps * tf.reshape(w * x / norm, shape)
return r
def model_train(
sess,
x,
y,
predictions,
X_train,
Y_train,
save=False,
predictions_adv=None,
init_all=True,
evaluate=None,
feed=None,
args=None,
rng=None,
var_list=None,
):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:return: True if model trained
"""
warnings.warn(
"This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train."
)
args = _ArgsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Define loss
loss = model_loss(y, predictions)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_step = train_step.minimize(loss, var_list=var_list)
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn(
"Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version."
)
sess.run(tf.initialize_all_variables())
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {
x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
}
if feed is not None:
feed_dict.update(feed)
train_step.run(feed_dict=feed_dict)
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info(
"Epoch " + str(epoch) + " took " + str(cur - prev) + " seconds"
)
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
_logger.info("Completed model training and saved at: " + str(save_path))
else:
_logger.info("Completed model training.")
return True
def infer_devices(devices=None):
"""
Returns the list of devices that multi-replica code should use.
:param devices: list of string device names, e.g. ["/GPU:0"]
If the user specifies this, `infer_devices` checks that it is
valid, and then uses this user-specified list.
If the user does not specify this, infer_devices uses:
- All available GPUs, if there are any
- CPU otherwise
"""
if devices is None:
devices = get_available_gpus()
if len(devices) == 0:
warnings.warn("No GPUS, running on CPU")
# Set device to empy string, tf will figure out whether to use
# XLA or not, etc., automatically
devices = [""]
else:
assert len(devices) > 0
for device in devices:
assert isinstance(device, six.string_types), type(device)
return devices
def get_available_gpus():
"""
Returns a list of string names of all available GPUs
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == "GPU"]
def silence():
"""
Silences tensorflaw's default printed messages
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def clip_by_value(t, clip_value_min, clip_value_max, name=None):
"""
A wrapper for clip_by_value that casts the clipping range if needed.
"""
def cast_clip(clip):
"""
Cast clipping range argument if needed.
"""
if t.dtype in (tf.float32, tf.float64):
if hasattr(clip, "dtype"):
# Convert to tf dtype in case this is a numpy dtype
clip_dtype = tf.as_dtype(clip.dtype)
if clip_dtype != t.dtype:
return tf.cast(clip, t.dtype)
return clip
clip_value_min = cast_clip(clip_value_min)
clip_value_max = cast_clip(clip_value_max)
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def mul(a, b):
"""
A wrapper around tf multiplication that does more automatic casting of
the input.
"""
def multiply(a, b):
"""Multiplication"""
return a * b
return op_with_scalar_cast(a, b, multiply)
def div(a, b):
"""
A wrapper around tf division that does more automatic casting of
the input.
"""
def divide(a, b):
"""Division"""
return a / b
return op_with_scalar_cast(a, b, divide)
def op_with_scalar_cast(a, b, f):
"""
Builds the graph to compute f(a, b).
If only one of the two arguments is a scalar and the operation would
cause a type error without casting, casts the scalar to match the
tensor.
:param a: a tf-compatible array or scalar
:param b: a tf-compatible array or scalar
"""
try:
return f(a, b)
except (TypeError, ValueError):
pass
def is_scalar(x):
"""Return True if `x` is a scalar"""
if hasattr(x, "get_shape"):
shape = x.get_shape()
return shape.ndims == 0
if hasattr(x, "ndim"):
return x.ndim == 0
assert isinstance(x, (int, float))
return True
a_scalar = is_scalar(a)
b_scalar = is_scalar(b)
if a_scalar and b_scalar:
raise TypeError("Trying to apply " + str(f) + " with mixed types")
if a_scalar and not b_scalar:
a = tf.cast(a, b.dtype)
if b_scalar and not a_scalar:
b = tf.cast(b, a.dtype)
return f(a, b)
def assert_less_equal(*args, **kwargs):
"""
Wrapper for tf.assert_less_equal
Overrides tf.device so that the assert always goes on CPU.
The unwrapped version raises an exception if used with tf.device("/GPU:x").
"""
with tf.device("/CPU:0"):
return tf.assert_less_equal(*args, **kwargs)
def assert_greater_equal(*args, **kwargs):
"""
Wrapper for tf.assert_greater_equal.
Overrides tf.device so that the assert always goes on CPU.
The unwrapped version raises an exception if used with tf.device("/GPU:x").
"""
with tf.device("/CPU:0"):
return tf.assert_greater_equal(*args, **kwargs)
def assert_equal(*args, **kwargs):
"""
Wrapper for tf.assert_equal.
Overrides tf.device so that the assert always goes on CPU.
The unwrapped version raises an exception if used with tf.device("/GPU:x").
"""
with tf.device("/CPU:0"):
return tf.assert_equal(*args, **kwargs)
def jacobian_graph(predictions, x, nb_classes):
"""
Create the Jacobian graph to be ran later in a TF session
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
:return:
"""
# This function will return a list of TF gradients
list_derivatives = []
# Define the TF graph elements to compute our derivatives for each class
for class_ind in xrange(nb_classes):
(derivatives,) = tf.gradients(predictions[:, class_ind], x)
list_derivatives.append(derivatives)
return list_derivatives
def jacobian_augmentation(
sess, x, X_sub_prev, Y_sub, grads, lmbda, aug_batch_size=512, feed=None
):
"""
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle)
"""
assert len(x.get_shape()) == len(np.shape(X_sub_prev))
assert len(grads) >= np.max(Y_sub) + 1
assert len(X_sub_prev) == len(Y_sub)
aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])
# Prepare input_shape (outside loop) for feeding dictionary below
input_shape = list(x.get_shape())
input_shape[0] = 1
# Create new numpy array for adversary training data
# with twice as many components on the first dimension.
X_sub = np.vstack([X_sub_prev, X_sub_prev])
num_samples = X_sub_prev.shape[0]
# Creating and processing as batch
for p_idxs in range(0, num_samples, aug_batch_size):
X_batch = X_sub_prev[p_idxs : p_idxs + aug_batch_size, ...]
feed_dict = {x: X_batch}
if feed is not None:
feed_dict.update(feed)
# Compute sign matrix
grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]
# Create new synthetic point in adversary substitute training set
for (indx, ind) in zip(
range(p_idxs, p_idxs + X_batch.shape[0]), range(X_batch.shape[0])
):
X_sub[num_samples + indx] = (
X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...]
)
# Return augmented training data (needs to be labeled afterwards)
return X_sub
| mit | 1b59a8c790682869c4ebd4ead0a669b5 | 34.697674 | 88 | 0.61669 | 3.805785 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/examples/RL-attack/train.py | 1 | 15594 | import argparse
import gym
import numpy as np
import os
import tensorflow as tf
import tempfile
import time
import json
import random
import rlattack.common.tf_util as U
from rlattack import logger
from rlattack import deepq
from rlattack.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rlattack.common.misc_util import (
boolean_flag,
pickle_load,
pretty_eta,
relatively_safe_pickle_dump,
set_global_seeds,
RunningAvg,
SimpleMonitor,
)
from rlattack.common.schedules import LinearSchedule, PiecewiseSchedule
# when updating this to non-deprecated ones, it is important to
# copy over LazyFrames
from rlattack.common.atari_wrappers_deprecated import wrap_dqn
from rlattack.common.azure_utils import Container
from model import model, dueling_model
from statistics import statistics
def parse_args():
parser = argparse.ArgumentParser("DQN experiments for Atari games")
# Environment
parser.add_argument("--env", type=str, default="Pong", help="name of the game")
parser.add_argument("--seed", type=int, default=42, help="which seed to use")
# Core DQN parameters
parser.add_argument(
"--replay-buffer-size", type=int, default=int(1e6), help="replay buffer size"
)
parser.add_argument(
"--lr", type=float, default=1e-4, help="learning rate for Adam optimizer"
)
parser.add_argument(
"--num-steps",
type=int,
default=int(2e8),
help="total number of steps to \
run the environment for",
)
parser.add_argument(
"--batch-size",
type=int,
default=32,
help="number of transitions to optimize \
at the same time",
)
parser.add_argument(
"--learning-freq",
type=int,
default=4,
help="number of iterations between \
every optimization step",
)
parser.add_argument(
"--target-update-freq",
type=int,
default=40000,
help="number of iterations between \
every target network update",
)
# Bells and whistles
boolean_flag(parser, "noisy", default=False, help="whether or not to NoisyNetwork")
boolean_flag(
parser, "double-q", default=True, help="whether or not to use double q learning"
)
boolean_flag(
parser, "dueling", default=False, help="whether or not to use dueling model"
)
boolean_flag(
parser,
"prioritized",
default=False,
help="whether or not to use prioritized replay buffer",
)
parser.add_argument(
"--prioritized-alpha",
type=float,
default=0.6,
help="alpha parameter for prioritized replay buffer",
)
parser.add_argument(
"--prioritized-beta0",
type=float,
default=0.4,
help="initial value of beta \
parameters for prioritized replay",
)
parser.add_argument(
"--prioritized-eps",
type=float,
default=1e-6,
help="eps parameter for prioritized replay buffer",
)
# Checkpointing
parser.add_argument(
"--save-dir",
type=str,
default=None,
required=True,
help="directory in which \
training state and model should be saved.",
)
parser.add_argument(
"--save-azure-container",
type=str,
default=None,
help="It present data will saved/loaded from Azure. \
Should be in format ACCOUNT_NAME:ACCOUNT_KEY:\
CONTAINER",
)
parser.add_argument(
"--save-freq",
type=int,
default=1e6,
help="save model once every time this many \
iterations are completed",
)
boolean_flag(
parser,
"load-on-start",
default=True,
help="if true and model was previously saved then training \
will be resumed",
)
# V: Attack Arguments #
parser.add_argument(
"--attack", type=str, default=None, help="Method to attack the model."
)
parser.add_argument(
"--attack-init", type=int, default=0, help="Iteration no. to begin attacks"
)
parser.add_argument(
"--attack-prob",
type=float,
default=0.0,
help="Probability of attack at each step, \
float in range 0 - 1.0",
)
return parser.parse_args()
def make_env(game_name):
    """Build the wrapped Atari environment for ``game_name``.

    Returns a ``(wrapped_env, monitor)`` pair: the DQN-wrapped environment
    used for acting/training, plus the underlying SimpleMonitor so that its
    episode statistics can be saved and restored with the training state.
    """
    base_env = gym.make("{}NoFrameskip-v4".format(game_name))
    monitored_env = SimpleMonitor(base_env)
    return wrap_dqn(monitored_env), monitored_env
def maybe_save_model(savedir, container, state):
    """Persist model weights and training state under ``savedir``.

    No-op when ``savedir`` is None. When an Azure ``container`` is
    provided, every artifact written locally is also mirrored to it.
    """
    if savedir is None:
        return
    started = time.time()

    def _mirror(local_name, remote_name):
        # Upload to Azure only when a container was configured.
        if container is not None:
            container.put(os.path.join(savedir, local_name), remote_name)

    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    _mirror(model_dir, model_dir)
    relatively_safe_pickle_dump(
        state, os.path.join(savedir, "training_state.pkl.zip"), compression=True
    )
    _mirror("training_state.pkl.zip", "training_state.pkl.zip")
    relatively_safe_pickle_dump(
        state["monitor_state"], os.path.join(savedir, "monitor_state.pkl")
    )
    _mirror("monitor_state.pkl", "monitor_state.pkl")
    logger.log("Saved model in {} seconds\n".format(time.time() - started))
def maybe_load_model(savedir, container):
    """Load model if present at the specified path.

    Looks for a previously saved ``training_state.pkl.zip`` under
    ``savedir`` (downloading it from Azure first when ``container`` is
    given), restores the checkpointed model weights for that iteration,
    and returns the deserialized training state.

    Parameters
    ----------
    savedir : str or None
        Directory the training state was saved to. If None, nothing is
        loaded and None is returned.
    container : Container or None
        Optional Azure blob container to fetch the state/model from.

    Returns
    -------
    dict or None
        The pickled training state (contains at least "num_iters"), or
        None when no saved state is found.
    """
    if savedir is None:
        return
    # Fix: the original wrapped this in a redundant nested os.path.join();
    # a single join produces the identical path.
    state_path = os.path.join(savedir, "training_state.pkl.zip")
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, "training_state.pkl.zip")
    else:
        found_model = os.path.exists(state_path)
    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log(
            "Loaded models checkpoint at {} iterations".format(state["num_iters"])
        )
        return state
if __name__ == "__main__":
    # Script entry point: parse CLI args, set up the (optionally
    # Azure-backed) checkpoint store, build the environment and run the
    # DQN training loop with optional adversarial-attack perturbations.
    args = parse_args()
    # Parse savedir and azure container.
    savedir = args.save_dir
    if args.save_azure_container is not None:
        account_name, account_key, container_name = args.save_azure_container.split(":")
        container = Container(
            account_name=account_name,
            account_key=account_key,
            container_name=container_name,
            maybe_create=True,
        )
        if savedir is None:
            # Careful! This will not get cleaned up.
            savedir = tempfile.TemporaryDirectory().name
    else:
        container = None
    # Create and seed the env.
    env, monitored_env = make_env(args.env)
    if args.seed > 0:
        set_global_seeds(args.seed)
        env.unwrapped.seed(args.seed)
    # V: Save arguments, configure log dump path to savedir #
    if savedir:
        with open(os.path.join(savedir, "args.json"), "w") as f:
            json.dump(vars(args), f)
        logger.configure(dir=savedir)  # log to savedir
    # NOTE(review): the argument 4 to U.make_session presumably caps the TF
    # session's threads -- confirm against the U helper module.
    with U.make_session(4) as sess:
        # Create training graph and replay buffer
        act, train, update_target, debug, craft_adv = deepq.build_train(
            make_obs_ph=lambda name: U.Uint8Input(
                env.observation_space.shape, name=name
            ),
            q_func=dueling_model if args.dueling else model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
            gamma=0.99,
            grad_norm_clipping=10,
            double_q=args.double_q,
            noisy=args.noisy,
            attack=args.attack,
        )
        approximate_num_iters = args.num_steps / 4
        # Epsilon schedule: 1.0 -> 0.1 over the first 2% of iterations,
        # then -> 0.01 by 20%, and held at 0.01 afterwards.
        exploration = PiecewiseSchedule(
            [
                (0, 1.0),
                (approximate_num_iters / 50, 0.1),
                (approximate_num_iters / 5, 0.01),
            ],
            outside_value=0.01,
        )
        if args.prioritized:
            replay_buffer = PrioritizedReplayBuffer(
                args.replay_buffer_size, args.prioritized_alpha
            )
            # Importance-sampling exponent beta is annealed up to 1.0.
            beta_schedule = LinearSchedule(
                approximate_num_iters, initial_p=args.prioritized_beta0, final_p=1.0
            )
        else:
            replay_buffer = ReplayBuffer(args.replay_buffer_size)
        U.initialize()
        update_target()
        num_iters = 0
        # Load the model
        state = maybe_load_model(savedir, container)
        if state is not None:
            num_iters, replay_buffer = (
                state["num_iters"],
                state["replay_buffer"],
            )
            monitored_env.set_state(state["monitor_state"])
        start_time, start_steps = None, None
        steps_per_iter = RunningAvg(0.999)
        iteration_time_est = RunningAvg(0.999)
        obs = env.reset()
        # Record the mean of the \sigma
        sigma_name_list = []
        sigma_list = []
        for param in tf.trainable_variables():
            # only record the \sigma in the action network
            if "sigma" in param.name and "deepq/q_func/action_value" in param.name:
                summary_name = (
                    param.name.replace("deepq/q_func/action_value/", "")
                    .replace("/", ".")
                    .split(":")[0]
                )
                sigma_name_list.append(summary_name)
                sigma_list.append(tf.reduce_mean(tf.abs(param)))
        f_mean_sigma = U.function(inputs=[], outputs=sigma_list)
        # Statistics
        writer = tf.summary.FileWriter(savedir, sess.graph)
        im_stats = statistics(
            scalar_keys=["action", "im_reward", "td_errors", "huber_loss"]
            + sigma_name_list
        )
        ep_stats = statistics(scalar_keys=["ep_reward", "ep_length"])
        # Main training loop
        ep_length = 0
        while True:
            num_iters += 1
            ep_length += 1
            # V: Perturb observation if we are past the init stage
            # and at a designated attack step
            # if craft_adv != None and (num_iters >= args.attack_init)
            # and ((num_iters - args.attack_init) % args.attack_freq == 0) :
            if (
                craft_adv is not None
                and (num_iters >= args.attack_init)
                and (random.random() <= args.attack_prob)
            ):
                obs = craft_adv(np.array(obs)[None])[0]
            # Take action and store transition in the replay buffer.
            if args.noisy:
                # greedily choose
                action = act(np.array(obs)[None], stochastic=False)[0]
            else:
                # epsilon greedy
                action = act(
                    np.array(obs)[None], update_eps=exploration.value(num_iters)
                )[0]
            new_obs, rew, done, info = env.step(action)
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs
            if done:
                obs = env.reset()
            # Start learning only once the buffer has warmed up, and then
            # only every `learning_freq` iterations.
            if (
                num_iters > max(5 * args.batch_size, args.replay_buffer_size // 20)
                and num_iters % args.learning_freq == 0
            ):
                # Sample a bunch of transitions from replay buffer
                if args.prioritized:
                    experience = replay_buffer.sample(
                        args.batch_size, beta=beta_schedule.value(num_iters)
                    )
                    (
                        obses_t,
                        actions,
                        rewards,
                        obses_tp1,
                        dones,
                        weights,
                        batch_idxes,
                    ) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        args.batch_size
                    )
                    weights = np.ones_like(rewards)
                # Minimize the error in Bellman's and compute TD-error
                td_errors, huber_loss = train(
                    obses_t, actions, rewards, obses_tp1, dones, weights
                )
                # Update the priorities in the replay buffer
                if args.prioritized:
                    new_priorities = np.abs(td_errors) + args.prioritized_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)
                # Write summary
                # (`action` and `rew` below come from the most recent env
                # step above, not from the sampled batch.)
                mean_sigma = f_mean_sigma()
                im_stats.add_all_summary(
                    writer,
                    [action, rew, np.mean(td_errors), np.mean(huber_loss)] + mean_sigma,
                    num_iters,
                )
            # Update target network.
            if num_iters % args.target_update_freq == 0:
                update_target()
            if start_time is not None:
                steps_per_iter.update(info["steps"] - start_steps)
                iteration_time_est.update(time.time() - start_time)
            start_time, start_steps = time.time(), info["steps"]
            # Save the model and training state.
            if num_iters > 0 and (
                num_iters % args.save_freq == 0 or info["steps"] > args.num_steps
            ):
                maybe_save_model(
                    savedir,
                    container,
                    {
                        "replay_buffer": replay_buffer,
                        "num_iters": num_iters,
                        "monitor_state": monitored_env.get_state(),
                    },
                )
            if info["steps"] > args.num_steps:
                break
            if done:
                steps_left = args.num_steps - info["steps"]
                completion = np.round(info["steps"] / args.num_steps, 1)
                mean_ep_reward = np.mean(info["rewards"][-100:])
                logger.record_tabular("% completion", completion)
                logger.record_tabular("steps", info["steps"])
                logger.record_tabular("iters", num_iters)
                logger.record_tabular("episodes", len(info["rewards"]))
                logger.record_tabular(
                    "reward (100 epi mean)", np.mean(info["rewards"][-100:])
                )
                if not args.noisy:
                    logger.record_tabular("exploration", exploration.value(num_iters))
                if args.prioritized:
                    logger.record_tabular("max priority", replay_buffer._max_priority)
                # NOTE(review): if steps_per_iter was never updated,
                # fps_estimate is the string "calculating:" and the ETA
                # division below would raise TypeError -- confirm this path
                # is unreachable in practice.
                fps_estimate = (
                    float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
                    if steps_per_iter._value is not None
                    else "calculating:"
                )
                logger.dump_tabular()
                logger.log()
                logger.log("ETA: " + pretty_eta(int(steps_left / fps_estimate)))
                logger.log()
                # add summary for one episode
                ep_stats.add_all_summary(writer, [mean_ep_reward, ep_length], num_iters)
                ep_length = 0
| mit | 7f6e355a8bfd7784bcc9ff9e03e33f39 | 34.848276 | 88 | 0.542773 | 4.035714 | false | false | false | false |
alkaline-ml/pmdarima | pmdarima/utils/metaestimators.py | 2 | 3457 | # -*- coding: utf-8 -*-
#
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
#
# Metaestimators for the ARIMA class. These classes are derived from the
# sklearn metaestimators, but adapted for more specific use with pmdarima.
from operator import attrgetter
from functools import update_wrapper
__all__ = [
'if_has_delegate'
]
class _IffHasDelegate(object):
    """Descriptor exposing a method only when a delegate attribute exists.

    Attribute access on the decorated method raises ``AttributeError``
    unless at least one of the names in ``delegate_names`` resolves on the
    owning instance. This enables ducktyping: ``hasattr(obj, name)`` is
    True exactly when a delegate is present. See
    https://docs.python.org/3/howto/descriptor.html for background on the
    descriptor protocol.
    """

    def __init__(self, fn, delegate_names):
        self.fn = fn
        self.delegate_names = delegate_names
        # Mirror fn's metadata (docstring, name) onto the descriptor.
        update_wrapper(self, fn)

    def __get__(self, obj, type=None):
        # Delegate checks apply to instances only; class-level access is
        # allowed through so that docstrings/help() remain reachable.
        if obj is not None:
            for candidate in self.delegate_names:
                try:
                    attrgetter(candidate)(obj)
                except AttributeError:
                    continue
                break
            else:
                # No delegate resolved: repeat the last lookup so its
                # AttributeError propagates to the caller.
                attrgetter(self.delegate_names[-1])(obj)

        def delegated(*args, **kwargs):
            return self.fn(obj, *args, **kwargs)

        # Copy metadata so help() works on the returned callable as well.
        update_wrapper(delegated, self.fn)
        return delegated
def if_has_delegate(delegate):
    """Create a decorator that hides a method unless a delegate exists.

    The returned decorator wraps a method so that it is only "visible"
    (i.e., ``hasattr`` returns True) when the decorated object exposes at
    least one of the attributes named by ``delegate``. Unlike
    scikit-learn's ``if_delegate_has_method``, visibility is controlled by
    the presence of the delegate itself rather than by a method on it.

    Parameters
    ----------
    delegate : string, list of strings or tuple of strings
        Name(s) of the sub-estimator attribute(s) on the base object. When
        several names are given, the first one found on the object wins.

    Examples
    --------
    >>> from pmdarima.utils.metaestimators import if_has_delegate
    >>> class A(object):
    ...     @if_has_delegate('d')
    ...     def func(self):
    ...         return True
    >>> a = A()
    >>> assert not hasattr(a, 'func')
    >>> a.d = None
    >>> assert hasattr(a, 'func') and a.func()
    """
    if isinstance(delegate, (list, tuple)):
        names = tuple(delegate)
    else:
        names = (delegate,)

    def decorate(fn):
        return _IffHasDelegate(fn, names)

    return decorate
| mit | 954de5efff0ed2e06eb236095fdcf3cd | 34.27551 | 79 | 0.637836 | 4.364899 | false | false | false | false |
alkaline-ml/pmdarima | pmdarima/preprocessing/exog/base.py | 2 | 3245 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import abc
from ..base import BaseTransformer
class BaseExogTransformer(BaseTransformer, metaclass=abc.ABCMeta):
    """Abstract base for transformers that operate on an exogenous array."""

    def _check_y_X(self, y, X, null_allowed=False):
        """Validate endog/exog, rejecting a null exog unless permitted.

        Delegates basic validation to the parent class, then enforces that
        ``X`` is present (exog transformers require it) unless the caller
        explicitly opted in to a null exog via ``null_allowed``.
        """
        y, X = super(BaseExogTransformer, self)._check_y_X(y, X)
        if not null_allowed and X is None:
            raise ValueError("X must be non-None for exog transformers")
        return y, X
class BaseExogFeaturizer(BaseExogTransformer, metaclass=abc.ABCMeta):
    """Transformers that create new exog features from the endog or exog array
    Parameters
    ----------
    prefix : str or None, optional (default=None)
        The feature prefix
    """
    def __init__(self, prefix=None):
        # Column-name prefix used when the generated features end up in a
        # pandas DataFrame (see _get_feature_names).
        self.prefix = prefix
    @abc.abstractmethod
    def _get_prefix(self):
        """Get the feature prefix for when exog is a pd.DataFrame"""
    def _get_feature_names(self, X):
        # Produce names "<prefix>_0", "<prefix>_1", ... one per column of
        # the generated feature matrix X.
        pfx = self._get_prefix()
        return ['%s_%i' % (pfx, i) for i in range(X.shape[1])]
    def _safe_hstack(self, X, features):
        """H-stack dataframes or np.ndarrays"""
        # Preserve pandas semantics whenever X is a DataFrame (or absent);
        # otherwise fall back to a plain numpy hstack.
        if X is None or isinstance(X, pd.DataFrame):
            # the features we're adding may be np.ndarray
            if not isinstance(features, pd.DataFrame):
                features = pd.DataFrame.from_records(features)
            # subclass may override this
            features.columns = self._get_feature_names(features)
            if X is not None:
                # ignore_index will remove names, which is a stupid quirk
                # of pandas... so manually reset the indices
                # https://stackoverflow.com/a/43406062/3015734
                X.index = features.index = np.arange(X.shape[0])
                return pd.concat([X, features], axis=1)
            # if X was None coming in, we'd still like to favor a pd.DF
            return features
        return np.hstack([X, features])
    def transform(self, y, X=None, n_periods=0, **kwargs):
        """Transform the new array
        Apply the transformation to the array after learning the training set's
        characteristics in the ``fit`` method. The transform method for
        featurizers behaves slightly differently in that the ``n_periods`` may
        be required to extrapolate for periods in the future.
        Parameters
        ----------
        y : array-like or None, shape=(n_samples,)
            The endogenous (time-series) array.
        X : array-like or None, shape=(n_samples, n_features)
            An array of additional covariates.
        n_periods : int, optional (default=0)
            The number of periods in the future to forecast. If ``n_periods``
            is 0, will compute the features for the training set.
            ``n_periods`` corresponds to the number of samples that will be
            returned.
        **kwargs : keyword args
            Keyword arguments required by the transform function.
        Returns
        -------
        y : array-like or None
            The transformed y array
        X : array-like or None
            The transformed X array
        """
        # NOTE(review): only the docstring/contract is visible here; the
        # concrete implementation appears to come from subclasses -- confirm.
| mit | cb89a54cb19e5352bb3816c4a93450fc | 33.892473 | 79 | 0.605547 | 4.214286 | false | false | false | false |
alkaline-ml/pmdarima | pmdarima/arima/_validation.py | 2 | 5090 | # -*- coding: utf-8 -*-
"""
Arg validation for auto-arima calls. This allows us to test validation more
directly without having to fit numerous combinations of models.
"""
import numpy as np
import warnings
from sklearn import metrics
from pmdarima.warnings import ModelFitWarning
# The valid information criteria
# ('oob' additionally requires a non-zero out-of-sample size; see
# check_information_criterion below.)
VALID_CRITERIA = {'aic', 'aicc', 'bic', 'hqic', 'oob'}
def auto_intercept(with_intercept, default):
    """Resolve the "auto" sentinel for ``with_intercept``.

    Returns ``default`` when the caller passed the literal string
    ``"auto"``; any other value is passed through unchanged.
    """
    return default if with_intercept == "auto" else with_intercept
def check_information_criterion(information_criterion, out_of_sample_size):
    """Validate the requested information criterion.

    Raises for unknown criteria, and degrades 'oob' to 'aic' (with a
    warning) when no out-of-sample scoring split is available.
    """
    if information_criterion not in VALID_CRITERIA:
        raise ValueError('auto_arima not defined for information_criteria=%s. '
                         'Valid information criteria include: %r'
                         % (information_criterion, VALID_CRITERIA))
    # 'oob' scoring needs a held-out validation split; without one we must
    # fall back to AIC.
    if information_criterion == 'oob' and out_of_sample_size == 0:
        warnings.warn('information_criterion cannot be \'oob\' with '
                      'out_of_sample_size = 0. '
                      'Falling back to information criterion = aic.')
        information_criterion = 'aic'
    return information_criterion
def check_kwargs(kwargs):
    """Normalize a possibly-None kwargs bundle into a dict.

    Named kwarg bundles (like ``sarimax_kwargs``) default to None to avoid
    the mutable-default pitfall; this coerces any falsy value to ``{}`` so
    the result can be splatted safely.
    """
    return kwargs or {}
def check_m(m, seasonal):
    """Validate the seasonal periodicity ``m``.

    Raises for negative values (or non-positive values when ``seasonal``
    is True), and coerces ``m`` to 0 for non-seasonal fits, warning when
    the caller passed something other than the default of 1.
    """
    if m < 0 or (seasonal and m < 1):
        raise ValueError('m must be a positive integer (> 0)')
    if not seasonal and m > 1:
        # default m is 1, so only warn for a non-default value
        warnings.warn("m (%i) set for non-seasonal fit. Setting to 0" % m)
        return 0
    return m
def check_n_jobs(stepwise, n_jobs):
    """Reconcile ``n_jobs`` with the stepwise search setting.

    The stepwise algorithm is inherently sequential and cannot be fit in
    parallel, so any ``n_jobs`` other than 1 is coerced back to 1 (with a
    warning) when ``stepwise`` is True.

    Parameters
    ----------
    stepwise : bool
        Whether the stepwise search algorithm will be used.
    n_jobs : int
        The requested degree of parallelism.

    Returns
    -------
    int
        The effective ``n_jobs`` (always 1 when ``stepwise`` is True).
    """
    if stepwise and n_jobs != 1:
        # Bug fix: warn with the *requested* value. Previously n_jobs was
        # reset to 1 before formatting the message, so the warning always
        # printed "n_jobs=1" regardless of what the user asked for.
        warnings.warn('stepwise model cannot be fit in parallel (n_jobs=%i). '
                      'Falling back to stepwise parameter search.' % n_jobs)
        n_jobs = 1
    return n_jobs
def check_start_max_values(st, mx, argname):
    """Validate a (start, max) pair for the named hyper-parameter.

    Parameters
    ----------
    st : int
        The starting value. Must be non-None and non-negative.
    mx : int or None
        The max value. ``None`` is interpreted as unbounded (``np.inf``).
    argname : str
        The hyper-parameter name (e.g., "p"), used in error messages.

    Returns
    -------
    tuple
        The validated ``(st, mx)`` pair, with ``mx`` defaulted to
        ``np.inf`` when None.
    """
    if st is None:
        raise ValueError("start_%s cannot be None" % argname)
    if st < 0:
        raise ValueError("start_%s must be positive" % argname)
    mx = np.inf if mx is None else mx
    if mx < st:
        raise ValueError("max_%s must be >= start_%s" % (argname, argname))
    return st, mx
def check_trace(trace):
    """Coerce the ``trace`` argument into an integer verbosity level.

    ``None`` maps to 0; ints and bools pass through as their integer
    value; any other value maps to 1 when truthy, else 0.
    """
    if trace is None:
        return 0
    if isinstance(trace, (int, bool)):
        return int(trace)
    return 1 if trace else 0
def get_scoring_metric(metric):
    """Resolve a scoring metric from its name, or validate a callable.

    Parameters
    ----------
    metric : str or callable
        Either the name of a function in ``sklearn.metrics``, or a custom
        callable with the signature ``func(y_true, y_pred)``. Note that
        model selection seeks to MINIMIZE the score, so maximizing metrics
        (e.g., ``r2_score``) must be wrapped by the user to return their
        negative value.

    Returns
    -------
    callable
        The resolved scoring function.
    """
    if isinstance(metric, str):
        # Legacy shorthand names map onto the full sklearn spellings.
        legacy = {
            "mse": metrics.mean_squared_error,
            "mae": metrics.mean_absolute_error,
        }
        if metric in legacy:
            return legacy[metric]
        try:
            return getattr(metrics, metric)
        except AttributeError:
            raise ValueError("'%s' is not a valid scoring method." % metric)
    if not callable(metric):
        raise TypeError("`metric` must be a valid scoring method, or a "
                        "callable, but got type=%s" % type(metric))
    # TODO: warn for potentially invalid signature?
    return metric
def warn_for_D(d, D):
    """Warn when the differencing orders are suspiciously large.

    Emits a ModelFitWarning for two or more seasonal differences, or for
    three or more differencing operations in total.
    """
    if D >= 2:
        warnings.warn("Having more than one seasonal differences is "
                      "not recommended. Please consider using only one "
                      "seasonal difference.", ModelFitWarning)
    # NOTE (from original): a D of -1 would skew the total below -- the
    # second clause keeps the check honest for large d on its own.
    elif d > 2 or D + d > 2:
        warnings.warn("Having 3 or more differencing operations is not "
                      "recommended. Please consider reducing the total "
                      "number of differences.", ModelFitWarning)
| mit | 18c62c51838bb8cde71a68c921cf8ae6 | 32.051948 | 79 | 0.623183 | 4.072 | false | false | false | false |
eandersson/amqpstorm | amqpstorm/channel0.py | 1 | 7724 | """AMQPStorm Connection.Channel0."""
import logging
import platform
from pamqp import specification
from pamqp.heartbeat import Heartbeat
from amqpstorm import __version__
from amqpstorm.base import LOCALE
from amqpstorm.base import MAX_CHANNELS
from amqpstorm.base import MAX_FRAME_SIZE
from amqpstorm.base import Stateful
from amqpstorm.compatibility import try_utf8_decode
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class Channel0(object):
    """Internal Channel0 handler."""
    # Channel 0 carries connection-level traffic only: the Start/Tune/Open
    # negotiation, Close/CloseOk teardown, Blocked/Unblocked notifications
    # and Heartbeats. Every frame sent here is written with channel id 0
    # (see _write_frame).
    def __init__(self, connection, client_properties=None):
        super(Channel0, self).__init__()
        self.is_blocked = False
        self.max_allowed_channels = MAX_CHANNELS
        self.max_frame_size = MAX_FRAME_SIZE
        self.server_properties = {}
        self._connection = connection
        self._heartbeat = connection.parameters['heartbeat']
        self._parameters = connection.parameters
        # Optional dict merged over the defaults advertised to the server
        # in Connection.StartOk (see _client_properties).
        self._override_client_properties = client_properties
    def on_frame(self, frame_in):
        """Handle frames sent to Channel0.
        :param frame_in: Amqp frame.
        :return:
        """
        LOGGER.debug('Frame Received: %s', frame_in.name)
        # Dispatch on the frame name; inbound Heartbeats require no action.
        if frame_in.name == 'Heartbeat':
            return
        elif frame_in.name == 'Connection.Close':
            self._close_connection(frame_in)
        elif frame_in.name == 'Connection.CloseOk':
            self._close_connection_ok()
        elif frame_in.name == 'Connection.Blocked':
            self._blocked_connection(frame_in)
        elif frame_in.name == 'Connection.Unblocked':
            self._unblocked_connection()
        elif frame_in.name == 'Connection.OpenOk':
            self._set_connection_state(Stateful.OPEN)
        elif frame_in.name == 'Connection.Start':
            self.server_properties = frame_in.server_properties
            self._send_start_ok(frame_in)
        elif frame_in.name == 'Connection.Tune':
            # Tune completes negotiation; acknowledge it and immediately
            # request that the connection be opened.
            self._send_tune_ok(frame_in)
            self._send_open_connection()
        else:
            LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
    def send_close_connection(self):
        """Send Connection Close frame.
        :return:
        """
        self._write_frame(specification.Connection.Close())
    def send_heartbeat(self):
        """Send Heartbeat frame.
        :return:
        """
        # Skip heartbeats once the connection is no longer open.
        if not self._connection.is_open:
            return
        self._write_frame(Heartbeat())
    def _close_connection(self, frame_in):
        """Connection Close.
        :param specification.Connection.Close frame_in: Amqp frame.
        :return:
        """
        self._set_connection_state(Stateful.CLOSED)
        # Any reply code other than 200 is treated as an abnormal close
        # and surfaced to the connection as an exception.
        if frame_in.reply_code != 200:
            reply_text = try_utf8_decode(frame_in.reply_text)
            message = (
                'Connection was closed by remote server: %s' % reply_text
            )
            exception = AMQPConnectionError(message,
                                            reply_code=frame_in.reply_code)
            self._connection.exceptions.append(exception)
    def _close_connection_ok(self):
        """Connection CloseOk frame received.
        :return:
        """
        self._set_connection_state(Stateful.CLOSED)
    def _blocked_connection(self, frame_in):
        """Connection is Blocked.
        :param frame_in:
        :return:
        """
        self.is_blocked = True
        LOGGER.warning(
            'Connection is blocked by remote server: %s',
            try_utf8_decode(frame_in.reason)
        )
    def _negotiate(self, server_value, client_value):
        """Negotiate the highest supported value. Fall back on the
        client side value if zero.
        :param int server_value: Server Side value
        :param int client_value: Client Side value
        :rtype: int
        :return:
        """
        # min(...) returns 0 when either side sends 0 ("no limit"), in
        # which case the client-side value is used instead.
        return min(server_value, client_value) or client_value
    def _unblocked_connection(self):
        """Connection is Unblocked.
        :return:
        """
        self.is_blocked = False
        LOGGER.info('Connection is no longer blocked by remote server')
    def _plain_credentials(self):
        """AMQP Plain Credentials.
        :rtype: str
        """
        # PLAIN SASL response: NUL-delimited "<NUL>user<NUL>password".
        return '\0%s\0%s' % (self._parameters['username'],
                             self._parameters['password'])
    def _send_start_ok(self, frame_in):
        """Send Start OK frame.
        :param specification.Connection.Start frame_in: Amqp frame.
        :return:
        """
        mechanisms = try_utf8_decode(frame_in.mechanisms)
        # Prefer EXTERNAL (credentials supplied out-of-band, e.g. via TLS)
        # over PLAIN when the server offers both.
        if 'EXTERNAL' in mechanisms:
            mechanism = 'EXTERNAL'
            credentials = '\0\0'
        elif 'PLAIN' in mechanisms:
            mechanism = 'PLAIN'
            credentials = self._plain_credentials()
        else:
            exception = AMQPConnectionError(
                'Unsupported Security Mechanism(s): %s' %
                frame_in.mechanisms
            )
            self._connection.exceptions.append(exception)
            return
        start_ok_frame = specification.Connection.StartOk(
            mechanism=mechanism,
            client_properties=self._client_properties(),
            response=credentials,
            locale=LOCALE
        )
        self._write_frame(start_ok_frame)
    def _send_tune_ok(self, frame_in):
        """Send Tune OK frame.
        :param specification.Connection.Tune frame_in: Tune frame.
        :return:
        """
        self.max_allowed_channels = self._negotiate(frame_in.channel_max,
                                                    MAX_CHANNELS)
        self.max_frame_size = self._negotiate(frame_in.frame_max,
                                              MAX_FRAME_SIZE)
        LOGGER.debug(
            'Negotiated max frame size %d, max channels %d',
            self.max_frame_size, self.max_allowed_channels
        )
        tune_ok_frame = specification.Connection.TuneOk(
            channel_max=self.max_allowed_channels,
            frame_max=self.max_frame_size,
            heartbeat=self._heartbeat)
        self._write_frame(tune_ok_frame)
    def _send_open_connection(self):
        """Send Open Connection frame.
        :return:
        """
        open_frame = specification.Connection.Open(
            virtual_host=self._parameters['virtual_host']
        )
        self._write_frame(open_frame)
    def _set_connection_state(self, state):
        """Set Connection state.
        :param state:
        :return:
        """
        self._connection.set_state(state)
    def _write_frame(self, frame_out):
        """Write a pamqp frame from Channel0.
        :param frame_out: Amqp frame.
        :return:
        """
        # All Channel0 traffic goes out on channel id 0.
        self._connection.write_frame(0, frame_out)
        LOGGER.debug('Frame Sent: %s', frame_out.name)
    def _client_properties(self):
        """AMQPStorm Client Properties.
        :rtype: dict
        """
        client_properties = {
            'product': 'AMQPStorm',
            'platform': 'Python %s (%s)' % (platform.python_version(),
                                            platform.python_implementation()),
            'capabilities': {
                'basic.nack': True,
                'connection.blocked': True,
                'publisher_confirms': True,
                'consumer_cancel_notify': True,
                'authentication_failure_close': True,
            },
            'information': 'See https://github.com/eandersson/amqpstorm',
            'version': __version__
        }
        # Caller-supplied overrides win over the defaults above.
        if self._override_client_properties:
            client_properties.update(self._override_client_properties)
        return client_properties
| mit | 373573236e5e0407ac501664ba935089 | 31.317992 | 78 | 0.573019 | 4.195546 | false | false | false | false |
eandersson/amqpstorm | docs/conf.py | 1 | 11963 | # -*- coding: utf-8 -*-
#
# amqpstorm documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 10 16:25:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
sys.path.insert(0, '../')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'amqpstorm'
copyright = u'2020, Erik Olof Gunnar Andersson'
author = u'Erik Olof Gunnar Andersson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import amqpstorm # noqa
import amqpstorm.management # noqa
release = amqpstorm.__version__
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set 'language' from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as 'system message' paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'canonical_url': 'https://www.amqpstorm.io/',
}
github_url = 'https://github.com/eandersson/amqpstorm'
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# '<project> v<release> documentation' by default.
# html_title = u'amqpstorm v'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16
# or 32x32 pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, 'Created using Sphinx' is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, '(C) Copyright ...' is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. '.xhtml').
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'amqpstormdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'amqpstorm.tex', u'amqpstorm Documentation',
u'Erik Olof Gunnar Andersson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For 'manual' documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'amqpstorm', u'amqpstorm Documentation',
[u'Erik Olof Gunnar Andersson'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Replace the autogenerated "One line description of project." placeholder
# with the real project summary so the Texinfo directory entry is useful.
texinfo_documents = [
    ('index', 'amqpstorm', u'amqpstorm Documentation',
     u'Erik Olof Gunnar Andersson', 'amqpstorm',
     'Thread-safe Python RabbitMQ Client & Management library.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the 'Top' node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
autodoc_member_order = 'bysource'
highlight_language = 'python3'
| mit | 6d2c5209e80bf1ab6c9d0dec90607d91 | 30.901333 | 79 | 0.703502 | 3.639489 | false | true | false | false |
kivy/kivy | kivy/uix/behaviors/__init__.py | 9 | 3531 | '''
Behaviors
=========
.. versionadded:: 1.8.0
Behavior mixin classes
----------------------
This module implements behaviors that can be
`mixed in <https://en.wikipedia.org/wiki/Mixin>`_
with existing base widgets. The idea behind these classes is to encapsulate
properties and events associated with certain types of widgets.
Isolating these properties and events in a mixin class allows you to define
your own implementation for standard kivy widgets that can act as drop-in
replacements. This means you can re-style and re-define widgets as desired
without breaking compatibility: as long as they implement the behaviors
correctly, they can simply replace the standard widgets.
Adding behaviors
----------------
Say you want to add :class:`~kivy.uix.button.Button` capabilities to an
:class:`~kivy.uix.image.Image`, you could do::
class IconButton(ButtonBehavior, Image):
pass
This would give you an :class:`~kivy.uix.image.Image` with the events and
properties inherited from :class:`ButtonBehavior`. For example, the *on_press*
and *on_release* events would be fired when appropriate::
class IconButton(ButtonBehavior, Image):
def on_press(self):
print("on_press")
Or in kv:
.. code-block:: kv
IconButton:
on_press: print('on_press')
Naturally, you could also bind to any property changes the behavior class
offers:
.. code-block:: python
def state_changed(*args):
print('state changed')
button = IconButton()
button.bind(state=state_changed)
.. note::
The behavior class must always be _before_ the widget class. If you don't
specify the inheritance in this order, the behavior will not work because
the behavior methods are overwritten by the class method listed first.
Similarly, if you combine a behavior class with a class which
requires the use of the methods also defined by the behavior class, the
resulting class may not function properly. For example, when combining the
:class:`ButtonBehavior` with a :class:`~kivy.uix.slider.Slider`, both of
which use the :meth:`~kivy.uix.widget.Widget.on_touch_up` method,
the resulting class may not work properly.
.. versionchanged:: 1.9.1
The individual behavior classes, previously in one big `behaviors.py`
file, have been split into a single file for each class under the
:mod:`~kivy.uix.behaviors` module. All the behaviors are still imported
in the :mod:`~kivy.uix.behaviors` module so they are accessible as before
(e.g. both `from kivy.uix.behaviors import ButtonBehavior` and
`from kivy.uix.behaviors.button import ButtonBehavior` work).
'''
__all__ = ('ButtonBehavior', 'ToggleButtonBehavior', 'DragBehavior',
'FocusBehavior', 'CompoundSelectionBehavior',
'CodeNavigationBehavior', 'EmacsBehavior', 'CoverBehavior',
'TouchRippleBehavior', 'TouchRippleButtonBehavior')
from kivy.uix.behaviors.button import ButtonBehavior
from kivy.uix.behaviors.togglebutton import ToggleButtonBehavior
from kivy.uix.behaviors.drag import DragBehavior
from kivy.uix.behaviors.focus import FocusBehavior
from kivy.uix.behaviors.compoundselection import CompoundSelectionBehavior
from kivy.uix.behaviors.codenavigation import CodeNavigationBehavior
from kivy.uix.behaviors.emacs import EmacsBehavior
from kivy.uix.behaviors.cover import CoverBehavior
from kivy.uix.behaviors.touchripple import TouchRippleBehavior
from kivy.uix.behaviors.touchripple import TouchRippleButtonBehavior
| mit | 380ca948622ac61895bc7cbb8df6d16b | 36.168421 | 78 | 0.744265 | 3.967416 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/urllib3/contrib/ntlmpool.py | 711 | 4741 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # NOTE(review): assumes the DOMAIN\user form -- a bare username
        # (no backslash) would make user_parts[1] raise IndexError below.
        # Confirm callers always pass a domain-qualified user.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        '''Open a new HTTPS connection and run the three-leg NTLM handshake
        (negotiate -> challenge -> authenticate) against ``self.authurl``.

        Returns the authenticated :class:`HTTPSConnection`. NTLM
        authenticates the *connection*, not each request, so the returned
        socket must stay open for later requests to remain authenticated.
        Raises ``Exception`` if the server sends no NTLM challenge, rejects
        the credentials (401), or replies with any non-200 status.
        '''
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            # Header may list several schemes; pick the NTLM challenge token.
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Again detach the socket from the response so it stays open.
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        '''Same as ``HTTPSConnectionPool.urlopen`` but forces a
        ``Connection: Keep-Alive`` header, since the NTLM-authenticated
        socket must be reused for subsequent requests.
        '''
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
| mit | 045fe24cedffaae86fc71c7e42b33ef1 | 38.508333 | 77 | 0.563805 | 4.233036 | false | false | false | false |
kivy/kivy | kivy/animation.py | 4 | 25321 | '''
Animation
=========
:class:`Animation` and :class:`AnimationTransition` are used to animate
:class:`~kivy.uix.widget.Widget` properties. You must specify at least a
property name and target value. To use an Animation, follow these steps:
* Setup an Animation object
* Use the Animation object on a Widget
Simple animation
----------------
To animate a Widget's x or y position, simply specify the target x/y values
where you want the widget positioned at the end of the animation::
anim = Animation(x=100, y=100)
anim.start(widget)
The animation will last for 1 second unless :attr:`duration` is specified.
When anim.start() is called, the Widget will move smoothly from the current
x/y position to (100, 100).
Multiple properties and transitions
-----------------------------------
You can animate multiple properties and use built-in or custom transition
functions using :attr:`transition` (or the `t=` shortcut). For example,
to animate the position and size using the 'in_quad' transition::
anim = Animation(x=50, size=(80, 80), t='in_quad')
anim.start(widget)
Note that the `t=` parameter can be the string name of a method in the
:class:`AnimationTransition` class or your own animation function.
Sequential animation
--------------------
To join animations sequentially, use the '+' operator. The following example
will animate to x=50 over 1 second, then animate the size to (80, 80) over the
next two seconds::
anim = Animation(x=50) + Animation(size=(80, 80), duration=2.)
anim.start(widget)
Parallel animation
------------------
To join animations in parallel, use the '&' operator. The following example
will animate the position to (80, 10) over 1 second, whilst in parallel
animating the size to (800, 800)::
anim = Animation(pos=(80, 10))
anim &= Animation(size=(800, 800), duration=2.)
anim.start(widget)
Keep in mind that creating overlapping animations on the same property may have
unexpected results. If you want to apply multiple animations to the same
property, you should either schedule them sequentially (via the '+' operator or
using the *on_complete* callback) or cancel previous animations using the
:attr:`~Animation.cancel_all` method.
Repeating animation
-------------------
.. versionadded:: 1.8.0
.. note::
This is currently only implemented for 'Sequence' animations.
To set an animation to repeat, simply set the :attr:`Sequence.repeat`
property to `True`::
anim = Animation(...) + Animation(...)
anim.repeat = True
anim.start(widget)
For flow control of animations such as stopping and cancelling, use the methods
already in place in the animation module.
'''
__all__ = ('Animation', 'AnimationTransition')
from math import sqrt, cos, sin, pi
from collections import ChainMap
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.compat import string_types, iterkeys
from kivy.weakproxy import WeakProxy
class Animation(EventDispatcher):
    '''Create an animation definition that can be used to animate a Widget.

    :Parameters:
        `duration` or `d`: float, defaults to 1.
            Duration of the animation, in seconds.
        `transition` or `t`: str or func
            Transition function for animate properties. It can be the name of a
            method from :class:`AnimationTransition`.
        `step` or `s`: float
            Step in seconds of the animation. Defaults to 0, which means
            the animation is updated for every frame.

            To update the animation less often, set the step value to a float.
            For example, if you want to animate at 30 FPS, use s=1/30.

    :Events:
        `on_start`: animation, widget
            Fired when the animation is started on a widget.
        `on_complete`: animation, widget
            Fired when the animation is completed or stopped on a widget.
        `on_progress`: animation, widget, progression
            Fired when the progression of the animation is changing.

    .. versionchanged:: 1.4.0
        Added s/step parameter.

    .. versionchanged:: 1.10.0
        The default value of the step parameter was changed from 1/60. to 0.
    '''

    # Clock event driving _update(); None while no clock is installed.
    _update_ev = None

    # Class-level registry of all running animations; keeps them alive
    # while active and lets stop_all()/cancel_all() find them.
    _instances = set()

    __events__ = ('on_start', 'on_progress', 'on_complete')

    def __init__(self, **kw):
        super().__init__()

        # Initialize
        self._clock_installed = False
        # Pop the control keywords first; every keyword left in ``kw``
        # is treated as a "property name -> target value" pair to animate.
        self._duration = kw.pop('d', kw.pop('duration', 1.))
        self._transition = kw.pop('t', kw.pop('transition', 'linear'))
        self._step = kw.pop('s', kw.pop('step', 0))
        if isinstance(self._transition, string_types):
            self._transition = getattr(AnimationTransition, self._transition)
        self._animated_properties = kw
        # Maps widget.uid -> per-widget animation state (see _initialize).
        self._widgets = {}

    @property
    def duration(self):
        '''Return the duration of the animation.
        '''
        return self._duration

    @property
    def transition(self):
        '''Return the transition of the animation.
        '''
        return self._transition

    @property
    def animated_properties(self):
        '''Return the properties used to animate.
        '''
        return self._animated_properties

    @staticmethod
    def stop_all(widget, *largs):
        '''Stop all animations that concern a specific widget / list of
        properties.

        Example::

            anim = Animation(x=50)
            anim.start(widget)

            # and later
            Animation.stop_all(widget, 'x')
        '''
        if len(largs):
            # Stop only the named properties on this widget.
            for animation in list(Animation._instances):
                for x in largs:
                    animation.stop_property(widget, x)
        else:
            # Stop every running animation for this widget.
            for animation in set(Animation._instances):
                animation.stop(widget)

    @staticmethod
    def cancel_all(widget, *largs):
        '''Cancel all animations that concern a specific widget / list of
        properties. See :attr:`cancel`.

        Example::

            anim = Animation(x=50)
            anim.start(widget)

            # and later
            Animation.cancel_all(widget, 'x')

        .. versionadded:: 1.4.0

        .. versionchanged:: 2.1.0
            If the parameter ``widget`` is None, all animated widgets will be
            the target and cancelled. If ``largs`` is also given, animation of
            these properties will be canceled for all animated widgets.
        '''
        if widget is None:
            # Target every widget currently being animated.
            if largs:
                for animation in Animation._instances.copy():
                    for info in tuple(animation._widgets.values()):
                        widget = info['widget']
                        for x in largs:
                            animation.cancel_property(widget, x)
            else:
                # Wholesale cancel: drop all state and clocks silently.
                for animation in Animation._instances:
                    animation._widgets.clear()
                    animation._clock_uninstall()
                Animation._instances.clear()
            return
        if len(largs):
            for animation in list(Animation._instances):
                for x in largs:
                    animation.cancel_property(widget, x)
        else:
            for animation in set(Animation._instances):
                animation.cancel(widget)

    def start(self, widget):
        '''Start the animation on a widget.
        '''
        # Restart cleanly if this animation was already running on it.
        self.stop(widget)
        self._initialize(widget)
        self._register()
        self.dispatch('on_start', widget)

    def stop(self, widget):
        '''Stop the animation previously applied to a widget, triggering the
        `on_complete` event.'''
        props = self._widgets.pop(widget.uid, None)
        if props:
            self.dispatch('on_complete', widget)
        # Shared teardown (clock/registry) lives in cancel().
        self.cancel(widget)

    def cancel(self, widget):
        '''Cancel the animation previously applied to a widget. Same
        effect as :attr:`stop`, except the `on_complete` event will
        *not* be triggered!

        .. versionadded:: 1.4.0
        '''
        self._widgets.pop(widget.uid, None)
        self._clock_uninstall()
        if not self._widgets:
            self._unregister()

    def stop_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be stopped (see :attr:`stop`).
        '''
        props = self._widgets.get(widget.uid, None)
        if not props:
            return
        props['properties'].pop(prop, None)

        # no more properties to animation ? kill the animation.
        if not props['properties']:
            self.stop(widget)

    def cancel_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be canceled (see :attr:`cancel`)

        .. versionadded:: 1.4.0
        '''
        props = self._widgets.get(widget.uid, None)
        if not props:
            return
        props['properties'].pop(prop, None)

        # no more properties to animation ? kill the animation.
        if not props['properties']:
            self.cancel(widget)

    def have_properties_to_animate(self, widget):
        '''Return True if a widget still has properties to animate.

        .. versionadded:: 1.8.0
        '''
        props = self._widgets.get(widget.uid, None)
        if props and props['properties']:
            return True

    #
    # Private
    #

    def _register(self):
        # Add to the class-level registry of running animations.
        Animation._instances.add(self)

    def _unregister(self):
        # Remove from the registry once no widget is animated any more.
        if self in Animation._instances:
            Animation._instances.remove(self)

    def _initialize(self, widget):
        '''Snapshot the widget's current values for every animated property
        and start the update clock.'''
        d = self._widgets[widget.uid] = {
            'widget': widget,
            'properties': {},
            'time': None}

        # get current values
        p = d['properties']
        for key, value in self._animated_properties.items():
            original_value = getattr(widget, key)
            # Copy mutable containers so later widget mutation does not
            # corrupt the recorded starting value.
            if isinstance(original_value, (tuple, list)):
                original_value = original_value[:]
            elif isinstance(original_value, dict):
                original_value = original_value.copy()
            p[key] = (original_value, value)

        # install clock
        self._clock_install()

    def _clock_install(self):
        # Schedule _update() at the configured step (0 = every frame).
        if self._clock_installed:
            return
        self._update_ev = Clock.schedule_interval(self._update, self._step)
        self._clock_installed = True

    def _clock_uninstall(self):
        # Only tear the clock down when no widget remains animated.
        if self._widgets or not self._clock_installed:
            return
        self._clock_installed = False
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None

    def _update(self, dt):
        '''Clock callback: advance every animated widget by ``dt`` seconds,
        apply the eased interpolation and fire progress/complete events.'''
        widgets = self._widgets
        transition = self._transition
        calculate = self._calculate
        for uid in list(widgets.keys()):
            anim = widgets[uid]
            widget = anim['widget']

            if isinstance(widget, WeakProxy) and not len(dir(widget)):
                # empty proxy, widget is gone. ref: #2458
                self._widgets.pop(uid, None)
                self._clock_uninstall()
                if not self._widgets:
                    self._unregister()
                continue

            # First tick only marks time zero; interpolation starts next tick.
            if anim['time'] is None:
                anim['time'] = 0.
            else:
                anim['time'] += dt

            # calculate progression
            if self._duration:
                progress = min(1., anim['time'] / self._duration)
            else:
                # Zero duration: jump straight to the final value.
                progress = 1
            t = transition(progress)

            # apply progression on widget
            for key, values in anim['properties'].items():
                a, b = values
                value = calculate(a, b, t)
                setattr(widget, key, value)

            self.dispatch('on_progress', widget, progress)

            # time to stop ?
            if progress >= 1.:
                self.stop(widget)

    def _calculate(self, a, b, t):
        '''Recursively interpolate between ``a`` and ``b`` at eased time
        ``t``, element-wise for lists/tuples and key-wise for dicts.'''
        _calculate = self._calculate
        if isinstance(a, list) or isinstance(a, tuple):
            if isinstance(a, list):
                tp = list
            else:
                tp = tuple
            return tp([_calculate(a[x], b[x], t) for x in range(len(a))])
        elif isinstance(a, dict):
            d = {}
            for x in iterkeys(a):
                if x not in b:
                    # User requested to animate only part of the dict.
                    # Copy the rest
                    d[x] = a[x]
                else:
                    d[x] = _calculate(a[x], b[x], t)
            return d
        else:
            # Scalar case: standard linear blend.
            return (a * (1. - t)) + (b * t)

    #
    # Default handlers
    #
    # No-op defaults for the declared events; users bind or override them.
    def on_start(self, widget):
        pass

    def on_progress(self, widget, progress):
        pass

    def on_complete(self, widget):
        pass

    def __add__(self, animation):
        # 'anim1 + anim2' plays them one after the other.
        return Sequence(self, animation)

    def __and__(self, animation):
        # 'anim1 & anim2' plays them at the same time.
        return Parallel(self, animation)
class CompoundAnimation(Animation):
    '''Shared behaviour for animations composed of two sub-animations
    (``self.anim1`` and ``self.anim2``), as built by :class:`Sequence`
    ('+' operator) and :class:`Parallel` ('&' operator). Per-property
    stop/cancel requests are forwarded to both children.
    '''

    def stop_property(self, widget, prop):
        # Forward to both children; stop the compound itself only when
        # neither child has anything left to animate on this widget.
        self.anim1.stop_property(widget, prop)
        self.anim2.stop_property(widget, prop)
        if (not self.anim1.have_properties_to_animate(widget) and
                not self.anim2.have_properties_to_animate(widget)):
            self.stop(widget)

    def cancel(self, widget):
        # Cancel both children, then the compound's own state (no
        # on_complete is fired, per Animation.cancel semantics).
        self.anim1.cancel(widget)
        self.anim2.cancel(widget)
        super().cancel(widget)

    def cancel_property(self, widget, prop):
        '''Even if an animation is running, remove a property. It will not be
        animated further. If it was the only/last property being animated,
        the animation will be canceled (see :attr:`cancel`)

        This method overrides `:class:kivy.animation.Animation`'s
        version, to cancel it on all animations of the Sequence.

        .. versionadded:: 1.10.0
        '''
        self.anim1.cancel_property(widget, prop)
        self.anim2.cancel_property(widget, prop)
        if (not self.anim1.have_properties_to_animate(widget) and
                not self.anim2.have_properties_to_animate(widget)):
            self.cancel(widget)

    def have_properties_to_animate(self, widget):
        # True if either child still animates something on this widget.
        return (self.anim1.have_properties_to_animate(widget) or
                self.anim2.have_properties_to_animate(widget))

    @property
    def animated_properties(self):
        # Merged view of both children's properties; anim2 (the
        # right-hand operand) takes precedence on duplicate keys.
        return ChainMap({},
                        self.anim2.animated_properties,
                        self.anim1.animated_properties)

    @property
    def transition(self):
        # This property is impossible to implement
        raise AttributeError(
            "Can't lookup transition attribute of a CompoundAnimation")
class Sequence(CompoundAnimation):
    '''Play ``anim1`` to completion, then ``anim2`` (created by the '+'
    operator on :class:`Animation`). Progress is reported as 0-0.5 for
    the first animation and 0.5-1 for the second.
    '''

    def __init__(self, anim1, anim2):
        super().__init__()

        #: Repeat the sequence. See 'Repeating animation' in the header
        #: documentation.
        self.repeat = False

        self.anim1 = anim1
        self.anim2 = anim2

        # Chain the children's events into this sequence's lifecycle.
        self.anim1.bind(on_complete=self.on_anim1_complete,
                        on_progress=self.on_anim1_progress)
        self.anim2.bind(on_complete=self.on_anim2_complete,
                        on_progress=self.on_anim2_progress)

    @property
    def duration(self):
        # Total time is the sum of both parts.
        return self.anim1.duration + self.anim2.duration

    def stop(self, widget):
        '''Stop both children and fire `on_complete` if this sequence was
        actually running on the widget.'''
        props = self._widgets.pop(widget.uid, None)
        self.anim1.stop(widget)
        self.anim2.stop(widget)
        if props:
            self.dispatch('on_complete', widget)
        # super().cancel() (not self.cancel()) to avoid re-cancelling the
        # children that were just stopped above.
        super().cancel(widget)

    def start(self, widget):
        '''Restart the sequence on the widget, beginning with anim1.'''
        self.stop(widget)
        # Only a running marker is needed; children keep the real state.
        self._widgets[widget.uid] = True
        self._register()
        self.dispatch('on_start', widget)
        self.anim1.start(widget)

    def on_anim1_complete(self, instance, widget):
        # Ignore stray completions after the sequence was stopped.
        if widget.uid not in self._widgets:
            return
        self.anim2.start(widget)

    def on_anim1_progress(self, instance, widget, progress):
        # First half of the overall progression.
        self.dispatch('on_progress', widget, progress / 2.)

    def on_anim2_complete(self, instance, widget):
        '''Repeating logic used with boolean variable "repeat".

        .. versionadded:: 1.7.1
        '''
        if widget.uid not in self._widgets:
            return
        if self.repeat:
            self.anim1.start(widget)
        else:
            self.dispatch('on_complete', widget)
            self.cancel(widget)

    def on_anim2_progress(self, instance, widget, progress):
        # Second half of the overall progression.
        self.dispatch('on_progress', widget, .5 + progress / 2.)
class Parallel(CompoundAnimation):
    '''Play ``anim1`` and ``anim2`` at the same time (created by the '&'
    operator on :class:`Animation`). Completes once both children have
    completed.
    '''

    def __init__(self, anim1, anim2):
        super().__init__()
        self.anim1 = anim1
        self.anim2 = anim2

        # Both children report into the same completion counter.
        self.anim1.bind(on_complete=self.on_anim_complete)
        self.anim2.bind(on_complete=self.on_anim_complete)

    @property
    def duration(self):
        # Runs as long as the slower of the two children.
        return max(self.anim1.duration, self.anim2.duration)

    def stop(self, widget):
        '''Stop both children; fire `on_complete` if this parallel group
        was actually running on the widget.'''
        self.anim1.stop(widget)
        self.anim2.stop(widget)
        if self._widgets.pop(widget.uid, None):
            self.dispatch('on_complete', widget)
        # super().cancel() to avoid cancelling the children a second time.
        super().cancel(widget)

    def start(self, widget):
        '''Restart both children simultaneously on the widget.'''
        self.stop(widget)
        self.anim1.start(widget)
        self.anim2.start(widget)
        # Track how many of the two children have finished.
        self._widgets[widget.uid] = {'complete': 0}
        self._register()
        self.dispatch('on_start', widget)

    def on_anim_complete(self, instance, widget):
        # Count child completions; the group is done when both finish.
        self._widgets[widget.uid]['complete'] += 1
        if self._widgets[widget.uid]['complete'] == 2:
            self.stop(widget)
class AnimationTransition:
'''Collection of animation functions to be used with the Animation object.
Easing Functions ported to Kivy from the Clutter Project
https://developer.gnome.org/clutter/stable/ClutterAlpha.html
The `progress` parameter in each animation function is in the range 0-1.
'''
    @staticmethod
    def linear(progress: float) -> float:
        '''.. image:: images/anim_linear.png

        Identity easing: the output equals *progress* (constant speed).
        '''
        return progress
    @staticmethod
    def in_quad(progress: float) -> float:
        '''.. image:: images/anim_in_quad.png

        Quadratic ease-in: slow start, accelerating finish (progress²).
        '''
        return progress * progress
@staticmethod
def out_quad(progress):
'''.. image:: images/anim_out_quad.png
'''
return -1.0 * progress * (progress - 2.0)
@staticmethod
def in_out_quad(progress):
'''.. image:: images/anim_in_out_quad.png
'''
p = progress * 2
if p < 1:
return 0.5 * p * p
p -= 1.0
return -0.5 * (p * (p - 2.0) - 1.0)
@staticmethod
def in_cubic(progress):
'''.. image:: images/anim_in_cubic.png
'''
return progress * progress * progress
@staticmethod
def out_cubic(progress):
'''.. image:: images/anim_out_cubic.png
'''
p = progress - 1.0
return p * p * p + 1.0
@staticmethod
def in_out_cubic(progress):
'''.. image:: images/anim_in_out_cubic.png
'''
p = progress * 2
if p < 1:
return 0.5 * p * p * p
p -= 2
return 0.5 * (p * p * p + 2.0)
@staticmethod
def in_quart(progress):
'''.. image:: images/anim_in_quart.png
'''
return progress * progress * progress * progress
@staticmethod
def out_quart(progress):
'''.. image:: images/anim_out_quart.png
'''
p = progress - 1.0
return -1.0 * (p * p * p * p - 1.0)
@staticmethod
def in_out_quart(progress):
'''.. image:: images/anim_in_out_quart.png
'''
p = progress * 2
if p < 1:
return 0.5 * p * p * p * p
p -= 2
return -0.5 * (p * p * p * p - 2.0)
@staticmethod
def in_quint(progress):
'''.. image:: images/anim_in_quint.png
'''
return progress * progress * progress * progress * progress
@staticmethod
def out_quint(progress):
'''.. image:: images/anim_out_quint.png
'''
p = progress - 1.0
return p * p * p * p * p + 1.0
@staticmethod
def in_out_quint(progress):
'''.. image:: images/anim_in_out_quint.png
'''
p = progress * 2
if p < 1:
return 0.5 * p * p * p * p * p
p -= 2.0
return 0.5 * (p * p * p * p * p + 2.0)
@staticmethod
def in_sine(progress):
'''.. image:: images/anim_in_sine.png
'''
return -1.0 * cos(progress * (pi / 2.0)) + 1.0
@staticmethod
def out_sine(progress):
'''.. image:: images/anim_out_sine.png
'''
return sin(progress * (pi / 2.0))
@staticmethod
def in_out_sine(progress):
'''.. image:: images/anim_in_out_sine.png
'''
return -0.5 * (cos(pi * progress) - 1.0)
@staticmethod
def in_expo(progress):
'''.. image:: images/anim_in_expo.png
'''
if progress == 0:
return 0.0
return pow(2, 10 * (progress - 1.0))
@staticmethod
def out_expo(progress):
'''.. image:: images/anim_out_expo.png
'''
if progress == 1.0:
return 1.0
return -pow(2, -10 * progress) + 1.0
@staticmethod
def in_out_expo(progress):
'''.. image:: images/anim_in_out_expo.png
'''
if progress == 0:
return 0.0
if progress == 1.:
return 1.0
p = progress * 2
if p < 1:
return 0.5 * pow(2, 10 * (p - 1.0))
p -= 1.0
return 0.5 * (-pow(2, -10 * p) + 2.0)
@staticmethod
def in_circ(progress):
'''.. image:: images/anim_in_circ.png
'''
return -1.0 * (sqrt(1.0 - progress * progress) - 1.0)
@staticmethod
def out_circ(progress):
'''.. image:: images/anim_out_circ.png
'''
p = progress - 1.0
return sqrt(1.0 - p * p)
@staticmethod
def in_out_circ(progress):
'''.. image:: images/anim_in_out_circ.png
'''
p = progress * 2
if p < 1:
return -0.5 * (sqrt(1.0 - p * p) - 1.0)
p -= 2.0
return 0.5 * (sqrt(1.0 - p * p) + 1.0)
@staticmethod
def in_elastic(progress):
'''.. image:: images/anim_in_elastic.png
'''
p = .3
s = p / 4.0
q = progress
if q == 1:
return 1.0
q -= 1.0
return -(pow(2, 10 * q) * sin((q - s) * (2 * pi) / p))
@staticmethod
def out_elastic(progress):
'''.. image:: images/anim_out_elastic.png
'''
p = .3
s = p / 4.0
q = progress
if q == 1:
return 1.0
return pow(2, -10 * q) * sin((q - s) * (2 * pi) / p) + 1.0
@staticmethod
def in_out_elastic(progress):
'''.. image:: images/anim_in_out_elastic.png
'''
p = .3 * 1.5
s = p / 4.0
q = progress * 2
if q == 2:
return 1.0
if q < 1:
q -= 1.0
return -.5 * (pow(2, 10 * q) * sin((q - s) * (2.0 * pi) / p))
else:
q -= 1.0
return pow(2, -10 * q) * sin((q - s) * (2.0 * pi) / p) * .5 + 1.0
@staticmethod
def in_back(progress):
'''.. image:: images/anim_in_back.png
'''
return progress * progress * ((1.70158 + 1.0) * progress - 1.70158)
@staticmethod
def out_back(progress):
'''.. image:: images/anim_out_back.png
'''
p = progress - 1.0
return p * p * ((1.70158 + 1) * p + 1.70158) + 1.0
@staticmethod
def in_out_back(progress):
'''.. image:: images/anim_in_out_back.png
'''
p = progress * 2.
s = 1.70158 * 1.525
if p < 1:
return 0.5 * (p * p * ((s + 1.0) * p - s))
p -= 2.0
return 0.5 * (p * p * ((s + 1.0) * p + s) + 2.0)
@staticmethod
def _out_bounce_internal(t, d):
p = t / d
if p < (1.0 / 2.75):
return 7.5625 * p * p
elif p < (2.0 / 2.75):
p -= (1.5 / 2.75)
return 7.5625 * p * p + .75
elif p < (2.5 / 2.75):
p -= (2.25 / 2.75)
return 7.5625 * p * p + .9375
else:
p -= (2.625 / 2.75)
return 7.5625 * p * p + .984375
@staticmethod
def _in_bounce_internal(t, d):
return 1.0 - AnimationTransition._out_bounce_internal(d - t, d)
@staticmethod
def in_bounce(progress):
'''.. image:: images/anim_in_bounce.png
'''
return AnimationTransition._in_bounce_internal(progress, 1.)
@staticmethod
def out_bounce(progress):
'''.. image:: images/anim_out_bounce.png
'''
return AnimationTransition._out_bounce_internal(progress, 1.)
@staticmethod
def in_out_bounce(progress):
'''.. image:: images/anim_in_out_bounce.png
'''
p = progress * 2.
if p < 1.:
return AnimationTransition._in_bounce_internal(p, 1.) * .5
return AnimationTransition._out_bounce_internal(p - 1., 1.) * .5 + .5
| mit | 51de0df72c0746bca8fdd475b2369ba3 | 29.433894 | 79 | 0.553967 | 3.918446 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/day_of.py | 1 | 1476 | import webapp2
from template import template
from messages import Message
from social_import import Post
from mentor import MentorRequest
class DayOfHandler(webapp2.RequestHandler):
def get(self, tab='info'):
params = {}
params['bigboard'] = self.request.get('bigboard')
if tab == 'info':
feed = []
for msg in Message.query(Message.show_in_day_of == True).order(-Message.added).fetch(limit=20):
feed.append({"date": msg.added, "type": "message", "message": msg})
for post in Post.query(Post.feed == 'twitter/user/HackAtBrown', Post.is_reply == False).order(-Post.date).fetch(limit=20):
feed.append({"date": post.date, "type": "tweet", "tweet": post})
feed.sort(key=lambda x: x['date'], reverse=True)
params['feed'] = feed[:min(len(feed), 20)]
elif tab == 'requests':
def request_to_dict(req):
return {
"name": req.requester.get().name if req.requester else None,
"issue": req.issue,
"tags": req.tags,
"location": req.location,
"time": req.created
}
params['requests'] = map(request_to_dict, MentorRequest.query().order(-MentorRequest.created).fetch(limit=100))
content = template("day_of/{0}.html".format(tab), params) # TODO: security-ish stuff
if self.request.get('ajax'):
self.response.write(content)
else:
index_params = {
"tab": tab,
"tab_content": content,
"bigboard": self.request.get('bigboard')
}
self.response.write(template("day_of/index.html", index_params))
| mit | 599413e28dd7db896e84fc9742fb0758 | 34.142857 | 125 | 0.665312 | 3.075 | false | false | false | false |
kivy/kivy | doc/sources/sphinxext/autodoc.py | 73 | 1130 | # -*- coding: utf-8 -*-
from sphinx.ext.autodoc import Documenter, ClassDocumenter
from sphinx.ext.autodoc import setup as core_setup
from sphinx.locale import _
class KivyClassDocumenter(ClassDocumenter):
def add_directive_header(self, sig):
if self.doc_as_attr:
self.directivetype = 'attribute'
Documenter.add_directive_header(self, sig)
def fix(mod):
if mod == 'kivy._event':
mod = 'kivy.event'
return mod
# add inheritance info, if wanted
if not self.doc_as_attr and self.options.show_inheritance:
self.add_line('', '<autodoc>')
if len(self.object.__bases__):
bases = [b.__module__ == '__builtin__' and
':class:`%s`' % b.__name__ or
':class:`%s.%s`' % (fix(b.__module__), b.__name__)
for b in self.object.__bases__]
self.add_line(_(' Bases: %s') % ', '.join(bases),
'<autodoc>')
def setup(app):
core_setup(app)
app.add_autodocumenter(KivyClassDocumenter)
| mit | 6c1b3b270ff3f0b524c8e26dfe344ae5 | 36.666667 | 75 | 0.527434 | 3.896552 | false | false | false | false |
kivy/kivy | kivy/input/providers/__init__.py | 21 | 1936 | # pylint: disable=W0611
'''
Providers
=========
'''
import os
from kivy.utils import platform as core_platform
from kivy.logger import Logger
from kivy.setupconfig import USE_SDL2
import kivy.input.providers.tuio
import kivy.input.providers.mouse
platform = core_platform
if platform == 'win' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.wm_touch
import kivy.input.providers.wm_pen
except:
err = 'Input: WM_Touch/WM_Pen not supported by your version of Windows'
Logger.warning(err)
if platform == 'macosx' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.mactouch
except:
err = 'Input: MacMultitouchSupport is not supported by your system'
Logger.exception(err)
if platform == 'linux' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.probesysfs
except:
err = 'Input: ProbeSysfs is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.mtdev
except:
err = 'Input: MTDev is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.hidinput
except:
err = 'Input: HIDInput is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.linuxwacom
except:
err = 'Input: LinuxWacom is not supported by your version of linux'
Logger.exception(err)
if (platform == 'android' and not USE_SDL2) or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.androidjoystick
except:
err = 'Input: AndroidJoystick is not supported by your version ' \
'of linux'
Logger.exception(err)
try:
import kivy.input.providers.leapfinger # NOQA
except:
err = 'Input: LeapFinger is not available on your system'
Logger.exception(err)
| mit | 9794919ef0cfa1ee1072134a09e4fa95 | 27.470588 | 79 | 0.664773 | 3.639098 | false | false | true | false |
kivy/kivy | kivy/modules/screen.py | 4 | 7476 | '''Screen
======
This module changes some environment and configuration variables
to match the density / dpi / screensize of a specific device.
To see a list of the available screenid's, just run::
python main.py -m screen
To simulate a medium-density screen such as the Motorola Droid 2::
python main.py -m screen:droid2
To simulate a high-density screen such as HTC One X, in portrait::
python main.py -m screen:onex,portrait
To simulate the iPad 2 screen::
python main.py -m screen:ipad
If the generated window is too large, you can specify a scale::
python main.py -m screen:note2,portrait,scale=.75
Note that to display your contents correctly on a scaled window you
must consistently use units 'dp' and 'sp' throughout your app. See
:mod:`~kiv.metrics` for more details.
'''
import sys
from os import environ
from kivy.config import Config
from kivy.logger import Logger
# taken from http://en.wikipedia.org/wiki/List_of_displays_by_pixel_density
devices = {
# device: (name, width, height, dpi, density)
'onex': ('HTC One X', 1280, 720, 312, 2),
'one': ('HTC One', 1920, 1080, 468, 3),
'onesv': ('HTC One SV', 800, 480, 216, 1.5),
's3': ('Galaxy SIII', 1280, 720, 306, 2),
'note2': ('Galaxy Note II', 1280, 720, 267, 2),
'droid2': ('Motorola Droid 2', 854, 480, 240, 1.5),
'xoom': ('Motorola Xoom', 1280, 800, 149, 1),
'ipad': ('iPad (1 and 2)', 1024, 768, 132, 1),
'ipad3': ('iPad 3', 2048, 1536, 264, 2),
'iphone4': ('iPhone 4', 960, 640, 326, 2),
'iphone5': ('iPhone 5', 1136, 640, 326, 2),
'xperiae': ('Xperia E', 480, 320, 166, 1),
'nexus4': ('Nexus 4', 1280, 768, 320, 2),
'nexus7': ('Nexus 7 (2012 version)', 1280, 800, 216, 1.325),
'nexus7.2': ('Nexus 7 (2013 version)', 1920, 1200, 323, 2),
# taken from design.google.com/devices
# please consider using another data instead of
# a dict for autocompletion to work
# these are all in landscape
'phone_android_one': ('Android One', 854, 480, 218, 1.5),
'phone_htc_one_m8': ('HTC One M8', 1920, 1080, 432, 3.0),
'phone_htc_one_m9': ('HTC One M9', 1920, 1080, 432, 3.0),
'phone_iphone': ('iPhone', 480, 320, 168, 1.0),
'phone_iphone_4': ('iPhone 4', 960, 640, 320, 2.0),
'phone_iphone_5': ('iPhone 5', 1136, 640, 320, 2.0),
'phone_iphone_6': ('iPhone 6', 1334, 750, 326, 2.0),
'phone_iphone_6_plus': ('iPhone 6 Plus', 1920, 1080, 400, 3.0),
'phone_lg_g2': ('LG G2', 1920, 1080, 432, 3.0),
'phone_lg_g3': ('LG G3', 2560, 1440, 533, 3.0),
'phone_moto_g': ('Moto G', 1280, 720, 327, 2.0),
'phone_moto_x': ('Moto X', 1280, 720, 313, 2.0),
'phone_moto_x_2nd_gen': ('Moto X 2nd Gen', 1920, 1080, 432, 3.0),
'phone_nexus_4': ('Nexus 4', 1280, 768, 240, 2.0),
'phone_nexus_5': ('Nexus 5', 1920, 1080, 450, 3.0),
'phone_nexus_5x': ('Nexus 5X', 1920, 1080, 432, 2.6),
'phone_nexus_6': ('Nexus 6', 2560, 1440, 496, 3.5),
'phone_nexus_6p': ('Nexus 6P', 2560, 1440, 514, 3.5),
'phone_oneplus_3t': ('OnePlus 3t', 1863, 1080, 380, 2.375),
'phone_oneplus_6t': ('OnePlus 6t', 2340, 1080, 420, 2.625),
'phone_samsung_galaxy_note_4': ('Samsung Galaxy Note 4',
2560, 1440, 514, 3.0),
'phone_samsung_galaxy_s5': ('Samsung Galaxy S5', 1920, 1080, 372, 3.0),
'phone_samsung_galaxy_s6': ('Samsung Galaxy S6', 2560, 1440, 576, 4.0),
'phone_sony_xperia_c4': ('Sony Xperia C4', 1920, 1080, 400, 2.0),
'phone_sony_xperia_z_ultra': ('Sony Xperia Z Ultra', 1920, 1080, 348, 2.0),
'phone_sony_xperia_z1_compact': ('Sony Xperia Z1 Compact',
1280, 720, 342, 2.0),
'phone_sony_xperia_z2z3': ('Sony Xperia Z2/Z3', 1920, 1080, 432, 3.0),
'phone_sony_xperia_z3_compact': ('Sony Xperia Z3 Compact',
1280, 720, 313, 2.0),
'tablet_dell_venue_8': ('Dell Venue 8', 2560, 1600, 355, 2.0),
'tablet_ipad': ('iPad', 1024, 768, 132, 1.0),
'tablet_ipad_mini': ('iPad Mini', 1024, 768, 163, 1.0),
'tablet_ipad_mini_retina': ('iPad Mini Retina', 2048, 1536, 326, 2.0),
'tablet_ipad_pro': ('iPad Pro', 2732, 2048, 265, 2.0),
'tablet_ipad_retina': ('iPad Retina', 2048, 1536, 264, 2.0),
'tablet_nexus_10': ('Nexus 10', 2560, 1600, 297, 2.0),
'tablet_nexus_7_12': ('Nexus 7 12', 1280, 800, 216, 1.3),
'tablet_nexus_7_13': ('Nexus 7 13', 1920, 1200, 324, 2.0),
'tablet_nexus_9': ('Nexus 9', 2048, 1536, 288, 2.0),
'tablet_samsung_galaxy_tab_10': ('Samsung Galaxy Tab 10',
1280, 800, 148, 1.0),
'tablet_sony_xperia_z3_tablet': ('Sony Xperia Z3 Tablet',
1920, 1200, 282, 2.0),
'tablet_sony_xperia_z4_tablet': ('Sony Xperia Z4 Tablet',
2560, 1600, 297, 2.0),
'tablet_huawei_mediapad_m3_lite_10': ('HUAWEI MediaPad M3 Lite 10',
1920, 1200, 320, 2.25)
}
def start(win, ctx):
pass
def stop(win, ctx):
pass
def apply_device(device, scale, orientation):
name, width, height, dpi, density = devices[device]
if orientation == 'portrait':
width, height = height, width
Logger.info('Screen: Apply screen settings for {0}'.format(name))
Logger.info('Screen: size={0}x{1} dpi={2} density={3} '
'orientation={4}'.format(width, height, dpi, density,
orientation))
try:
scale = float(scale)
except:
scale = 1
environ['KIVY_METRICS_DENSITY'] = str(density * scale)
environ['KIVY_DPI'] = str(dpi * scale)
Config.set('graphics', 'width', str(int(width * scale)))
# simulate with the android bar
# FIXME should be configurable
Config.set('graphics', 'height', str(int(height * scale - 25 * density)))
Config.set('graphics', 'fullscreen', '0')
Config.set('graphics', 'show_mousecursor', '1')
def usage(device=None):
if device:
Logger.error('Screen: The specified device ({0}) is unknown.',
device)
print('\nModule usage: python main.py -m screen:deviceid[,orientation]\n')
print('Available devices:\n')
print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
'Device ID', 'Name', 'Width', 'Height', 'DPI', 'Density'))
for device, info in devices.items():
print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
device, *info))
print('\n')
print('Simulate a medium-density screen such as Motorola Droid 2:\n')
print(' python main.py -m screen:droid2\n')
print('Simulate a high-density screen such as HTC One X, in portrait:\n')
print(' python main.py -m screen:onex,portrait\n')
print('Simulate the iPad 2 screen\n')
print(' python main.py -m screen:ipad\n')
print('If the generated window is too large, you can specify a scale:\n')
print(' python main.py -m screen:note2,portrait,scale=.75\n')
sys.exit(1)
def configure(ctx):
scale = ctx.pop('scale', None)
orientation = 'landscape'
ctx.pop('landscape', None)
if ctx.pop('portrait', None):
orientation = 'portrait'
if not ctx:
return usage(None)
device = list(ctx.keys())[0]
if device not in devices:
return usage('')
apply_device(device, scale, orientation)
if __name__ == "__main__":
for n in devices.values():
assert n[1] > n[2]
| mit | a61477d83168c11ef9f567149b1d7bab | 39.852459 | 79 | 0.583333 | 2.80315 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/urllib3/connectionpool.py | 76 | 25347 | # urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
assert_fingerprint,
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': 80,
'https': 443,
}
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
# httplib doesn't like it when we include brackets in ipv6 addresses
host = host.strip('[]')
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
return self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
**extra_params)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
try:
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if hasattr(conn, 'sock'):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except SocketTimeout:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
except BaseSSLError as e:
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.")
raise
except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue
raise EmptyPoolError(self, "No pool connections are available.")
except BaseSSLError as e:
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except TimeoutError as e:
# Connection broken, discard.
conn = None
# Save the error off for retry logic.
err = e
if retries == 0:
raise
except (HTTPException, SocketError) as e:
if isinstance(e, SocketError) and self.proxy is not None:
raise ProxyError('Cannot connect to proxy. '
'Socket error: %s.' % e)
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`httplib.HTTPSConnection`.

    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
    ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
    :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
    into an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None):

        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, _proxy, _proxy_headers)
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version

        if self.proxy is not None:
            # Python 2.7+
            try:
                set_tunnel = conn.set_tunnel
            except AttributeError:  # Platform-specific: Python 2.6
                set_tunnel = conn._set_tunnel
            set_tunnel(self.host, self.port, self.proxy_headers)

            # Establish tunnel connection early, because otherwise httplib
            # would improperly set Host: header to proxy's IP:port.
            conn.connect()

        return conn

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        # Lazy %-style arguments: the message is only formatted when INFO
        # logging is actually enabled, instead of eagerly on every call.
        log.info("Starting new HTTPS connection (%d): %s",
                 self.num_connections, self.host)

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            # Platform-specific: Python without ssl
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")

        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            # When tunneling through a proxy, the TCP connection goes to the
            # proxy; the tunnel to the real host is set up in _prepare_conn().
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        extra_params = {}
        if not six.PY3:  # Python 2
            extra_params['strict'] = self.strict

        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  **extra_params)

        return self._prepare_conn(conn)
def connection_from_url(url, **kw):
    r"""
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example: ::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    # Raw docstring above: ``\**kw`` is an invalid escape sequence in a
    # plain string literal (SyntaxWarning on modern Python).
    scheme, host, port = get_host(url)
    if scheme == 'https':
        return HTTPSConnectionPool(host, port=port, **kw)
    else:
        return HTTPConnectionPool(host, port=port, **kw)
| mit | 27dad6268678a78002a3c018b558e4be | 36.220264 | 86 | 0.588275 | 4.617781 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/oauthlib/oauth2/rfc6749/clients/backend_application.py | 85 | 2498 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
from .base import Client
from ..parameters import prepare_token_request
from ..parameters import parse_token_response
class BackendApplicationClient(Client):
    """A public client that authenticates using only its client credentials.

    In the client credentials grant workflow the client requests an access
    token using nothing but its own credentials (or other supported means of
    authentication), typically to access protected resources under its own
    control, or resources whose use has been arranged with the authorization
    server beforehand (how that arrangement happens is out of scope of the
    specification).

    Because the client authentication itself serves as the authorization
    grant, no separate authorization request is needed.  This grant type
    MUST only be used by confidential clients.
    """

    def prepare_request_body(self, body='', scope=None, **kwargs):
        """Add the client credentials to the request body.

        Produces an ``application/x-www-form-urlencoded`` entity body per
        `Appendix B`_ containing ``grant_type=client_credentials`` together
        with any supplied scope and extra credentials.  The client MUST
        authenticate with the authorization server as described in
        `Section 3.2.1`_.

        :param body: Existing request body to append parameters to.
        :param scope: The scope of the access request as described by
                      `Section 3.3`_.
        :param kwargs: Extra credentials to include in the token request.

        Example::

            >>> from oauthlib.oauth2 import BackendApplicationClient
            >>> client = BackendApplicationClient('your_id')
            >>> client.prepare_request_body(scope=['hello', 'world'])
            'grant_type=client_credentials&scope=hello+world'

        .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
        .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
        """
        grant_type = 'client_credentials'
        return prepare_token_request(grant_type, body=body,
                                     scope=scope, **kwargs)
| mit | a4e66644d0853ff545c38ba23c91821c | 39.95082 | 77 | 0.679744 | 4.509025 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/charade/hebrewprober.py | 208 | 13642 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
# windows-1255 / ISO-8859-8 code points of interest
# (the five Hebrew letters that have a distinct word-final form, plus their
# normal forms; both charsets map them to the same code points)
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6

# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5

# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01

# Charset names returned by HebrewProber.get_charset_name().
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Helper prober that decides between Logical and Visual Hebrew.

    It never identifies a charset on its own; it accumulates final-letter
    evidence while being fed, and combines that with the two windows-1255
    model probers' confidences in :meth:`get_charset_name` (see the module
    comment above for the full rationale).
    """

    # Letters that only legally appear at the end of a word.  Stored as
    # frozensets built once at class-creation time instead of rebuilding a
    # list on every call: is_final()/is_non_final() run once per character.
    _FINAL_CHARS = frozenset([FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                              FINAL_TSADI])
    # Normal forms that should not end a word.  NORMAL_TSADI is deliberately
    # excluded; see is_non_final() for the reasoning.
    _NON_FINAL_CHARS = frozenset([NORMAL_KAF, NORMAL_MEM, NORMAL_NUN,
                                  NORMAL_PE])

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        """Reset accumulated scores and the two-character lookbehind."""
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._mPrev = ' '
        self._mBeforePrev = ' '

    # These probers are owned by the group prober.
    def set_model_probers(self, logicalProber, visualProber):
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """Return True if ``c`` is a word-final Hebrew letter."""
        return wrap_ord(c) in self._FINAL_CHARS

    def is_non_final(self, c):
        """Return True if ``c`` is a normal form that should not end a word.

        The normal Tsadi is not a good Non-Final letter due to words like
        'lechotet' (to chat) containing an apostrophe after the tsadi. This
        apostrophe is converted to a space in FilterWithoutEnglishLetters
        causing the Non-Final tsadi to appear at an end of a word even
        though this is not the case in the original text.
        The letters Pe and Kaf rarely display a related behavior of not being
        a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
        for example legally end with a Non-Final Pe or Kaf. However, the
        benefit of these letters as Non-Final letters outweighs the damage
        since these words are quite rare.
        """
        return wrap_ord(c) in self._NON_FINAL_CHARS

    def feed(self, aBuf):
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is not a
                    # 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        #  cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers return
        # eNotMe (handled above)
        return eDetecting

    def get_charset_name(self):
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| mit | 235917645a36b46090995a8524ed3c27 | 46.204947 | 79 | 0.670723 | 3.951912 | false | false | false | false |
kivy/kivy | kivy/uix/slider.py | 4 | 13394 | """
Slider
======
.. image:: images/slider.jpg
The :class:`Slider` widget looks like a scrollbar. It supports horizontal and
vertical orientations, min/max values and a default value.
To create a slider from -100 to 100 starting from 25::
from kivy.uix.slider import Slider
s = Slider(min=-100, max=100, value=25)
To create a vertical slider::
from kivy.uix.slider import Slider
s = Slider(orientation='vertical')
To create a slider with a red line tracking the value::
from kivy.uix.slider import Slider
s = Slider(value_track=True, value_track_color=[1, 0, 0, 1])
Kv Example::
BoxLayout:
Slider:
id: slider
min: 0
max: 100
step: 1
orientation: 'vertical'
Label:
text: str(slider.value)
"""
__all__ = ('Slider', )
from kivy.uix.widget import Widget
from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,
ReferenceListProperty, BoundedNumericProperty,
StringProperty, ListProperty, BooleanProperty,
ColorProperty)
class Slider(Widget):
    """Class for creating a Slider widget.

    Check module documentation for more details.
    """

    value = NumericProperty(0.)
    '''Current value used for the slider.

    :attr:`value` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 0.'''

    min = NumericProperty(0.)
    '''Minimum value allowed for :attr:`value`.

    :attr:`min` is a :class:`~kivy.properties.NumericProperty` and defaults to
    0.'''

    max = NumericProperty(100.)
    '''Maximum value allowed for :attr:`value`.

    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
    100.'''

    padding = NumericProperty('16sp')
    '''Padding of the slider. The padding is used for graphical representation
    and interaction. It prevents the cursor from going out of the bounds of the
    slider bounding box.

    By default, padding is 16sp. The range of the slider is reduced from
    padding \\*2 on the screen. It allows drawing the default cursor of 32sp
    width without having the cursor go out of the widget.

    :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 16sp.'''

    orientation = OptionProperty('horizontal', options=(
        'vertical', 'horizontal'))
    '''Orientation of the slider.

    :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'horizontal'. Can take a value of 'vertical' or 'horizontal'.
    '''

    range = ReferenceListProperty(min, max)
    '''Range of the slider in the format (minimum value, maximum value)::

        >>> slider = Slider(min=10, max=80)
        >>> slider.range
        [10, 80]
        >>> slider.range = (20, 100)
        >>> slider.min
        20
        >>> slider.max
        100

    :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty` of
    (:attr:`min`, :attr:`max`) properties.
    '''

    step = BoundedNumericProperty(0, min=0)
    '''Step size of the slider.

    .. versionadded:: 1.4.0

    Determines the size of each interval or step the slider takes between
    :attr:`min` and :attr:`max`. If the value range can't be evenly
    divisible by step the last step will be capped by slider.max.

    A zero value will result in the smallest possible intervals/steps,
    calculated from the (pixel) position of the slider.

    :attr:`step` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 0.'''

    background_horizontal = StringProperty(
        'atlas://data/images/defaulttheme/sliderh_background')
    """Background of the slider used in the horizontal orientation.

    .. versionadded:: 1.10.0

    :attr:`background_horizontal` is a :class:`~kivy.properties.StringProperty`
    and defaults to `atlas://data/images/defaulttheme/sliderh_background`.
    """

    background_disabled_horizontal = StringProperty(
        'atlas://data/images/defaulttheme/sliderh_background_disabled')
    """Background of the disabled slider used in the horizontal orientation.

    .. versionadded:: 1.10.0

    :attr:`background_disabled_horizontal` is a
    :class:`~kivy.properties.StringProperty` and defaults to
    `atlas://data/images/defaulttheme/sliderh_background_disabled`.
    """

    background_vertical = StringProperty(
        'atlas://data/images/defaulttheme/sliderv_background')
    """Background of the slider used in the vertical orientation.

    .. versionadded:: 1.10.0

    :attr:`background_vertical` is a :class:`~kivy.properties.StringProperty`
    and defaults to `atlas://data/images/defaulttheme/sliderv_background`.
    """

    background_disabled_vertical = StringProperty(
        'atlas://data/images/defaulttheme/sliderv_background_disabled')
    """Background of the disabled slider used in the vertical orientation.

    .. versionadded:: 1.10.0

    :attr:`background_disabled_vertical` is a
    :class:`~kivy.properties.StringProperty` and defaults to
    `atlas://data/images/defaulttheme/sliderv_background_disabled`.
    """

    background_width = NumericProperty('36sp')
    """Slider's background's width (thickness), used in both horizontal
    and vertical orientations.

    .. versionadded 1.10.0

    :attr:`background_width` is a
    :class:`~kivy.properties.NumericProperty` and defaults to 36sp.
    """

    cursor_image = StringProperty(
        'atlas://data/images/defaulttheme/slider_cursor')
    """Path of the image used to draw the slider cursor.

    .. versionadded 1.10.0

    :attr:`cursor_image` is a :class:`~kivy.properties.StringProperty`
    and defaults to `atlas://data/images/defaulttheme/slider_cursor`.
    """

    cursor_disabled_image = StringProperty(
        'atlas://data/images/defaulttheme/slider_cursor_disabled')
    """Path of the image used to draw the disabled slider cursor.

    .. versionadded 1.10.0

    :attr:`cursor_image` is a :class:`~kivy.properties.StringProperty`
    and defaults to `atlas://data/images/defaulttheme/slider_cursor_disabled`.
    """

    cursor_width = NumericProperty('32sp')
    """Width of the cursor image.

    .. versionadded 1.10.0

    :attr:`cursor_width` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 32sp.
    """

    cursor_height = NumericProperty('32sp')
    """Height of the cursor image.

    .. versionadded 1.10.0

    :attr:`cursor_height` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 32sp.
    """

    cursor_size = ReferenceListProperty(cursor_width, cursor_height)
    """Size of the cursor image.

    .. versionadded 1.10.0

    :attr:`cursor_size` is a :class:`~kivy.properties.ReferenceListProperty`
    of (:attr:`cursor_width`, :attr:`cursor_height`) properties.
    """

    border_horizontal = ListProperty([0, 18, 0, 18])
    """Border used to draw the slider background in horizontal orientation.

    .. versionadded 1.10.0

    :attr:`border_horizontal` is a :class:`~kivy.properties.ListProperty`
    and defaults to [0, 18, 0, 18].
    """

    border_vertical = ListProperty([18, 0, 18, 0])
    """Border used to draw the slider background in vertical orientation.

    .. versionadded 1.10.0

    :attr:`border_horizontal` is a :class:`~kivy.properties.ListProperty`
    and defaults to [18, 0, 18, 0].
    """

    value_track = BooleanProperty(False)
    """Decides if slider should draw the line indicating the
    space between :attr:`min` and :attr:`value` properties values.

    .. versionadded 1.10.0

    :attr:`value_track` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to False.
    """

    value_track_color = ColorProperty([1, 1, 1, 1])
    """Color of the :attr:`value_line` in rgba format.

    .. versionadded 1.10.0

    :attr:`value_track_color` is a :class:`~kivy.properties.ColorProperty`
    and defaults to [1, 1, 1, 1].

    .. versionchanged:: 2.0.0
        Changed from :class:`~kivy.properties.ListProperty` to
        :class:`~kivy.properties.ColorProperty`.
    """

    value_track_width = NumericProperty('3dp')
    """Width of the track line.

    .. versionadded 1.10.0

    :attr:`value_track_width` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 3dp.
    """

    sensitivity = OptionProperty('all', options=('all', 'handle'))
    """Whether the touch collides with the whole body of the widget
    or with the slider handle part only.

    .. versionadded:: 1.10.1

    :attr:`sensitivity` is a :class:`~kivy.properties.OptionProperty`
    and defaults to 'all'. Can take a value of 'all' or 'handle'.
    """

    # The following two methods constrain the slider's value
    # to range(min,max). Otherwise it may happen that self.value < self.min
    # at init.
    def on_min(self, *largs):
        """Re-clamp :attr:`value` whenever :attr:`min` changes."""
        self.value = min(self.max, max(self.min, self.value))

    def on_max(self, *largs):
        """Re-clamp :attr:`value` whenever :attr:`max` changes."""
        self.value = min(self.max, max(self.min, self.value))

    def get_norm_value(self):
        """Getter for :attr:`value_normalized`: map :attr:`value` to 0-1."""
        vmin = self.min
        d = self.max - vmin
        if d == 0:
            # Degenerate range (min == max): avoid a division by zero.
            return 0
        return (self.value - vmin) / float(d)

    def set_norm_value(self, value):
        """Setter for :attr:`value_normalized`: map a 0-1 value back into
        [min, max], snapping to :attr:`step` when it is non-zero.
        """
        vmin = self.min
        vmax = self.max
        step = self.step
        val = min(value * (vmax - vmin) + vmin, vmax)
        if step == 0:
            self.value = val
        else:
            # Snap to the nearest step multiple, capped at vmax.
            self.value = min(round((val - vmin) / step) * step + vmin,
                             vmax)

    value_normalized = AliasProperty(get_norm_value, set_norm_value,
                                     bind=('value', 'min', 'max'),
                                     cache=True)
    '''Normalized value inside the :attr:`range` (min/max) to 0-1 range::

        >>> slider = Slider(value=50, min=0, max=100)
        >>> slider.value
        50
        >>> slider.value_normalized
        0.5
        >>> slider.value = 0
        >>> slider.value_normalized
        0
        >>> slider.value = 100
        >>> slider.value_normalized
        1

    You can also use it for setting the real value without knowing the minimum
    and maximum::

        >>> slider = Slider(min=0, max=200)
        >>> slider.value_normalized = .5
        >>> slider.value
        100
        >>> slider.value_normalized = 1.
        >>> slider.value
        200

    :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    def get_value_pos(self):
        """Getter for :attr:`value_pos`: window coordinates of the cursor,
        derived from :attr:`value_normalized` and :attr:`padding`."""
        padding = self.padding
        x = self.x
        y = self.y
        nval = self.value_normalized
        if self.orientation == 'horizontal':
            return (x + padding + nval * (self.width - 2 * padding), y)
        else:
            return (x, y + padding + nval * (self.height - 2 * padding))

    def set_value_pos(self, pos):
        """Setter for :attr:`value_pos`: clamp *pos* to the padded track and
        update :attr:`value_normalized` accordingly."""
        padding = self.padding
        x = min(self.right - padding, max(pos[0], self.x + padding))
        y = min(self.top - padding, max(pos[1], self.y + padding))
        if self.orientation == 'horizontal':
            if self.width == 0:
                # Widget not laid out yet; avoid a division by zero.
                self.value_normalized = 0
            else:
                self.value_normalized = (x - self.x - padding
                                         ) / float(self.width - 2 * padding)
        else:
            if self.height == 0:
                # Widget not laid out yet; avoid a division by zero.
                self.value_normalized = 0
            else:
                self.value_normalized = (y - self.y - padding
                                         ) / float(self.height - 2 * padding)

    value_pos = AliasProperty(get_value_pos, set_value_pos,
                              bind=('pos', 'size', 'min', 'max', 'padding',
                                    'value_normalized', 'orientation'),
                              cache=True)
    '''Position of the internal cursor, based on the normalized value.

    :attr:`value_pos` is an :class:`~kivy.properties.AliasProperty`.
    '''

    def on_touch_down(self, touch):
        """Handle touch/click: scroll wheel nudges the value; other touches
        are grabbed and move the cursor (subject to :attr:`sensitivity`)."""
        if self.disabled or not self.collide_point(*touch.pos):
            return
        if touch.is_mouse_scrolling:
            # Scroll wheel: move by one step, or by 1/20 of the range when
            # no step is set.
            if 'down' in touch.button or 'left' in touch.button:
                if self.step:
                    self.value = min(self.max, self.value + self.step)
                else:
                    self.value = min(
                        self.max,
                        self.value + (self.max - self.min) / 20)
            if 'up' in touch.button or 'right' in touch.button:
                if self.step:
                    self.value = max(self.min, self.value - self.step)
                else:
                    self.value = max(
                        self.min,
                        self.value - (self.max - self.min) / 20)
        elif self.sensitivity == 'handle':
            # Only grab the touch when it hits the handle child widget.
            if self.children[0].collide_point(*touch.pos):
                touch.grab(self)
        else:
            # sensitivity == 'all': any touch on the widget moves the value.
            touch.grab(self)
            self.value_pos = touch.pos
        return True

    def on_touch_move(self, touch):
        """Track a grabbed touch by moving the cursor with it."""
        if touch.grab_current == self:
            self.value_pos = touch.pos
            return True

    def on_touch_up(self, touch):
        """Finalize the cursor position when a grabbed touch is released."""
        if touch.grab_current == self:
            self.value_pos = touch.pos
            return True
if __name__ == '__main__':
    from kivy.app import App

    class _SliderDemo(App):
        """Minimal demo application showing a single padded slider."""

        def build(self):
            demo_slider = Slider(padding=25)
            return demo_slider

    _SliderDemo().run()
| mit | 356a60569731f30cdaae04d3f38219f6 | 30.814727 | 79 | 0.603927 | 4.058788 | false | false | false | false |
kivy/kivy | examples/frameworks/twisted/echo_server_app.py | 13 | 1202 | # install_twisted_rector must be called before importing and using the reactor
from kivy.support import install_twisted_reactor
install_twisted_reactor()
from twisted.internet import reactor
from twisted.internet import protocol
class EchoServer(protocol.Protocol):
    """Twisted protocol that replies with whatever the Kivy app returns."""

    def dataReceived(self, data):
        # Delegate to the Kivy application (reachable through the factory);
        # only write a reply when the app produced a non-empty response.
        reply = self.factory.app.handle_message(data)
        if not reply:
            return
        self.transport.write(reply)
class EchoServerFactory(protocol.Factory):
    """Factory that builds :class:`EchoServer` protocols and gives them
    access to the Kivy application."""

    # Protocol class instantiated for each incoming connection.
    protocol = EchoServer

    def __init__(self, app):
        # Stored so protocol instances can reach the app via self.factory.app.
        self.app = app
from kivy.app import App
from kivy.uix.label import Label
class TwistedServerApp(App):
    """Kivy app that runs a Twisted echo server and shows traffic in a label."""

    label = None

    def build(self):
        """Create the status label and start listening on TCP port 8000."""
        self.label = Label(text="server started\n")
        factory = EchoServerFactory(self)
        reactor.listenTCP(8000, factory)
        return self.label

    def handle_message(self, msg):
        """Decode *msg*, record it on the label, and return the reply bytes."""
        text = msg.decode('utf-8')
        self.label.text = "received: {}\n".format(text)
        # Canned replies for the two demo commands; anything else is echoed.
        canned = {"ping": "Pong", "plop": "Kivy Rocks!!!"}
        reply = canned.get(text, text)
        self.label.text += "responded: {}\n".format(reply)
        return reply.encode('utf-8')
if __name__ == '__main__':
TwistedServerApp().run()
| mit | f175ab65677dd26966c79fb6be0ea740 | 23.530612 | 78 | 0.638935 | 3.840256 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests_oauthlib/compliance_fixes/facebook.py | 89 | 1119 | from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from oauthlib.common import to_unicode
def facebook_compliance_fix(session):
    """Register a compliance hook fixing Facebook's non-standard token responses.

    Facebook returns OAuth2 access tokens as ``text/plain`` form-encoded
    data instead of the JSON document the spec requires.  The registered
    hook rewrites such responses into JSON so oauthlib can parse them.

    :param session: the OAuth2 session to patch.
    :returns: the same session, with the hook registered.
    """
    def _compliance_fix(r):
        # if Facebook claims to be sending us json, let's trust them.
        # (Default to '' — a string, not a dict — for the substring test.)
        if 'application/json' in r.headers.get('content-type', ''):
            return r

        # Facebook returns a content-type of text/plain when sending their
        # x-www-form-urlencoded responses, along with a 200. If not, let's
        # assume we're getting JSON and bail on the fix.
        if ('text/plain' in r.headers.get('content-type', '')
                and r.status_code == 200):
            token = dict(parse_qsl(r.text, keep_blank_values=True))
        else:
            return r

        expires = token.get('expires')
        if expires is not None:
            # Facebook says 'expires' where RFC 6749 says 'expires_in'.
            token['expires_in'] = expires
        token['token_type'] = 'Bearer'
        r._content = to_unicode(dumps(token)).encode('UTF-8')
        return r

    session.register_compliance_hook('access_token_response', _compliance_fix)
    return session
| mit | a9b579692a211138daa89c3d12d1b8b8 | 32.909091 | 86 | 0.635389 | 3.858621 | false | false | false | false |
kivy/kivy | kivy/core/__init__.py | 4 | 9877 | '''
Core Abstraction
================
This module defines the abstraction layers for our core providers and their
implementations. For further information, please refer to
:ref:`architecture` and the :ref:`providers` section of the documentation.
In most cases, you shouldn't directly use a library that's already covered
by the core abstraction. Always try to use our providers first.
In case we are missing a feature or method, please let us know by
opening a new Bug report instead of relying on your library.
.. warning::
These are **not** widgets! These are just abstractions of the respective
functionality. For example, you cannot add a core image to your window.
You have to use the image **widget** class instead. If you're really
looking for widgets, please refer to :mod:`kivy.uix` instead.
'''
import os
import sysconfig
import sys
import traceback
import tempfile
import subprocess
import importlib
import kivy
from kivy.logger import Logger
class CoreCriticalException(Exception):
pass
def core_select_lib(category, llist, create_instance=False,
base='kivy.core', basemodule=None):
if 'KIVY_DOC' in os.environ:
return
category = category.lower()
basemodule = basemodule or category
libs_ignored = []
errs = []
for option, modulename, classname in llist:
try:
# module activated in config ?
try:
if option not in kivy.kivy_options[category]:
libs_ignored.append(modulename)
Logger.debug(
'{0}: Provider <{1}> ignored by config'.format(
category.capitalize(), option))
continue
except KeyError:
pass
# import module
mod = importlib.__import__(name='{2}.{0}.{1}'.format(
basemodule, modulename, base),
globals=globals(),
locals=locals(),
fromlist=[modulename], level=0)
cls = mod.__getattribute__(classname)
# ok !
Logger.info('{0}: Provider: {1}{2}'.format(
category.capitalize(), option,
'({0} ignored)'.format(libs_ignored) if libs_ignored else ''))
if create_instance:
cls = cls()
return cls
except ImportError as e:
errs.append((option, e, sys.exc_info()[2]))
libs_ignored.append(modulename)
Logger.debug('{0}: Ignored <{1}> (import error)'.format(
category.capitalize(), option))
Logger.trace('', exc_info=e)
except CoreCriticalException as e:
errs.append((option, e, sys.exc_info()[2]))
Logger.error('{0}: Unable to use {1}'.format(
category.capitalize(), option))
Logger.error(
'{0}: The module raised an important error: {1!r}'.format(
category.capitalize(), e.message))
raise
except Exception as e:
errs.append((option, e, sys.exc_info()[2]))
libs_ignored.append(modulename)
Logger.trace('{0}: Unable to use {1}'.format(
category.capitalize(), option))
Logger.trace('', exc_info=e)
err = '\n'.join(['{} - {}: {}\n{}'.format(opt, e.__class__.__name__, e,
''.join(traceback.format_tb(tb))) for opt, e, tb in errs])
Logger.critical(
'{0}: Unable to find any valuable {0} provider. Please enable '
'debug logging (e.g. add -d if running from the command line, or '
'change the log level in the config) and re-run your app to '
'identify potential causes\n{1}'.format(category.capitalize(), err))
def core_register_libs(category, libs, base='kivy.core'):
if 'KIVY_DOC' in os.environ:
return
category = category.lower()
kivy_options = kivy.kivy_options[category]
libs_loadable = {}
libs_ignored = []
for option, lib in libs:
# module activated in config ?
if option not in kivy_options:
Logger.debug('{0}: option <{1}> ignored by config'.format(
category.capitalize(), option))
libs_ignored.append(lib)
continue
libs_loadable[option] = lib
libs_loaded = []
for item in kivy_options:
try:
# import module
try:
lib = libs_loadable[item]
except KeyError:
continue
importlib.__import__(name='{2}.{0}.{1}'.format(category, lib, base),
globals=globals(),
locals=locals(),
fromlist=[lib],
level=0)
libs_loaded.append(lib)
except Exception as e:
Logger.trace('{0}: Unable to use <{1}> as loader!'.format(
category.capitalize(), option))
Logger.trace('', exc_info=e)
libs_ignored.append(lib)
Logger.info('{0}: Providers: {1} {2}'.format(
category.capitalize(),
', '.join(libs_loaded),
'({0} ignored)'.format(
', '.join(libs_ignored)) if libs_ignored else ''))
return libs_loaded
def handle_win_lib_import_error(category, provider, mod_name):
if sys.platform != 'win32':
return
assert mod_name.startswith('kivy.')
kivy_root = os.path.dirname(kivy.__file__)
dirs = mod_name[5:].split('.')
mod_path = os.path.join(kivy_root, *dirs)
# get the full expected path to the compiled pyd file
# filename is <debug>.cp<major><minor>-<platform>.pyd
# https://github.com/python/cpython/blob/master/Doc/whatsnew/3.5.rst
if hasattr(sys, 'gettotalrefcount'): # debug
mod_path += '._d'
mod_path += '.cp{}{}-{}.pyd'.format(
sys.version_info.major, sys.version_info.minor,
sysconfig.get_platform().replace('-', '_'))
# does the compiled pyd exist at all?
if not os.path.exists(mod_path):
Logger.debug(
'{}: Failed trying to import "{}" for provider {}. Compiled file '
'does not exist. Have you perhaps forgotten to compile Kivy, or '
'did not install all required dependencies?'.format(
category, provider, mod_path))
return
# tell user to provide dependency walker
env_var = 'KIVY_{}_DEPENDENCY_WALKER'.format(provider.upper())
if env_var not in os.environ:
Logger.debug(
'{0}: Failed trying to import the "{1}" provider from "{2}". '
'This error is often encountered when a dependency is missing,'
' or if there are multiple copies of the same dependency dll on '
'the Windows PATH and they are incompatible with each other. '
'This can occur if you are mixing installations (such as different'
' python installations, like anaconda python and a system python) '
'or if another unrelated program added its directory to the PATH. '
'Please examine your PATH and python installation for potential '
'issues. To further troubleshoot a "DLL load failed" error, '
'please download '
'"Dependency Walker" (64 or 32 bit version - matching your python '
'bitness) from dependencywalker.com and set the environment '
'variable {3} to the full path of the downloaded depends.exe file '
'and rerun your application to generate an error report'.
format(category, provider, mod_path, env_var))
return
depends_bin = os.environ[env_var]
if not os.path.exists(depends_bin):
raise ValueError('"{}" provided in {} does not exist'.format(
depends_bin, env_var))
# make file for the resultant log
fd, temp_file = tempfile.mkstemp(
suffix='.dwi', prefix='kivy_depends_{}_log_'.format(provider),
dir=os.path.expanduser('~/'))
os.close(fd)
Logger.info(
'{}: Running dependency walker "{}" on "{}" to generate '
'troubleshooting log. Please wait for it to complete'.format(
category, depends_bin, mod_path))
Logger.debug(
'{}: Dependency walker command is "{}"'.format(
category,
[depends_bin, '/c', '/od:{}'.format(temp_file), mod_path]))
try:
subprocess.check_output([
depends_bin, '/c', '/od:{}'.format(temp_file), mod_path])
except subprocess.CalledProcessError as exc:
if exc.returncode >= 0x00010000:
Logger.error(
'{}: Dependency walker failed with error code "{}". No '
'error report was generated'.
format(category, exc.returncode))
return
Logger.info(
'{}: dependency walker generated "{}" containing troubleshooting '
'information about provider {} and its failing file "{} ({})". You '
'can open the file in dependency walker to view any potential issues '
'and troubleshoot it yourself. '
'To share the file with the Kivy developers and request support, '
'please contact us at our support channels '
'https://kivy.org/doc/master/contact.html (not on github, unless '
'it\'s truly a bug). Make sure to provide the generated file as well '
'as the *complete* Kivy log being printed here. Keep in mind the '
'generated dependency walker log file contains paths to dlls on your '
'system used by kivy or its dependencies to help troubleshoot them, '
'and these paths may include your name in them. Please view the '
'log file in dependency walker before sharing to ensure you are not '
'sharing sensitive paths'.format(
category, temp_file, provider, mod_name, mod_path))
| mit | 09849afcbc4706b91175093b9388ccfc | 38.987854 | 80 | 0.58702 | 4.309337 | false | false | false | false |
kivy/kivy | kivy/tools/stub-gl-debug.py | 4 | 13294 | # flake8: noqa
from __future__ import print_function
a = '''cdef void glActiveTexture (cgl.GLenum texture)
cdef void glAttachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glBindAttribLocation (cgl.GLuint program, cgl.GLuint index, cgl.GLchar* name)
cdef void glBindBuffer (cgl.GLenum target, cgl.GLuint buffer)
cdef void glBindFramebuffer (cgl.GLenum target, cgl.GLuint framebuffer)
cdef void glBindRenderbuffer (cgl.GLenum target, cgl.GLuint renderbuffer)
cdef void glBindTexture (cgl.GLenum target, cgl.GLuint texture)
cdef void glBlendColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glBlendEquation (cgl.GLenum mode)
cdef void glBlendEquationSeparate (cgl.GLenum modeRGB, cgl.GLenum modeAlpha)
cdef void glBlendFunc (cgl.GLenum sfactor, cgl.GLenum dfactor)
cdef void glBlendFuncSeparate (cgl.GLenum srcRGB, cgl.GLenum dstRGB, cgl.GLenum srcAlpha, cgl.GLenum dstAlpha)
cdef void glBufferData (cgl.GLenum target, cgl.GLsizeiptr size, cgl.GLvoid* data, cgl.GLenum usage)
cdef void glBufferSubData (cgl.GLenum target, cgl.GLintptr offset, cgl.GLsizeiptr size, cgl.GLvoid* data)
cdef cgl.GLenum glCheckFramebufferStatus (cgl.GLenum target)
cdef void glClear (cgl.GLbitfield mask)
cdef void glClearColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glClearDepthf (cgl.GLclampf depth)
cdef void glClearStencil (cgl.GLint s)
cdef void glColorMask (cgl.GLboolean red, cgl.GLboolean green, cgl.GLboolean blue, cgl.GLboolean alpha)
cdef void glCompileShader (cgl.GLuint shader)
cdef void glCompressedTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCompressedTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCopyTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border)
cdef void glCopyTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
cdef cgl.GLuint glCreateProgram ()
cdef cgl.GLuint glCreateShader (cgl.GLenum type)
cdef void glCullFace (cgl.GLenum mode)
cdef void glDeleteBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glDeleteFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glDeleteProgram (cgl.GLuint program)
cdef void glDeleteRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glDeleteShader (cgl.GLuint shader)
cdef void glDeleteTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glDepthFunc (cgl.GLenum func)
cdef void glDepthMask (cgl.GLboolean flag)
cdef void glDepthRangef (cgl.GLclampf zNear, cgl.GLclampf zFar)
cdef void glDetachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glDisable (cgl.GLenum cap)
cdef void glDisableVertexAttribArray (cgl.GLuint index)
cdef void glDrawArrays (cgl.GLenum mode, cgl.GLint first, cgl.GLsizei count)
cdef void glDrawElements (cgl.GLenum mode, cgl.GLsizei count, cgl.GLenum type, cgl.GLvoid* indices)
cdef void glEnable (cgl.GLenum cap)
cdef void glEnableVertexAttribArray (cgl.GLuint index)
cdef void glFinish ()
cdef void glFlush ()
cdef void glFramebufferRenderbuffer (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum renderbuffertarget, cgl.GLuint renderbuffer)
cdef void glFramebufferTexture2D (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum textarget, cgl.GLuint texture, cgl.GLint level)
cdef void glFrontFace (cgl.GLenum mode)
cdef void glGenBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glGenerateMipmap (cgl.GLenum target)
cdef void glGenFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glGenRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glGenTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glGetActiveAttrib (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetActiveUniform (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetAttachedShaders (cgl.GLuint program, cgl.GLsizei maxcount, cgl.GLsizei* count, cgl.GLuint* shaders)
cdef int glGetAttribLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetBooleanv (cgl.GLenum pname, cgl.GLboolean* params)
cdef void glGetBufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef cgl.GLenum glGetError ()
cdef void glGetFloatv (cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetFramebufferAttachmentParameteriv (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetIntegerv (cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramiv (cgl.GLuint program, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramInfoLog (cgl.GLuint program, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
cdef void glGetRenderbufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderiv (cgl.GLuint shader, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderInfoLog (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
#cdef void glGetShaderPrecisionFormat (cgl.GLenum shadertype, cgl.GLenum precisiontype, cgl.GLint* range, cgl.GLint* precision)
cdef void glGetShaderSource (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* source)
cdef cgl.GLubyte* glGetString (cgl.GLenum name)
cdef void glGetTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetUniformfv (cgl.GLuint program, cgl.GLint location, cgl.GLfloat* params)
cdef void glGetUniformiv (cgl.GLuint program, cgl.GLint location, cgl.GLint* params)
cdef int glGetUniformLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetVertexAttribfv (cgl.GLuint index, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetVertexAttribiv (cgl.GLuint index, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetVertexAttribPointerv (cgl.GLuint index, cgl.GLenum pname, cgl.GLvoid** pointer)
cdef void glHint (cgl.GLenum target, cgl.GLenum mode)
cdef cgl.GLboolean glIsBuffer (cgl.GLuint buffer)
cdef cgl.GLboolean glIsEnabled (cgl.GLenum cap)
cdef cgl.GLboolean glIsFramebuffer (cgl.GLuint framebuffer)
cdef cgl.GLboolean glIsProgram (cgl.GLuint program)
cdef cgl.GLboolean glIsRenderbuffer (cgl.GLuint renderbuffer)
cdef cgl.GLboolean glIsShader (cgl.GLuint shader)
cdef cgl.GLboolean glIsTexture (cgl.GLuint texture)
cdef void glLineWidth (cgl.GLfloat width)
cdef void glLinkProgram (cgl.GLuint program)
cdef void glPixelStorei (cgl.GLenum pname, cgl.GLint param)
cdef void glPolygonOffset (cgl.GLfloat factor, cgl.GLfloat units)
cdef void glReadPixels (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
#cdef void glReleaseShaderCompiler ()
cdef void glRenderbufferStorage (cgl.GLenum target, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height)
cdef void glSampleCoverage (cgl.GLclampf value, cgl.GLboolean invert)
cdef void glScissor (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
#cdef void glShaderBinary (cgl.GLsizei n, cgl.GLuint* shaders, cgl.GLenum binaryformat, cgl.GLvoid* binary, cgl.GLsizei length)
cdef void glShaderSource (cgl.GLuint shader, cgl.GLsizei count, cgl.GLchar** string, cgl.GLint* length)
cdef void glStencilFunc (cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilFuncSeparate (cgl.GLenum face, cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilMask (cgl.GLuint mask)
cdef void glStencilMaskSeparate (cgl.GLenum face, cgl.GLuint mask)
cdef void glStencilOp (cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glStencilOpSeparate (cgl.GLenum face, cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glTexParameterf (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat param)
cdef void glTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glTexParameteri (cgl.GLenum target, cgl.GLenum pname, cgl.GLint param)
cdef void glTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glUniform1f (cgl.GLint location, cgl.GLfloat x)
cdef void glUniform1fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform1i (cgl.GLint location, cgl.GLint x)
cdef void glUniform1iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform2f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y)
cdef void glUniform2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform2i (cgl.GLint location, cgl.GLint x, cgl.GLint y)
cdef void glUniform2iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform3f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glUniform3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform3i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z)
cdef void glUniform3iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform4f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glUniform4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform4i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z, cgl.GLint w)
cdef void glUniform4iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniformMatrix2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUseProgram (cgl.GLuint program)
cdef void glValidateProgram (cgl.GLuint program)
cdef void glVertexAttrib1f (cgl.GLuint indx, cgl.GLfloat x)
cdef void glVertexAttrib1fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib2f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y)
cdef void glVertexAttrib2fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib3f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glVertexAttrib3fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib4f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glVertexAttrib4fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttribPointer (cgl.GLuint indx, cgl.GLint size, cgl.GLenum type, cgl.GLboolean normalized, cgl.GLsizei stride, cgl.GLvoid* ptr)
cdef void glViewport (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)'''
def replace(s):
item = s.split(' ')
rettype = item[1]
item = item[2:]
for x in item:
x = x.strip()
if not x or x.startswith('GL'):
continue
if x.startswith('(GL'):
yield '('
continue
if x.startswith('gl'):
prefix = ''
if rettype != 'void':
prefix = 'return '
yield '%scgl.%s' % (prefix, x)
continue
yield x
print('''
# This file was automatically generated with kivy/tools/stub-gl-debug.py
cimport c_opengl as cgl
''')
lines = a.splitlines()
for x in lines:
if x.startswith('#'):
# There are some functions that either do not exist or break on OSX.
# Just skip those.
print('# Skipping generation of: "%s"' % x)
continue
x = x.replace('cgl.', '')
y = ' '.join(replace(x))
print('%s with gil:' % x)
s = x.split()
print(' print("GL %s(' % s[2], end=' ')
pointer = 0
for arg in s[3:]:
arg = arg.strip()
arg = arg.replace(',', '').replace(')', '')
if 'GL' in arg or arg == '(':
pointer = arg.count('*')
continue
pointer = '*' * pointer
if pointer:
print('%s%s=", repr(hex(<long> %s)), ",' % (arg, pointer, arg), end=' ')
else:
print('%s = ", %s, ",' % (arg, arg), end=' ')
pointer = 0
print(')")')
print(' %s' % y)
print(' ret = glGetError()')
print(' if ret: print("ERR {} / {}".format(ret, ret))')
| mit | 5afcf2c0199385d232565f40f5b70356 | 65.139303 | 212 | 0.746051 | 2.972719 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/updateSchema.py | 1 | 1673 | import logging
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from registration import Hacker
import reimbursement
BATCH_SIZE = 100 # ideal batch size may vary based on entity size.
def updateSchema(cursor=None, num_updated=0):
to_put = []
hackers, next_curs, more = Hacker.query().fetch_page(BATCH_SIZE, start_cursor=cursor)
for hacker in hackers:
append = False
#Making sure nobody is admitted
hacker.admitted_email_sent_date = None
#Changing receipts to a repeated property
if hacker.receipts is None or hacker.receipts == [None]:
hacker.receipts = []
append = True
elif isinstance(hacker.receipts, ndb.BlobKeyProperty):
receipt = hacker.receipts
hacker.receipts = [receipt]
append = True
#Removing schools with whitespace around them.
if hacker.school.strip() != hacker.school:
hacker.school = hacker.school.strip()
append = True
#Creating the reimbursement max field
if hacker.rmax == 0 or hacker.rmax is None:
hacker.rmax = reimbursement.getMax(hacker.school)
append = True
if append:
to_put.append(hacker)
if to_put:
ndb.put_multi(to_put)
num_updated += len(to_put)
logging.debug(
'Put %d entities to Datastore for a total of %d',
len(to_put), num_updated)
deferred.defer(
updateSchema, cursor=next_curs, num_updated=num_updated)
else:
logging.debug(
'updateSchema complete with %d updates!', num_updated)
| mit | adf076bccf5c44794ad937a31d3094ad | 30.566038 | 89 | 0.62104 | 3.726058 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/twilio/rest/resources/call_feedback.py | 47 | 2977 | from . import (
ListResource,
InstanceResource,
transform_params,
)
class CallFeedback(InstanceResource):
def __init__(self, parent):
self.parent = parent
super(CallFeedback, self).__init__(
parent,
None,
)
class CallFeedbackFactory(ListResource):
"""
CallFeedback is a unique endpoint in the API in that it only
has an instance representation, and that instance representation
lives at the same URL you POST to to create it. Here, our
ListResource class acts as a Factory resource.
"""
name = "Feedback"
instance = CallFeedback
def create(self, **kwargs):
"""
Create a :class:`CallFeedback` object for the parent call.
:param int quality: The score quality. Must be an
int between 1 and 5.
:param list issue: A list of issues. The issue types are
found at the CallFeedback rest docs.
"""
return self.create_instance(kwargs)
def get(self, **kwargs):
""" Get the feedback for this call
Usage:
.. code-block:: python
feedback = client.calls.get("CA123").feedback
print feedback.issues
:rtype: :class:`~twilio.rest.resources.InstanceResource`
:raises: a :exc:`~twilio.TwilioRestException` if the request fails
"""
params = transform_params(kwargs)
_, data = self.request("GET", self.uri, params=params)
return self.load_instance(data)
def load_instance(self, data):
# Overridden because CallFeedback instances
# don't contain sids
instance = self.instance(self)
instance.load(data)
return instance
class CallFeedbackSummaryInstance(InstanceResource):
def __init__(self, parent):
self.parent = parent
super(CallFeedbackSummaryInstance, self).__init__(
parent,
None,
)
class CallFeedbackSummary(ListResource):
name = "Summary"
key = "Feedback"
instance = CallFeedbackSummaryInstance
def __init__(self, parent, *args, **kwargs):
super(CallFeedbackSummary, self).__init__(*args, **kwargs)
self.base_uri = parent.uri
def get(self, **kwargs):
""" Get the feedback summary for calls on this account
Usage:
.. code-block:: python
summary = client.calls.summary.get()
print summary.quality_score_average
:rtype: :class:`~twilio.rest.resources.InstanceResource`
:raises: a :exc:`~twilio.TwilioRestException` if the request fails
"""
params = transform_params(kwargs)
_, data = self.request('GET', self.uri, params=params)
return self.load_instance(data)
def load_instance(self, data):
# Overridden because CallFeedback summaries
# do not contain sids
instance = self.instance(self)
instance.load(data)
return instance
| mit | 20bb733b408c50ac8201f6851a14fbe0 | 26.564815 | 74 | 0.614713 | 4.443284 | false | false | false | false |
kivy/kivy | kivy/geometry.py | 5 | 3792 | '''
Geometry utilities
==================
This module contains some helper functions for geometric calculations.
'''
__all__ = ('circumcircle', 'minimum_bounding_circle')
from kivy.vector import Vector
def circumcircle(a, b, c):
'''
Computes the circumcircle of a triangle defined by a, b, c.
See: http://en.wikipedia.org/wiki/Circumscribed_circle
:Parameters:
`a`: iterable containing at least 2 values (for x and y)
The 1st point of the triangle.
`b`: iterable containing at least 2 values (for x and y)
The 2nd point of the triangle.
`c`: iterable containing at least 2 values (for x and y)
The 3rd point of the triangle.
:Return:
A tuple that defines the circle :
* The first element in the returned tuple is the center as (x, y)
* The second is the radius (float)
'''
P = Vector(a[0], a[1])
Q = Vector(b[0], b[1])
R = Vector(c[0], c[1])
mPQ = (P + Q) * .5
mQR = (Q + R) * .5
numer = -(- mPQ.y * R.y + mPQ.y * Q.y + mQR.y * R.y - mQR.y * Q.y -
mPQ.x * R.x + mPQ.x * Q.x + mQR.x * R.x - mQR.x * Q.x)
denom = (-Q.x * R.y + P.x * R.y - P.x * Q.y +
Q.y * R.x - P.y * R.x + P.y * Q.x)
t = numer / denom
cx = -t * (Q.y - P.y) + mPQ.x
cy = t * (Q.x - P.x) + mPQ.y
return ((cx, cy), (P - (cx, cy)).length())
def minimum_bounding_circle(points):
'''
Returns the minimum bounding circle for a set of points.
For a description of the problem being solved, see the `Smallest Circle
Problem <http://en.wikipedia.org/wiki/Smallest_circle_problem>`_.
The function uses Applet's Algorithm, the runtime is ``O(h^3, *n)``,
where h is the number of points in the convex hull of the set of points.
**But** it runs in linear time in almost all real world cases.
See: http://tinyurl.com/6e4n5yb
:Parameters:
`points`: iterable
A list of points (2 tuple with x,y coordinates)
:Return:
A tuple that defines the circle:
* The first element in the returned tuple is the center (x, y)
* The second the radius (float)
'''
points = [Vector(p[0], p[1]) for p in points]
if len(points) == 1:
return (points[0].x, points[0].y), 0.0
if len(points) == 2:
p1, p2 = points
return (p1 + p2) * .5, ((p1 - p2) * .5).length()
# determine a point P with the smallest y value
P = min(points, key=lambda p: p.y)
# find a point Q such that the angle of the line segment
# PQ with the x axis is minimal
def x_axis_angle(q):
if q == P:
return 1e10 # max val if the same, to skip
return abs((q - P).angle((1, 0)))
Q = min(points, key=x_axis_angle)
for p in points:
# find R such that angle PRQ is minimal
def angle_pq(r):
if r in (P, Q):
return 1e10 # max val if the same, to skip
return abs((r - P).angle(r - Q))
R = min(points, key=angle_pq)
# check for case 1 (angle PRQ is obtuse), the circle is determined
# by two points, P and Q. radius = |(P-Q)/2|, center = (P+Q)/2
if angle_pq(R) > 90.0:
return (P + Q) * .5, ((P - Q) * .5).length()
# if angle RPQ is obtuse, make P = R, and try again
if abs((R - P).angle(Q - P)) > 90:
P = R
continue
# if angle PQR is obtuse, make Q = R, and try again
if abs((P - Q).angle(R - Q)) > 90:
Q = R
continue
# all angles were acute..we just need the circle through the
# two points furthest apart!
break
# find the circumcenter for triangle given by P,Q,R
return circumcircle(P, Q, R)
| mit | 9e972846111987f5698bae5e42b8e8df | 30.338843 | 76 | 0.548787 | 3.20541 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/oauthlib/oauth2/rfc6749/clients/base.py | 37 | 20354 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import time
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.parameters import parse_token_response
from oauthlib.oauth2.rfc6749.parameters import prepare_token_request
from oauthlib.oauth2.rfc6749.parameters import prepare_token_revocation_request
from oauthlib.oauth2.rfc6749.errors import TokenExpiredError
from oauthlib.oauth2.rfc6749.errors import InsecureTransportError
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client(object):
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplide inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self._expires_at = None
self._populate_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
is experimental and currently matching version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
    def prepare_request_uri(self, *args, **kwargs):
        """Abstract method used to create request URIs.

        Grant-type specific client subclasses must override this to build
        the authorization request URI; the base class intentionally
        provides no implementation.

        :raises NotImplementedError: Always, unless overridden.
        """
        raise NotImplementedError("Must be implemented by inheriting classes.")
    def prepare_request_body(self, *args, **kwargs):
        """Abstract method used to create request bodies.

        Grant-type specific client subclasses must override this to build
        the (URL encoded) token request body; the base class intentionally
        provides no implementation.

        :raises NotImplementedError: Always, unless overridden.
        """
        raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
    def add_token(self, uri, http_method='GET', body=None, headers=None,
                  token_placement=None, **kwargs):
        """Add token to the request uri, body or authorization header.

        The access token type provides the client with the information
        required to successfully utilize the access token to make a protected
        resource request (along with type-specific attributes). The client
        MUST NOT use an access token if it does not understand the token
        type.

        For example, the "bearer" token type defined in
        [`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
        token string in the request:

        .. code-block:: http

            GET /resource/1 HTTP/1.1
            Host: example.com
            Authorization: Bearer mF_9.B5f-4.1JqM

        while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
        utilized by issuing a MAC key together with the access token which is
        used to sign certain components of the HTTP requests:

        .. code-block:: http

            GET /resource/1 HTTP/1.1
            Host: example.com
            Authorization: MAC id="h480djs93hd8",
                               nonce="274312:dj83hs9s",
                               mac="kDZvddkndxvhGRXZhvuDjEWhGeE="

        :param uri: Full request URI; the token may be appended to its query.
        :param http_method: HTTP verb of the request (signed by MAC tokens).
        :param body: Request body, used when placing the token in the body.
        :param headers: Request headers, used for the Authorization header.
        :param token_placement: One of the placement constants (auth header,
            URI query or body). Defaults to ``self.default_token_placement``.
        :returns: The (uri, headers, body) tuple with the token attached.
        :raises ValueError: If the token type is unsupported or no access
            token is available.
        :raises TokenExpiredError: If the token's known expiry time has passed.

        .. _`I-D.ietf-oauth-v2-bearer`: http://tools.ietf.org/html/rfc6749#section-12.2
        .. _`I-D.ietf-oauth-v2-http-mac`: http://tools.ietf.org/html/rfc6749#section-12.2
        """
        if not is_secure_transport(uri):
            raise InsecureTransportError()

        token_placement = token_placement or self.default_token_placement

        # Token type names are matched case-insensitively ("bearer" == "Bearer").
        case_insensitive_token_types = dict(
            (k.lower(), v) for k, v in self.token_types.items())

        if not self.token_type.lower() in case_insensitive_token_types:
            raise ValueError("Unsupported token type: %s" % self.token_type)

        if not self.access_token:
            raise ValueError("Missing access token.")

        # Fail fast on locally-known expiry instead of sending a doomed request.
        if self._expires_at and self._expires_at < time.time():
            raise TokenExpiredError()

        # Dispatch to _add_bearer_token / _add_mac_token via the token_types map.
        return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
                                                                     headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
self.scope = scope or self.scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_uri,
scope=self.scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
    def prepare_token_request(self, token_url, authorization_response=None,
                              redirect_url=None, state=None, body='', **kwargs):
        """Prepare a token creation request.

        Note that these requests usually require client authentication, either
        by including client_id or a set of provider specific authentication
        credentials.

        :param token_url: Provider token creation endpoint URL.
        :param authorization_response: The full redirection URL string, i.e.
            the location to which the user was redirected after successful
            authorization. Used to mine credentials needed to obtain a token
            in this step, such as authorization code.
        :param redirect_url: The redirect_url supplied with the authorization
            request (if there was one).
        :param state: State string used to verify the authorization response;
            defaults to the ``state`` attribute set during authorization.
        :param body: Request body (URL encoded string).
        :param kwargs: Additional parameters to be included in the request.
        :returns: The prepared request tuple with (url, headers, body).
        """
        if not is_secure_transport(token_url):
            raise InsecureTransportError()

        state = state or self.state
        # Mining the authorization response also verifies the CSRF state.
        if authorization_response:
            self.parse_request_uri_response(
                authorization_response, state=state)
        self.redirect_url = redirect_url or self.redirect_url
        body = self.prepare_request_body(body=body,
                                         redirect_uri=self.redirect_url, **kwargs)

        return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Request body (URL encoded string).
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
self.scope = scope or self.scope
body = self._client.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
    def prepare_token_revocation_request(self, revocation_url, token,
                                         token_type_hint="access_token", body='', callback=None, **kwargs):
        """Prepare a token revocation request.

        :param revocation_url: Provider token revocation endpoint URL.
        :param token: The access or refresh token to be revoked (string).
        :param token_type_hint: ``"access_token"`` (default) or
            ``"refresh_token"``. This is optional and if you wish to not pass it you
            must provide ``token_type_hint=None``.
        :param body: Request body (URL encoded string).
        :param callback: A jsonp callback such as ``package.callback`` to be invoked
            upon receiving the response. Note that it should not include a () suffix.
        :param kwargs: Additional parameters to be included in the request.
        :returns: The prepared request tuple with (url, headers, body).

        Note that JSONP request may use GET requests as the parameters will
        be added to the request URL query as opposed to the request body.

        An example of a revocation request

        .. code-block: http

            POST /revoke HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded
            Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW

            token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token

        An example of a jsonp revocation request

        .. code-block: http

            GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded
            Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW

        and an error response

        .. code-block: http

            package.myCallback({"error":"unsupported_token_type"});

        Note that these requests usually require client credentials, client_id in
        the case for public clients and provider specific authentication
        credentials for confidential clients.
        """
        if not is_secure_transport(revocation_url):
            raise InsecureTransportError()

        # Delegates to the module-level helper which builds the actual
        # (url, headers, body) tuple for POST or JSONP-style GET requests.
        return prepare_token_revocation_request(revocation_url, token,
                                                token_type_hint=token_type_hint, body=body, callback=callback,
                                                **kwargs)
    def parse_request_body_response(self, body, scope=None, **kwargs):
        """Parse the JSON response body.

        If the access token request is valid and authorized, the
        authorization server issues an access token as described in
        `Section 5.1`_.  A refresh token SHOULD NOT be included. If the request
        failed client authentication or is invalid, the authorization server
        returns an error response as described in `Section 5.2`_.

        :param body: The response body from the token request.
        :param scope: Scopes originally requested.
        :return: Dictionary of token parameters.
        :raises: Warning if scope has changed. OAuth2Error if response is invalid.

        These response are json encoded and could easily be parsed without
        the assistance of OAuthLib. However, there are a few subtle issues
        to be aware of regarding the response which are helpfully addressed
        through the raising of various errors.

        A successful response should always contain

        **access_token**
            The access token issued by the authorization server. Often
            a random string.

        **token_type**
            The type of the token issued as described in `Section 7.1`_.
            Commonly ``Bearer``.

        While it is not mandated it is recommended that the provider include

        **expires_in**
            The lifetime in seconds of the access token. For
            example, the value "3600" denotes that the access token will
            expire in one hour from the time the response was generated.
            If omitted, the authorization server SHOULD provide the
            expiration time via other means or document the default value.

        **scope**
            Providers may supply this in all responses but are required to only
            if it has changed since the authorization request.

        .. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
        .. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
        """
        # parse_token_response validates the body and warns on scope change;
        # the resulting dict is mirrored onto instance attributes below.
        self.token = parse_token_response(body, scope=scope)
        self._populate_attributes(self.token)
        return self.token
    def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
        """Prepare an access token request, using a refresh token.

        If the authorization server issued a refresh token to the client, the
        client makes a refresh request to the token endpoint by adding the
        following parameters using the "application/x-www-form-urlencoded"
        format in the HTTP request entity-body:

        grant_type
            REQUIRED.  Value MUST be set to "refresh_token".
        refresh_token
            REQUIRED.  The refresh token issued to the client.
        scope
            OPTIONAL.  The scope of the access request as described by
            Section 3.3.  The requested scope MUST NOT include any scope
            not originally granted by the resource owner, and if omitted is
            treated as equal to the scope originally granted by the
            resource owner.

        :param body: Existing request body to extend (URL encoded string).
        :param refresh_token: Refresh token string; falls back to the stored
            ``refresh_token`` attribute when not given.
        :param scope: Scopes to request, see above.
        :returns: The URL encoded request body.
        """
        refresh_token = refresh_token or self.refresh_token
        # Delegates to the module-level prepare_token_request helper with
        # the fixed "refresh_token" grant type.
        return prepare_token_request('refresh_token', body=body, scope=scope,
                                     refresh_token=refresh_token, **kwargs)
    def _add_bearer_token(self, uri, http_method='GET', body=None,
                          headers=None, token_placement=None):
        """Add a bearer token to the request uri, body or authorization header.

        Exactly one of the three placements is used, selected by
        ``token_placement``; any other value is rejected.

        :returns: The (uri, headers, body) tuple with the token attached.
        :raises ValueError: If ``token_placement`` is not a known constant.
        """
        if token_placement == AUTH_HEADER:
            headers = tokens.prepare_bearer_headers(self.access_token, headers)

        elif token_placement == URI_QUERY:
            uri = tokens.prepare_bearer_uri(self.access_token, uri)

        elif token_placement == BODY:
            body = tokens.prepare_bearer_body(self.access_token, body)

        else:
            raise ValueError("Invalid token placement.")
        return uri, headers, body
    def _add_mac_token(self, uri, http_method='GET', body=None,
                       headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
        """Add a MAC token to the request authorization header.

        Unlike bearer tokens, MAC tokens can only be placed in the
        Authorization header: the header signs the request components
        (uri, method, body) with ``self.mac_key`` / ``self.mac_algorithm``.

        Warning: MAC token support is experimental as the spec is not yet stable.
        """
        headers = tokens.prepare_mac_header(self.access_token, uri,
                                            self.mac_key, http_method, headers=headers, body=body, ext=ext,
                                            hash_algorithm=self.mac_algorithm, **kwargs)
        return uri, headers, body
def _populate_attributes(self, response):
"""Add commonly used values such as access_token to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'code' in response:
self.code = response.get('code')
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
| mit | e0d1f37e13707cd4a0e25fb9b326efdc | 40.538776 | 107 | 0.647735 | 4.479313 | false | false | false | false |
kivy/kivy | examples/gestures/gesture_board.py | 1 | 2833 | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Color, Ellipse, Line
from kivy.gesture import Gesture, GestureDatabase
from my_gestures import cross, circle, check, square
def simplegesture(name, point_list):
    """Build a normalized single-stroke Gesture named *name* from *point_list*."""
    gesture = Gesture()
    gesture.add_stroke(point_list)
    gesture.normalize()
    gesture.name = name
    return gesture
class GestureBoard(FloatLayout):
    """
    Our application main widget, derived from touchtracer example, use data
    constructed from touches to match symbols loaded from my_gestures.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): args/kwargs are intentionally not forwarded to
        # FloatLayout here; confirm no caller relies on positional or
        # keyword widget options.
        super(GestureBoard, self).__init__()
        self.gdb = GestureDatabase()
        # add pre-recorded gestures to database
        self.gdb.add_gesture(cross)
        self.gdb.add_gesture(check)
        self.gdb.add_gesture(circle)
        self.gdb.add_gesture(square)

    def on_touch_down(self, touch):
        """Start a new stroke: draw a marker dot and a line that will
        follow the touch, collecting the points in touch.ud."""
        userdata = touch.ud
        with self.canvas:
            Color(1, 1, 0)
            d = 30.
            Ellipse(pos=(touch.x - d / 2, touch.y - d / 2), size=(d, d))
            userdata['line'] = Line(points=(touch.x, touch.y))
        return True

    def on_touch_move(self, touch):
        """Extend the current stroke with the new touch position."""
        try:
            touch.ud['line'].points += [touch.x, touch.y]
            return True
        # Idiom fix: no need for parentheses or the unused ``as e`` binding.
        except KeyError:
            # Touch did not start on this widget (no 'line' in ud); ignore.
            pass

    def on_touch_up(self, touch):
        """Stroke finished: build a gesture from the collected points and
        match it against the pre-recorded gestures."""
        g = simplegesture('', list(zip(touch.ud['line'].points[::2],
                                       touch.ud['line'].points[1::2])))
        # gestures to my_gestures.py
        print("gesture representation:", self.gdb.gesture_to_str(g))
        # print match scores between all known gestures
        print("cross:", g.get_score(cross))
        print("check:", g.get_score(check))
        print("circle:", g.get_score(circle))
        print("square:", g.get_score(square))
        # use database to find the more alike gesture, if any
        g2 = self.gdb.find(g, minscore=0.70)
        print(g2)
        if g2:
            if g2[1] == circle:
                print("circle")
            if g2[1] == square:
                print("square")
            if g2[1] == check:
                print("check")
            if g2[1] == cross:
                print("cross")
        # erase the lines on the screen, this is a bit quick&dirty, since we
        # can have another touch event on the way...
        self.canvas.clear()
class DemoGesture(App):
    """Minimal Kivy app shell whose root widget is a single GestureBoard."""

    def build(self):
        return GestureBoard()


if __name__ == '__main__':
    DemoGesture().run()
| mit | 12c1c0903ee478f7c161bdd6d7473d8a | 28.821053 | 76 | 0.569714 | 3.712975 | false | false | false | false |
kivy/kivy | examples/keyboard/main.py | 5 | 7597 | """
Custom Keyboards
================
This demo shows how to create and display custom keyboards on screen.
Note that the new "input_type" property of the TextInput means that this
is rarely needed. We provide this demo for the sake of completeness.
"""
# Author: Zen-CODE
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.vkeyboard import VKeyboard
from kivy.properties import ObjectProperty
from kivy.uix.button import Button
from functools import partial
from kivy.config import Config
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy import require
# This example uses features introduced in Kivy 1.8.0, namely being able
# to load custom json keyboard layout files from the app folder, so abort
# early on older Kivy versions.
require("1.8.0")
Builder.load_string('''
<KeyboardScreen>:
displayLabel: displayLabel
kbContainer: kbContainer
BoxLayout:
orientation: 'vertical'
Label:
size_hint_y: 0.15
text: "Available Keyboard Layouts"
BoxLayout:
id: kbContainer
size_hint_y: 0.2
orientation: "horizontal"
padding: 10
Label:
id: displayLabel
size_hint_y: 0.15
markup: True
text: "[b]Key pressed[/b] - None"
halign: "center"
Button:
text: "Back"
size_hint_y: 0.1
on_release: root.manager.current = "mode"
Widget:
# Just a space taker to allow for the popup keyboard
size_hint_y: 0.5
<ModeScreen>:
center_label: center_label
mode_spinner: mode_spinner
FloatLayout:
BoxLayout:
orientation: "vertical"
size_hint: 0.8, 0.8
pos_hint: {"x": 0.1, "y": 0.1}
padding: "5sp"
spacing: "5sp"
Label:
canvas:
Color:
rgba: 0, 0, 1, 0.3
Rectangle:
pos: self.pos
size: self.size
text: "Custom Keyboard Demo"
size_hint_y: 0.1
Label:
id: center_label
markup: True
size_hint_y: 0.6
BoxLayout:
orientation: "horizontal"
size_hint_y: 0.1
padding: "5sp"
Widget:
size_hint_x: 0.2
Label:
text: "Current keyboard mode :"
Spinner:
id: mode_spinner
values: "''", "'dock'", "'system'", "'systemanddock'",\
"'systemandmulti'"
Button:
text: "Set"
on_release: root.set_mode(mode_spinner.text)
Widget:
size_hint_x: 0.2
Widget:
size_hint_y: 0.1
BoxLayout:
orientation: "horizontal"
size_hint_y: 0.1
Button:
text: "Exit"
on_release: exit()
Button:
text: "Continue"
on_release: root.next()
''')
class ModeScreen(Screen):
    """
    Present the option to change keyboard mode and warn of system-wide
    consequences.
    """
    # Bound from kv: label showing the explanation text.
    center_label = ObjectProperty()
    # Bound from kv: spinner listing the selectable keyboard modes.
    mode_spinner = ObjectProperty()
    # Keyboard mode read from the Kivy config on entering the screen.
    keyboard_mode = ""

    def on_pre_enter(self, *args):
        """ Detect the current keyboard mode and set the text of the main
        label accordingly. """
        self.keyboard_mode = Config.get("kivy", "keyboard_mode")
        self.mode_spinner.text = "'{0}'".format(self.keyboard_mode)

        p1 = "Current keyboard mode: '{0}'\n\n".format(self.keyboard_mode)
        if self.keyboard_mode in ['dock', 'system', 'systemanddock']:
            p2 = "You have the right setting to use this demo.\n\n"
        else:
            p2 = "You need the keyboard mode to 'dock', 'system' or '"\
                 "'systemanddock'(below)\n in order to "\
                 "use custom onscreen keyboards.\n\n"

        p3 = "[b][color=#ff0000]Warning:[/color][/b] This is a system-wide " \
             "setting and will affect all Kivy apps. If you change the\n" \
             " keyboard mode, please use this app" \
             " to reset this value to its original one."
        self.center_label.text = "".join([p1, p2, p3])

    def set_mode(self, mode):
        """ Sets the keyboard mode to the one specified """
        # Spinner values are quoted (e.g. "'dock'"); strip the quotes
        # before persisting the raw mode name to the Kivy config.
        Config.set("kivy", "keyboard_mode", mode.replace("'", ""))
        Config.write()
        self.center_label.text = "Please restart the application for this\n" \
                                 "setting to take effect."

    def next(self):
        """ Continue to the main screen """
        self.manager.current = "keyboard"
class KeyboardScreen(Screen):
    """
    Screen containing all the available keyboard layouts. Clicking the buttons
    switches to these layouts.
    """
    # Bound from kv: label echoing the last key event.
    displayLabel = ObjectProperty()
    # Bound from kv: container for the per-layout buttons.
    kbContainer = ObjectProperty()

    def __init__(self, **kwargs):
        super(KeyboardScreen, self).__init__(**kwargs)
        self._add_keyboards()
        # Currently requested keyboard (VKeyboard widget or Keyboard proxy).
        self._keyboard = None

    def _add_keyboards(self):
        """ Add a button for each available keyboard layout. When clicked,
        the buttons will change the keyboard layout to the one selected. """
        layouts = list(VKeyboard().available_layouts.keys())
        # Add the file in our app directory, the .json extension is required.
        layouts.append("numeric.json")
        for key in layouts:
            self.kbContainer.add_widget(
                Button(
                    text=key,
                    on_release=partial(self.set_layout, key)))

    def set_layout(self, layout, button):
        """ Change the keyboard layout to the one specified by *layout*. """
        kb = Window.request_keyboard(
            self._keyboard_close, self)
        if kb.widget:
            # If the current configuration supports Virtual Keyboards, this
            # widget will be a kivy.uix.vkeyboard.VKeyboard instance.
            self._keyboard = kb.widget
            self._keyboard.layout = layout
        else:
            self._keyboard = kb

        self._keyboard.bind(on_key_down=self.key_down,
                            on_key_up=self.key_up)

    def _keyboard_close(self, *args):
        """ The active keyboard is being closed: unbind our handlers. """
        if self._keyboard:
            self._keyboard.unbind(on_key_down=self.key_down)
            self._keyboard.unbind(on_key_up=self.key_up)
            self._keyboard = None

    def key_down(self, keyboard, keycode, text, modifiers):
        """ The callback function that catches key-down events. """
        self.displayLabel.text = u"Key pressed - {0}".format(text)

    def key_up(self, keyboard, keycode, *args):
        """ The callback function that catches key-up events. """
        # system keyboard keycode: (122, 'z')
        # dock keyboard keycode: 'z'
        if isinstance(keycode, tuple):
            keycode = keycode[1]
        self.displayLabel.text += u" (up {0})".format(keycode)
class KeyboardDemo(App):
    sm = None  # The root screen manager, created in build()

    def build(self):
        """Create the ScreenManager holding the mode and keyboard screens."""
        self.sm = ScreenManager()
        self.sm.add_widget(ModeScreen(name="mode"))
        self.sm.add_widget(KeyboardScreen(name="keyboard"))
        # Start on the mode screen so the user can verify keyboard_mode first.
        self.sm.current = "mode"
        return self.sm


if __name__ == "__main__":
    KeyboardDemo().run()
| mit | 8faf1a4cb6ebf68a6c817470047e7ef6 | 32.615044 | 78 | 0.55206 | 4.155908 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/cssutils/css/csscomment.py | 10 | 2834 | """CSSComment is not defined in DOM Level 2 at all but a cssutils defined
class only.
Implements CSSRule which is also extended for a CSSComment rule type.
"""
__all__ = ['CSSComment']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import cssrule
import cssutils
import xml.dom
class CSSComment(cssrule.CSSRule):
    """
    Represents a CSS comment (cssutils only).

    Format::

        /*...*/
    """
    def __init__(self, cssText=None, parentRule=None,
                 parentStyleSheet=None, readonly=False):
        super(CSSComment, self).__init__(parentRule=parentRule,
                                         parentStyleSheet=parentStyleSheet)
        # Raw comment text; set via the cssText property setter.
        self._cssText = None
        if cssText:
            self._setCssText(cssText)

        self._readonly = readonly

    def __repr__(self):
        return u"cssutils.css.%s(cssText=%r)" % (
                self.__class__.__name__,
                self.cssText)

    def __str__(self):
        return u"<cssutils.css.%s object cssText=%r at 0x%x>" % (
                self.__class__.__name__,
                self.cssText,
                id(self))

    def _getCssText(self):
        """Return serialized property cssText."""
        return cssutils.ser.do_CSSComment(self)

    def _setCssText(self, cssText):
        """
        :param cssText:
            textual text to set or tokenlist which is not tokenized
            anymore. May also be a single token for this rule
        :exceptions:
            - :exc:`~xml.dom.SyntaxErr`:
              Raised if the specified CSS string value has a syntax error and
              is unparsable.
            - :exc:`~xml.dom.InvalidModificationErr`:
              Raised if the specified CSS string value represents a different
              type of rule than the current one.
            - :exc:`~xml.dom.NoModificationAllowedErr`:
              Raised if the rule is readonly.
        """
        super(CSSComment, self)._setCssText(cssText)

        tokenizer = self._tokenize2(cssText)
        commenttoken = self._nexttoken(tokenizer)
        unexpected = self._nexttoken(tokenizer)

        # Valid input is exactly one COMMENT token and nothing after it.
        if not commenttoken or\
           self._type(commenttoken) != self._prods.COMMENT or\
           unexpected:
            self._log.error(u'CSSComment: Not a CSSComment: %r' %
                            self._valuestr(cssText),
                            error=xml.dom.InvalidModificationErr)
        else:
            self._cssText = self._tokenvalue(commenttoken)

    cssText = property(_getCssText, _setCssText,
                       doc=u"The parsable textual representation of this rule.")

    type = property(lambda self: self.COMMENT,
                    doc=u"The type of this rule, as defined by a CSSRule "
                        u"type constant.")

    # constant but needed:
    wellformed = property(lambda self: True)
| mit | 45e7391c8a16b8b83de32592dffc25ab | 31.574713 | 77 | 0.577276 | 4.236173 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/cssutils/tests/test_codec.py | 3 | 15343 | """Testcases for cssutils.codec"""
import codecs
import unittest
import sys
# True when running under Python 2.x; selects the bytes-capable StringIO
# implementation for each major version.
PY2x = sys.version_info < (3,0)
if PY2x:
    import StringIO
    iostream = StringIO.StringIO
else:
    import io
    iostream = io.BytesIO

from cssutils import codec

# Not every build of Python ships the utf-32 codecs; guard the tests that
# depend on them behind this flag.
try:
    codecs.lookup("utf-32")
except LookupError:
    haveutf32 = False
else:
    haveutf32 = True
class Queue(object):
    """FIFO byte queue: bytes written at one end are read from the other."""

    def __init__(self):
        self._buffer = b""

    def write(self, chars):
        # On Python 3, normalize text and bare ints (as produced by
        # iterating a bytes object) to bytes before appending; on
        # Python 2 (where str is bytes) byte strings pass through.
        if str is not bytes:
            if isinstance(chars, str):
                chars = chars.encode()
            elif isinstance(chars, int):
                chars = bytes([chars])
        self._buffer += chars

    def read(self, size=-1):
        # A negative size drains the whole buffer, mirroring file.read().
        if size < 0:
            data, self._buffer = self._buffer, b""
        else:
            data, self._buffer = self._buffer[:size], self._buffer[size:]
        return data
class CodecTestCase(unittest.TestCase):
    def test_detectencoding_str(self):
        """codec.detectencoding_str(): encoding detection on byte input via
        BOM sniffing and incremental '@charset' parsing. Each result is a
        (encoding-or-None, final) tuple; None means "need more bytes"."""
        # Empty / ambiguous prefixes: no decision yet.
        self.assertEqual(codec.detectencoding_str(u''.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\xef'.encode('latin1')), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\xef\x33'.encode("utf-8")), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'\xc3\xaf3'.encode("utf-8")), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'\xef\xbb'.encode("latin1")), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\xef\xbb\x33'.encode("utf-8")), ("utf-8", False))
        # A complete UTF-8 BOM is a definite (final=True) answer.
        self.assertEqual(codec.detectencoding_str(u'\xef\xbb\xbf'.encode("utf-8-sig")), ("utf-8-sig", True))
        self.assertEqual(codec.detectencoding_str(u'\xff'.encode("latin1")), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\xff\x33'.encode("utf-8")), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'\xff\xfe'.encode("latin1")), (None, False))
        # UTF-16/32 little-endian BOM prefixes.
        self.assertEqual(codec.detectencoding_str(u'\xff\xfe\x33'.encode("utf-16")), ("utf-16", True))
        self.assertEqual(codec.detectencoding_str(u'\xff\xfe\x00'.encode("latin1")), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\xff\xfe\x00\x33'.encode("utf-16")), ("utf-16", True))
        if haveutf32:
            self.assertEqual(codec.detectencoding_str(u'\xff\xfe\x00\x00'.encode("utf-32")), ("utf-32", True))
        # NUL-byte patterns reveal big-endian wide encodings.
        self.assertEqual(codec.detectencoding_str(u'\x00'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\x00\x33'.encode()), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'\x00\x00'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\x00\x00\x33'.encode()), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'\x00\x00\xfe'.encode('latin1')), (None, False))
        self.assertEqual(codec.detectencoding_str(u'\x00\x00\x00\x33'.encode()), ("utf-8", False))
        if haveutf32:
            self.assertEqual(codec.detectencoding_str(u'\x00\x00\x00@'.encode()), ("utf-32-be", False))
            self.assertEqual(codec.detectencoding_str(u'\x00\x00\xfe\xff'.encode('utf-32')), ("utf-32", True))
        # '@' ('@charset' start) interleaved with NULs suggests wide encodings.
        self.assertEqual(codec.detectencoding_str(u'@'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@\x33'.encode()), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'@\x00'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@\x00\x33'.encode()), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u'@\x00\x00'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@\x00\x00\x33'.encode()), ("utf-8", False))
        if haveutf32:
            self.assertEqual(codec.detectencoding_str(u'@\x00\x00\x00'.encode()), ("utf-32-le", False))
        # Incremental '@charset "..."' parsing: undecided until the closing quote.
        self.assertEqual(codec.detectencoding_str(u'@c'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@ch'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@cha'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@char'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@chars'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charse'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charset'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charset '.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charset "'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charset "x'.encode()), (None, False))
        self.assertEqual(codec.detectencoding_str(u'@charset ""'.encode()), ("", True))
        self.assertEqual(codec.detectencoding_str(u'@charset "x"'.encode()), ("x", True))
        # final=True forces a decision (default utf-8) on otherwise ambiguous input.
        self.assertEqual(codec.detectencoding_str(u"@".encode(), False), (None, False))
        self.assertEqual(codec.detectencoding_str(u"@".encode(), True), ("utf-8", False))
        self.assertEqual(codec.detectencoding_str(u"@c".encode(), False), (None, False))
        self.assertEqual(codec.detectencoding_str(u"@c".encode(), True), ("utf-8", False))
    def test_detectencoding_unicode(self):
        """codec.detectencoding_unicode(): on text input only the
        '@charset' header is parsed (there is no BOM to sniff)."""
        # Unicode version (only parses the header)
        self.assertEqual(codec.detectencoding_unicode(u'@charset "x'), (None, False))
        self.assertEqual(codec.detectencoding_unicode(u'a {}'), ("utf-8", False))
        # final=True still cannot decide an unterminated charset string.
        self.assertEqual(codec.detectencoding_unicode(u'@charset "x', True), (None, False))
        self.assertEqual(codec.detectencoding_unicode(u'@charset "x"'), ("x", True))
    def test_fixencoding(self):
        """codec._fixencoding(): rewrite the encoding name inside an
        '@charset "..."' rule; None means "input incomplete, undecidable"."""
        # Unterminated charset strings cannot be fixed yet.
        s = u'@charset "'
        self.assertTrue(codec._fixencoding(s, u"utf-8") is None)

        s = u'@charset "x'
        self.assertTrue(codec._fixencoding(s, u"utf-8") is None)

        # ...unless final=True, in which case the input passes unchanged.
        s = u'@charset "x'
        self.assertEqual(codec._fixencoding(s, u"utf-8", True), s)

        # Malformed rule (no quotes): returned as-is.
        s = u'@charset x'
        self.assertEqual(codec._fixencoding(s, u"utf-8"), s)

        # Complete rule: the declared name is replaced.
        s = u'@charset "x"'
        self.assertEqual(codec._fixencoding(s, u"utf-8"), s.replace('"x"', '"utf-8"'))
    def test_decoder(self):
        """The "css" decoder in all three styles (stateless, incremental,
        stream reader), with both autodetected and explicit encodings."""

        def checkauto(encoding, input=u'@charset "x";g\xfcrk\u20ac{}'):
            # Decoding rewrites the placeholder charset "x" to the encoding
            # that was actually used (utf-8-sig reports as plain utf-8).
            outputencoding = encoding
            if outputencoding == "utf-8-sig":
                outputencoding = "utf-8"
            # Check stateless decoder with encoding autodetection
            d = codecs.getdecoder("css")
            self.assertEqual(d(input.encode(encoding))[0], input.replace('"x"', '"%s"' % outputencoding))

            # Check stateless decoder with specified encoding
            self.assertEqual(d(input.encode(encoding), encoding=encoding)[0], input.replace('"x"', '"%s"' % outputencoding))

            if hasattr(codec, "getincrementaldecoder"):
                # Check incremental decoder with encoding autodetection
                id = codecs.getincrementaldecoder("css")()
                self.assertEqual("".join(id.iterdecode(input.encode(encoding))), input.replace('"x"', '"%s"' % outputencoding))

                # Check incremental decoder with specified encoding
                id = codecs.getincrementaldecoder("css")(encoding=encoding)
                self.assertEqual("".join(id.iterdecode(input.encode(encoding))), input.replace('"x"', '"%s"' % outputencoding))

            # Check stream reader with encoding autodetection, feeding one
            # byte at a time through the Queue (Queue.write handles the
            # int-per-byte iteration behaviour of Python 3).
            q = Queue()
            sr = codecs.getreader("css")(q)
            result = []
            for c in input.encode(encoding):
                q.write(c)
                result.append(sr.read())
            self.assertEqual("".join(result), input.replace('"x"', '"%s"' % outputencoding))

            # Check stream reader with specified encoding
            q = Queue()
            sr = codecs.getreader("css")(q, encoding=encoding)
            result = []
            for c in input.encode(encoding):
                q.write(c)
                result.append(sr.read())
            self.assertEqual("".join(result), input.replace('"x"', '"%s"' % outputencoding))

        # Autodetectable encodings
        checkauto("utf-8-sig")
        checkauto("utf-16")
        checkauto("utf-16-le")
        checkauto("utf-16-be")
        if haveutf32:
            checkauto("utf-32")
            checkauto("utf-32-le")
            checkauto("utf-32-be")

        def checkdecl(encoding, input=u'@charset "%s";g\xfcrk{}'):
            # Here the input already declares the correct encoding, so the
            # decoded output must equal the input verbatim.
            # Check stateless decoder with encoding autodetection
            d = codecs.getdecoder("css")
            input = input % encoding
            # NOTE(review): outputencoding is computed but unused here.
            outputencoding = encoding
            if outputencoding == "utf-8-sig":
                outputencoding = "utf-8"
            self.assertEqual(d(input.encode(encoding))[0], input)

            # Check stateless decoder with specified encoding
            self.assertEqual(d(input.encode(encoding), encoding=encoding)[0], input)

            if hasattr(codec, "getincrementaldecoder"):
                # Check incremental decoder with encoding autodetection
                id = codecs.getincrementaldecoder("css")()
                self.assertEqual("".join(id.iterdecode(input.encode(encoding))), input)

                # Check incremental decoder with specified encoding
                id = codecs.getincrementaldecoder("css")(encoding)
                self.assertEqual("".join(id.iterdecode(input.encode(encoding))), input)

            # Check stream reader with encoding autodetection
            q = Queue()
            sr = codecs.getreader("css")(q)
            result = []
            for c in input.encode(encoding):
                q.write(c)
                result.append(sr.read())
            self.assertEqual("".join(result), input)

            # Check stream reader with specified encoding
            q = Queue()
            sr = codecs.getreader("css")(q, encoding=encoding)
            result = []
            for c in input.encode(encoding):
                q.write(c)
                result.append(sr.read())
            self.assertEqual("".join(result), input)

        # Use correct declaration
        checkdecl("utf-8")
        checkdecl("iso-8859-1", u'@charset "%s";g\xfcrk')
        checkdecl("iso-8859-15")
        checkdecl("cp1252")

        # No recursion: declaring "css" as the charset must be rejected.
        self.assertRaises(ValueError, u'@charset "css";div{}'.encode().decode, "css")
def test_encoder(self):
"codec.encoder"
def check(encoding, input=u'@charset "x";g\xfcrk\u20ac{}'):
outputencoding = encoding
if outputencoding == "utf-8-sig":
outputencoding = "utf-8"
# Check stateless encoder with encoding autodetection
e = codecs.getencoder("css")
inputdecl = input.replace('"x"', '"%s"' % encoding)
outputdecl = input.replace('"x"', '"%s"' % outputencoding)
self.assertEqual(e(inputdecl)[0].decode(encoding), outputdecl)
# Check stateless encoder with specified encoding
self.assertEqual(e(input, encoding=encoding)[0].decode(encoding), outputdecl)
if hasattr(codec, "getincrementalencoder"):
# Check incremental encoder with encoding autodetection
ie = codecs.getincrementalencoder("css")()
self.assertEqual("".join(ie.iterencode(inputdecl)).decode(encoding), outputdecl)
# Check incremental encoder with specified encoding
ie = codecs.getincrementalencoder("css")(encoding=encoding)
self.assertEqual("".join(ie.iterencode(input)).decode(encoding), outputdecl)
# Check stream writer with encoding autodetection
q = Queue()
sw = codecs.getwriter("css")(q)
for c in inputdecl:#.encode(outputencoding): # TODO: .encode()???
sw.write(c)
self.assertEqual(q.read().decode(encoding), input.replace('"x"', '"%s"' % outputencoding))
# Check stream writer with specified encoding
q = Queue()
sw = codecs.getwriter("css")(q, encoding=encoding)
for c in input:
sw.write(c)
self.assertEqual(q.read().decode(encoding), input.replace('"x"', '"%s"' % outputencoding))
# Autodetectable encodings
check("utf-8-sig")
check("utf-16")
check("utf-16-le")
check("utf-16-be")
if haveutf32:
check("utf-32")
check("utf-32-le")
check("utf-32-be")
check("utf-8")
check("iso-8859-1", u'@charset "x";g\xfcrk{}')
check("iso-8859-15")
check("cp1252")
# No recursion
self.assertRaises(ValueError, u'@charset "css";div{}'.encode, "css")
def test_decode_force(self):
"codec.decode (force)"
info = codecs.lookup("css")
def decodeall(input, **kwargs):
# Py 2.5: info.decode('@charset "utf-8"; x')
return info[1](input, **kwargs)[0]
def incdecode(input, **kwargs):
decoder = info.incrementaldecoder(**kwargs)
return decoder.decode(input)
def streamdecode(input, **kwargs):
stream = iostream(input) # py3 .decode('utf-8') but still error?!
reader = info.streamreader(stream, **kwargs)
return reader.read()
for d in (decodeall, incdecode, streamdecode):
# input = '@charset "utf-8"; \xc3\xbf'
# output = u'@charset "utf-8"; \xff'
# self.assertEqual(d(input), output)
#
# input = '@charset "utf-8"; \xc3\xbf'
# output = u'@charset "iso-8859-1"; \xc3\xbf'
# self.assertEqual(d(input, encoding="iso-8859-1", force=True), output)
#
# input = '\xc3\xbf'
# output = u'\xc3\xbf'
# self.assertEqual(d(input, encoding="iso-8859-1", force=True), output)
#
# input = '@charset "utf-8"; \xc3\xbf'
# output = u'@charset "utf-8"; \xff'
# self.assertEqual(d(input, encoding="iso-8859-1", force=False), output)
input = u'@charset "utf-8"; \xff'.encode('utf-8')
output = u'@charset "utf-8"; \xff'
self.assertEqual(d(input), output)
#input = b'@charset "utf-8"; \xc3\xbf'
input = u'@charset "utf-8"; \xff'.encode('utf-8')
output = u'@charset "iso-8859-1"; \xc3\xbf'
self.assertEqual(d(input, encoding="iso-8859-1", force=True), output)
#input = b'\xc3\xbf'
input = u'\xff'.encode('utf-8')
output = u'\xc3\xbf'
self.assertEqual(d(input, encoding="iso-8859-1", force=True), output)
#input = b'@charset "utf-8"; \xc3\xbf'
input = u'@charset "utf-8"; \xff'.encode('utf-8')
output = u'@charset "utf-8"; \xff'
self.assertEqual(d(input, encoding="iso-8859-1", force=False), output)
if __name__ == '__main__':
import unittest
unittest.main()
| mit | ea461feeea64df064dc170bd582b2fdd | 43.862573 | 127 | 0.58398 | 3.680259 | false | false | false | false |
kivy/kivy | kivy/core/video/video_ffmpeg.py | 4 | 2692 | '''
FFmpeg video abstraction
========================
.. versionadded:: 1.0.8
This abstraction requires ffmpeg python extensions. We have made a special
extension that is used for the android platform but can also be used on x86
platforms. The project is available at::
http://github.com/tito/ffmpeg-android
The extension is designed for implementing a video player.
Refer to the documentation of the ffmpeg-android project for more information
about the requirements.
'''
try:
import ffmpeg
except:
raise
from kivy.core.video import VideoBase
from kivy.graphics.texture import Texture
class VideoFFMpeg(VideoBase):
def __init__(self, **kwargs):
self._do_load = False
self._player = None
super(VideoFFMpeg, self).__init__(**kwargs)
def unload(self):
if self._player:
self._player.stop()
self._player = None
self._state = ''
self._do_load = False
def load(self):
self.unload()
def play(self):
if self._player:
self.unload()
self._player = ffmpeg.FFVideo(self._filename)
self._player.set_volume(self._volume)
self._do_load = True
def stop(self):
self.unload()
def seek(self, percent, precise=True):
if self._player is None:
return
self._player.seek(percent)
def _do_eos(self):
self.unload()
self.dispatch('on_eos')
super(VideoFFMpeg, self)._do_eos()
def _update(self, dt):
if self._do_load:
self._player.open()
self._do_load = False
return
player = self._player
if player is None:
return
if not player.is_open:
self._do_eos()
return
frame = player.get_next_frame()
if frame is None:
return
# first time we got a frame, we know that video is read now.
if self._texture is None:
self._texture = Texture.create(size=(
player.get_width(), player.get_height()),
colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
self._texture.blit_buffer(frame)
self.dispatch('on_frame')
def _get_duration(self):
if self._player is None:
return 0
return self._player.get_duration()
def _get_position(self):
if self._player is None:
return 0
return self._player.get_position()
def _set_volume(self, value):
self._volume = value
if self._player:
self._player.set_volume(self._volume)
| mit | 39c09956c99cc3dc6c46d5a31876cb5b | 24.396226 | 77 | 0.572065 | 4.091185 | false | false | false | false |
kivy/kivy | examples/canvas/circle.py | 21 | 2155 | '''
Circle Example
==============
This example exercises circle (ellipse) drawing. You should see sliders at the
top of the screen with the Kivy logo below it. The sliders control the
angle start and stop and the height and width scales. There is a button
to reset the sliders. The logo used for the circle's background image is
from the kivy/data directory. The entire example is coded in the
kv language description.
'''
from kivy.app import App
from kivy.lang import Builder
kv = '''
BoxLayout:
orientation: 'vertical'
BoxLayout:
size_hint_y: None
height: sp(100)
BoxLayout:
orientation: 'vertical'
Slider:
id: e1
min: -360.
max: 360.
Label:
text: 'angle_start = {}'.format(e1.value)
BoxLayout:
orientation: 'vertical'
Slider:
id: e2
min: -360.
max: 360.
value: 360
Label:
text: 'angle_end = {}'.format(e2.value)
BoxLayout:
size_hint_y: None
height: sp(100)
BoxLayout:
orientation: 'vertical'
Slider:
id: wm
min: 0
max: 2
value: 1
Label:
text: 'Width mult. = {}'.format(wm.value)
BoxLayout:
orientation: 'vertical'
Slider:
id: hm
min: 0
max: 2
value: 1
Label:
text: 'Height mult. = {}'.format(hm.value)
Button:
text: 'Reset ratios'
on_press: wm.value = 1; hm.value = 1
FloatLayout:
canvas:
Color:
rgb: 1, 1, 1
Ellipse:
pos: 100, 100
size: 200 * wm.value, 201 * hm.value
source: 'data/logo/kivy-icon-512.png'
angle_start: e1.value
angle_end: e2.value
'''
class CircleApp(App):
def build(self):
return Builder.load_string(kv)
CircleApp().run()
| mit | 2f3dea453661ff8076d72832f0ed8218 | 24.654762 | 78 | 0.481206 | 4.233792 | false | false | false | false |
kivy/kivy | examples/widgets/codeinput.py | 21 | 7046 | from kivy.app import App
from kivy.extras.highlight import KivyLexer
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.codeinput import CodeInput
from kivy.uix.behaviors import EmacsBehavior
from kivy.uix.popup import Popup
from kivy.properties import ListProperty
from kivy.core.window import Window
from kivy.core.text import LabelBase
from pygments import lexers
import codecs
import os
example_text = '''
---------------------Python----------------------------------
import kivy
kivy.require('1.0.6') # replace with your current kivy version !
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
return Button(text='Hello World')
if __name__ == '__main__':
MyApp().run()
----------------------Java-----------------------------------
public static byte toUnsignedByte(int intVal) {
byte byteVal;
return (byte)(intVal & 0xFF);
}
---------------------kv lang---------------------------------
#:kivy 1.0
<YourWidget>:
canvas:
Color:
rgb: .5, .5, .5
Rectangle:
pos: self.pos
size: self.size
---------------------HTML------------------------------------
<!-- Place this tag where you want the +1 button to render. -->
<div class="g-plusone" data-annotation="inline" data-width="300"></div>
<!-- Place this tag after the last +1 button tag. -->
<script type="text/javascript">
(function() {
var po = document.createElement('script');
po.type = 'text/javascript';
po.async = true;
po.src = 'https://apis.google.com/js/plusone.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(po, s);
})();
</script>
----------------------Emacs key bindings---------------------
This CodeInput inherits from EmacsBehavior, so you can use Emacs key bindings
if you want! To try out Emacs key bindings, set the "Key bindings" option to
"Emacs". Experiment with the shortcuts below on some of the text in this window
(just be careful not to delete the cheat sheet before you have made note of the
commands!)
Shortcut Description
-------- -----------
Control + a Move cursor to the beginning of the line
Control + e Move cursor to the end of the line
Control + f Move cursor one character to the right
Control + b Move cursor one character to the left
Alt + f Move cursor to the end of the word to the right
Alt + b Move cursor to the start of the word to the left
Alt + Backspace Delete text left of the cursor to the beginning of word
Alt + d Delete text right of the cursor to the end of the word
Alt + w Copy selection
Control + w Cut selection
Control + y Paste selection
'''
class Fnt_SpinnerOption(SpinnerOption):
pass
class LoadDialog(Popup):
def load(self, path, selection):
self.choosen_file = [None, ]
self.choosen_file = selection
Window.title = selection[0][selection[0].rfind(os.sep) + 1:]
self.dismiss()
def cancel(self):
self.dismiss()
class SaveDialog(Popup):
def save(self, path, selection):
_file = codecs.open(selection, 'w', encoding='utf8')
_file.write(self.text)
Window.title = selection[selection.rfind(os.sep) + 1:]
_file.close()
self.dismiss()
def cancel(self):
self.dismiss()
class CodeInputWithBindings(EmacsBehavior, CodeInput):
'''CodeInput with keybindings.
To add more bindings, add the behavior before CodeInput in the class
definition.
'''
pass
class CodeInputTest(App):
files = ListProperty([None, ])
def build(self):
b = BoxLayout(orientation='vertical')
languages = Spinner(
text='language',
values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))
languages.bind(text=self.change_lang)
menu = BoxLayout(
size_hint_y=None,
height='30pt')
fnt_size = Spinner(
text='12',
values=list(map(str, list(range(5, 40)))))
fnt_size.bind(text=self._update_size)
fonts = [
file for file in LabelBase._font_dirs_files
if file.endswith('.ttf')]
fnt_name = Spinner(
text='RobotoMono',
option_cls=Fnt_SpinnerOption,
values=fonts)
fnt_name.bind(text=self._update_font)
mnu_file = Spinner(
text='File',
values=('Open', 'SaveAs', 'Save', 'Close'))
mnu_file.bind(text=self._file_menu_selected)
key_bindings = Spinner(
text='Key bindings',
values=('Default key bindings', 'Emacs key bindings'))
key_bindings.bind(text=self._bindings_selected)
menu.add_widget(mnu_file)
menu.add_widget(fnt_size)
menu.add_widget(fnt_name)
menu.add_widget(languages)
menu.add_widget(key_bindings)
b.add_widget(menu)
self.codeinput = CodeInputWithBindings(
lexer=KivyLexer(),
font_size=12,
text=example_text,
key_bindings='default',
)
b.add_widget(self.codeinput)
return b
def _update_size(self, instance, size):
self.codeinput.font_size = float(size)
def _update_font(self, instance, fnt_name):
instance.font_name = self.codeinput.font_name = fnt_name
def _file_menu_selected(self, instance, value):
if value == 'File':
return
instance.text = 'File'
if value == 'Open':
if not hasattr(self, 'load_dialog'):
self.load_dialog = LoadDialog()
self.load_dialog.open()
self.load_dialog.bind(choosen_file=self.setter('files'))
elif value == 'SaveAs':
if not hasattr(self, 'saveas_dialog'):
self.saveas_dialog = SaveDialog()
self.saveas_dialog.text = self.codeinput.text
self.saveas_dialog.open()
elif value == 'Save':
if self.files[0]:
_file = codecs.open(self.files[0], 'w', encoding='utf8')
_file.write(self.codeinput.text)
_file.close()
elif value == 'Close':
if self.files[0]:
self.codeinput.text = ''
Window.title = 'untitled'
def _bindings_selected(self, instance, value):
value = value.split(' ')[0]
self.codeinput.key_bindings = value.lower()
def on_files(self, instance, values):
if not values[0]:
return
_file = codecs.open(values[0], 'r', encoding='utf8')
self.codeinput.text = _file.read()
_file.close()
def change_lang(self, instance, z):
if z == 'KvLexer':
lx = KivyLexer()
else:
lx = lexers.get_lexer_by_name(lexers.LEXERS[z][2][0])
self.codeinput.lexer = lx
if __name__ == '__main__':
CodeInputTest().run()
| mit | 6396c06f4aacadc56257e7bd2ff13613 | 30.315556 | 79 | 0.580329 | 3.843972 | false | false | false | false |
kivy/kivy | kivy/uix/modalview.py | 2 | 10750 | """
ModalView
=========
.. versionadded:: 1.4.0
The :class:`ModalView` widget is used to create modal views. By default, the
view will cover the whole "main" window.
Remember that the default size of a Widget is size_hint=(1, 1). If you don't
want your view to be fullscreen, either use size hints with values lower than
1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed
size attributes.
Examples
--------
Example of a simple 400x400 Hello world view::
view = ModalView(size_hint=(None, None), size=(400, 400))
view.add_widget(Label(text='Hello world'))
By default, any click outside the view will dismiss it. If you don't
want that, you can set :attr:`ModalView.auto_dismiss` to False::
view = ModalView(auto_dismiss=False)
view.add_widget(Label(text='Hello world'))
view.open()
To manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of
the ModalView instance::
view.dismiss()
Both :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bind-able. That
means you can directly bind the function to an action, e.g. to a button's
on_press ::
# create content and add it to the view
content = Button(text='Close me!')
view = ModalView(auto_dismiss=False)
view.add_widget(content)
# bind the on_press event of the button to the dismiss function
content.bind(on_press=view.dismiss)
# open the view
view.open()
ModalView Events
----------------
There are four events available: `on_pre_open` and `on_open` which are raised
when the view is opening; `on_pre_dismiss` and `on_dismiss` which are raised
when the view is closed.
For `on_dismiss`, you can prevent the view from closing by explicitly
returning `True` from your callback::
def my_callback(instance):
print('ModalView', instance, 'is being dismissed, but is prevented!')
return True
view = ModalView()
view.add_widget(Label(text='Hello world'))
view.bind(on_dismiss=my_callback)
view.open()
.. versionchanged:: 1.5.0
The ModalView can be closed by hitting the escape key on the
keyboard if the :attr:`ModalView.auto_dismiss` property is True (the
default).
"""
__all__ = ('ModalView', )
from kivy.animation import Animation
from kivy.properties import (
StringProperty, BooleanProperty, ObjectProperty, NumericProperty,
ListProperty, ColorProperty)
from kivy.uix.anchorlayout import AnchorLayout
class ModalView(AnchorLayout):
"""ModalView class. See module documentation for more information.
:Events:
`on_pre_open`:
Fired before the ModalView is opened. When this event is fired
ModalView is not yet added to window.
`on_open`:
Fired when the ModalView is opened.
`on_pre_dismiss`:
Fired before the ModalView is closed.
`on_dismiss`:
Fired when the ModalView is closed. If the callback returns True,
the dismiss will be canceled.
.. versionchanged:: 1.11.0
Added events `on_pre_open` and `on_pre_dismiss`.
.. versionchanged:: 2.0.0
Added property 'overlay_color'.
.. versionchanged:: 2.1.0
Marked `attach_to` property as deprecated.
"""
# noinspection PyArgumentEqualDefault
auto_dismiss = BooleanProperty(True)
'''This property determines if the view is automatically
dismissed when the user clicks outside it.
:attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
attach_to = ObjectProperty(None, deprecated=True)
'''If a widget is set on attach_to, the view will attach to the nearest
parent window of the widget. If none is found, it will attach to the
main/global Window.
:attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
background_color = ColorProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
This acts as a *multiplier* to the texture colour. The default
texture is grey, so just setting the background color will give
a darker result. To set a plain color, set the
:attr:`background_normal` to ``''``.
The :attr:`background_color` is a
:class:`~kivy.properties.ColorProperty` and defaults to [1, 1, 1, 1].
.. versionchanged:: 2.0.0
Changed behavior to affect the background of the widget itself, not
the overlay dimming.
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
background = StringProperty(
'atlas://data/images/defaulttheme/modalview-background')
'''Background image of the view used for the view background.
:attr:`background` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/modalview-background'.
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used for the :attr:`background_normal` and the
:attr:`background_down` properties. Can be used when using custom
backgrounds.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instructions for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16).
'''
overlay_color = ColorProperty([0, 0, 0, .7])
'''Overlay color in the format (r, g, b, a).
Used for dimming the window behind the modal view.
:attr:`overlay_color` is a :class:`~kivy.properties.ColorProperty` and
defaults to [0, 0, 0, .7].
.. versionadded:: 2.0.0
'''
# Internals properties used for graphical representation.
_anim_alpha = NumericProperty(0)
_anim_duration = NumericProperty(.1)
_window = ObjectProperty(allownone=True, rebind=True)
_is_open = BooleanProperty(False)
_touch_started_inside = None
__events__ = ('on_pre_open', 'on_open', 'on_pre_dismiss', 'on_dismiss')
def __init__(self, **kwargs):
self._parent = None
super(ModalView, self).__init__(**kwargs)
def open(self, *_args, **kwargs):
"""Display the modal in the Window.
When the view is opened, it will be faded in with an animation. If you
don't want the animation, use::
view.open(animation=False)
"""
from kivy.core.window import Window
if self._is_open:
return
self._window = Window
self._is_open = True
self.dispatch('on_pre_open')
Window.add_widget(self)
Window.bind(
on_resize=self._align_center,
on_keyboard=self._handle_keyboard)
self.center = Window.center
self.fbind('center', self._align_center)
self.fbind('size', self._align_center)
if kwargs.get('animation', True):
ani = Animation(_anim_alpha=1., d=self._anim_duration)
ani.bind(on_complete=lambda *_args: self.dispatch('on_open'))
ani.start(self)
else:
self._anim_alpha = 1.
self.dispatch('on_open')
def dismiss(self, *_args, **kwargs):
""" Close the view if it is open.
If you really want to close the view, whatever the on_dismiss
event returns, you can use the *force* keyword argument::
view = ModalView()
view.dismiss(force=True)
When the view is dismissed, it will be faded out before being
removed from the parent. If you don't want this animation, use::
view.dismiss(animation=False)
"""
if not self._is_open:
return
self.dispatch('on_pre_dismiss')
if self.dispatch('on_dismiss') is True:
if kwargs.get('force', False) is not True:
return
if kwargs.get('animation', True):
Animation(_anim_alpha=0., d=self._anim_duration).start(self)
else:
self._anim_alpha = 0
self._real_remove_widget()
def _align_center(self, *_args):
if self._is_open:
self.center = self._window.center
def on_motion(self, etype, me):
super().on_motion(etype, me)
return True
def on_touch_down(self, touch):
""" touch down event handler. """
self._touch_started_inside = self.collide_point(*touch.pos)
if not self.auto_dismiss or self._touch_started_inside:
super().on_touch_down(touch)
return True
def on_touch_move(self, touch):
""" touch moved event handler. """
if not self.auto_dismiss or self._touch_started_inside:
super().on_touch_move(touch)
return True
def on_touch_up(self, touch):
""" touch up event handler. """
# Explicitly test for False as None occurs when shown by on_touch_down
if self.auto_dismiss and self._touch_started_inside is False:
self.dismiss()
else:
super().on_touch_up(touch)
self._touch_started_inside = None
return True
def on__anim_alpha(self, _instance, value):
""" animation progress callback. """
if value == 0 and self._is_open:
self._real_remove_widget()
def _real_remove_widget(self):
if not self._is_open:
return
self._window.remove_widget(self)
self._window.unbind(
on_resize=self._align_center,
on_keyboard=self._handle_keyboard)
self._is_open = False
self._window = None
def on_pre_open(self):
""" default pre-open event handler. """
def on_open(self):
""" default open event handler. """
def on_pre_dismiss(self):
""" default pre-dismiss event handler. """
def on_dismiss(self):
""" default dismiss event handler. """
def _handle_keyboard(self, _window, key, *_args):
if key == 27 and self.auto_dismiss:
self.dismiss()
return True
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
# add view
content = GridLayout(cols=1)
content.add_widget(Label(text='This is a hello world'))
view = ModalView(size_hint=(None, None), size=(256, 256))
view.add_widget(content)
layout = GridLayout(cols=3)
for x in range(9):
btn = Button(text=f"click me {x}")
btn.bind(on_release=view.open)
layout.add_widget(btn)
Window.add_widget(layout)
view.open()
runTouchApp()
| mit | 55d062b70854f9f21d8cdcc4b78b1fa9 | 30.710914 | 79 | 0.633581 | 3.848908 | false | false | false | false |
kivy/kivy | kivy/app.py | 1 | 42538 | '''
Application
===========
The :class:`App` class is the base for creating Kivy applications.
Think of it as your main entry point into the Kivy run loop. In most
cases, you subclass this class and make your own app. You create an
instance of your specific app class and then, when you are ready to
start the application's life cycle, you call your instance's
:meth:`App.run` method.
Creating an Application
-----------------------
Method using build() override
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To initialize your app with a widget tree, override the :meth:`~App.build`
method in your app class and return the widget tree you constructed.
Here's an example of a very simple application that just shows a button:
.. include:: ../../examples/application/app_with_build.py
:literal:
The file is also available in the examples folder at
:file:`kivy/examples/application/app_with_build.py`.
Here, no widget tree was constructed (or if you will, a tree with only
the root node).
Method using kv file
~~~~~~~~~~~~~~~~~~~~
You can also use the :doc:`api-kivy.lang` for creating applications. The
.kv can contain rules and root widget definitions at the same time. Here
is the same example as the Button one in a kv file.
Contents of 'test.kv':
.. include:: ../../examples/application/test.kv
:literal:
Contents of 'main.py':
.. include:: ../../examples/application/app_with_kv.py
:literal:
See :file:`kivy/examples/application/app_with_kv.py`.
The relationship between main.py and test.kv is explained in
:meth:`App.load_kv`.
.. _Application configuration:
Application configuration
-------------------------
Use the configuration file
~~~~~~~~~~~~~~~~~~~~~~~~~~
Your application might need its own configuration file. The
:class:`App` class handles 'ini' files automatically if you add
the section key-value pair to the :meth:`App.build_config` method using the
`config` parameter (an instance of :class:`~kivy.config.ConfigParser`)::
class TestApp(App):
def build_config(self, config):
config.setdefaults('section1', {
'key1': 'value1',
'key2': '42'
})
As soon as you add one section to the config, a file is created on the
disk (see :attr:`~App.get_application_config` for its location) and
named based your class name. "TestApp" will give a config file named
"test.ini" with the content::
[section1]
key1 = value1
key2 = 42
The "test.ini" will be automatically loaded at runtime and you can access the
configuration in your :meth:`App.build` method::
class TestApp(App):
def build_config(self, config):
config.setdefaults('section1', {
'key1': 'value1',
'key2': '42'
})
def build(self):
config = self.config
return Label(text='key1 is %s and key2 is %d' % (
config.get('section1', 'key1'),
config.getint('section1', 'key2')))
Create a settings panel
~~~~~~~~~~~~~~~~~~~~~~~
Your application can have a settings panel to let your user configure some of
your config tokens. Here is an example done in the KinectViewer example
(available in the examples directory):
.. image:: images/app-settings.jpg
:align: center
You can add your own panels of settings by extending
the :meth:`App.build_settings` method.
Check the :class:`~kivy.uix.settings.Settings` about how to create a panel,
because you need a JSON file / data first.
Let's take as an example the previous snippet of TestApp with custom
config. We could create a JSON like this::
[
{ "type": "title",
"title": "Test application" },
{ "type": "options",
"title": "My first key",
"desc": "Description of my first key",
"section": "section1",
"key": "key1",
"options": ["value1", "value2", "another value"] },
{ "type": "numeric",
"title": "My second key",
"desc": "Description of my second key",
"section": "section1",
"key": "key2" }
]
Then, we can create a panel using this JSON to automatically create all the
options and link them to our :attr:`App.config` ConfigParser instance::
class TestApp(App):
# ...
def build_settings(self, settings):
jsondata = """... put the json data here ..."""
settings.add_json_panel('Test application',
self.config, data=jsondata)
That's all! Now you can press F1 (default keystroke) to toggle the
settings panel or press the "settings" key on your android device. You
can manually call :meth:`App.open_settings` and
:meth:`App.close_settings` if you want to handle this manually. Every
change in the panel is automatically saved in the config file.
You can also use :meth:`App.build_settings` to modify properties of
the settings panel. For instance, the default panel has a sidebar for
switching between json panels whose width defaults to 200dp. If you'd
prefer this to be narrower, you could add::
settings.interface.menu.width = dp(100)
to your :meth:`build_settings` method.
You might want to know when a config value has been changed by the
user in order to adapt or reload your UI. You can then overload the
:meth:`on_config_change` method::
class TestApp(App):
# ...
def on_config_change(self, config, section, key, value):
if config is self.config:
token = (section, key)
if token == ('section1', 'key1'):
print('Our key1 has been changed to', value)
elif token == ('section1', 'key2'):
print('Our key2 has been changed to', value)
The Kivy configuration panel is added by default to the settings
instance. If you don't want this panel, you can declare your Application as
follows::
class TestApp(App):
use_kivy_settings = False
# ...
This only removes the Kivy panel but does not stop the settings instance
from appearing. If you want to prevent the settings instance from appearing
altogether, you can do this::
class TestApp(App):
def open_settings(self, *largs):
pass
.. versionadded:: 1.0.7
Profiling with on_start and on_stop
-----------------------------------
It is often useful to profile python code in order to discover locations to
optimise. The standard library profilers
(http://docs.python.org/2/library/profile.html) provides multiple options for
profiling code. For profiling the entire program, the natural
approaches of using profile as a module or profile's run method does not work
with Kivy. It is however, possible to use :meth:`App.on_start` and
:meth:`App.on_stop` methods::
import cProfile
class MyApp(App):
def on_start(self):
self.profile = cProfile.Profile()
self.profile.enable()
def on_stop(self):
self.profile.disable()
self.profile.dump_stats('myapp.profile')
This will create a file called `myapp.profile` when you exit your app.
Customising layout
------------------
You can choose different settings widget layouts by setting
:attr:`App.settings_cls`. By default, this is a
:class:`~kivy.uix.settings.Settings` class which provides the pictured
sidebar layout, but you could set it to any of the other layouts
provided in :mod:`kivy.uix.settings` or create your own. See the
module documentation for :mod:`kivy.uix.settings` for more
information.
You can customise how the settings panel is displayed by
overriding :meth:`App.display_settings` which is called before
displaying the settings panel on the screen. By default, it
simply draws the panel on top of the window, but you could modify it
to (for instance) show the settings in a
:class:`~kivy.uix.popup.Popup` or add it to your app's
:class:`~kivy.uix.screenmanager.ScreenManager` if you are using
one. If you do so, you should also modify :meth:`App.close_settings`
to exit the panel appropriately. For instance, to have the settings
panel appear in a popup you can do::
def display_settings(self, settings):
try:
p = self.settings_popup
except AttributeError:
self.settings_popup = Popup(content=settings,
title='Settings',
size_hint=(0.8, 0.8))
p = self.settings_popup
if p.content is not settings:
p.content = settings
p.open()
def close_settings(self, *args):
try:
p = self.settings_popup
p.dismiss()
except AttributeError:
pass # Settings popup doesn't exist
Finally, if you want to replace the current settings panel widget, you
can remove the internal references to it using
:meth:`App.destroy_settings`. If you have modified
:meth:`App.display_settings`, you should be careful to detect if the
settings panel has been replaced.
Pause mode
----------
.. versionadded:: 1.1.0
On tablets and phones, the user can switch at any moment to another
application. By default, your application will close and the
:meth:`App.on_stop` event will be fired.
If you support Pause mode, when switching to another application, your
application will wait indefinitely until the user
switches back to your application. There is an issue with OpenGL on Android
devices: it is not guaranteed that the OpenGL ES Context will be restored when
your app resumes. The mechanism for restoring all the OpenGL data is not yet
implemented in Kivy.
The currently implemented Pause mechanism is:
#. Kivy checks every frame if Pause mode is activated by the Operating
System due to the user switching to another application, a phone
shutdown or any other reason.
#. :meth:`App.on_pause` is called:
#. If False is returned or :meth:`App.on_pause` has no return statement,
then :meth:`App.on_stop` is called.
#. If True is returned or :meth:`App.on_pause` is not defined, the
application will sleep until the OS resumes our App.
#. When the app is resumed, :meth:`App.on_resume` is called.
#. If our app memory has been reclaimed by the OS, then nothing will be
called.
Here is a simple example of how on_pause() should be used::
class TestApp(App):
def on_pause(self):
# Here you can save data if needed
return True
def on_resume(self):
# Here you can check if any data needs replacing (usually nothing)
pass
.. warning::
Both `on_pause` and `on_stop` must save important data because after
`on_pause` is called, `on_resume` may not be called at all.
Asynchronous app
----------------
In addition to running an app normally,
Kivy can be run within an async event loop such as provided by the standard
library asyncio package or the trio package (highly recommended).
Background
~~~~~~~~~~
Normally, when a Kivy app is run, it blocks the thread that runs it until the
app exits. Internally, at each clock iteration it executes all the app
callbacks, handles graphics and input, and idles by sleeping for any remaining
time.
To be able to run asynchronously, the Kivy app may not sleep, but instead must
release control of the running context to the asynchronous event loop running
the Kivy app. We do this when idling by calling the appropriate functions of
the async package being used instead of sleeping.
Async configuration
~~~~~~~~~~~~~~~~~~~
To run a Kivy app asynchronously, either the :func:`async_runTouchApp` or
:meth:`App.async_run` coroutine must be scheduled to run in the event loop of
the async library being used.
The environmental variable ``KIVY_EVENTLOOP`` or the ``async_lib`` parameter in
:func:`async_runTouchApp` and :meth:`App.async_run` set the async
library that Kivy uses internally when the app is run with
:func:`async_runTouchApp` and :meth:`App.async_run`. It can be set to one of
`"asyncio"` when the standard library `asyncio` is used, or `"trio"` if the
trio library is used. If the environment variable is not set and ``async_lib``
is not provided, the stdlib ``asyncio`` is used.
:meth:`~kivy.clock.ClockBaseBehavior.init_async_lib` can also be directly
called to set the async library to use, but it may only be called before the
app has begun running with :func:`async_runTouchApp` or :meth:`App.async_run`.
To run the app asynchronously, one schedules :func:`async_runTouchApp`
or :meth:`App.async_run` to run within the given library's async event loop as
in the examples shown below. Kivy is then treated as just another coroutine
that the given library runs in its event loop. Internally, Kivy will use the
specified async library's API, so ``KIVY_EVENTLOOP`` or ``async_lib`` must
match the async library that is running Kivy.
For a fuller basic and more advanced examples, see the demo apps in
``examples/async``.
Asyncio example
~~~~~~~~~~~~~~~
.. code-block:: python
import asyncio
from kivy.app import async_runTouchApp
from kivy.uix.label import Label
loop = asyncio.get_event_loop()
loop.run_until_complete(
async_runTouchApp(Label(text='Hello, World!'), async_lib='asyncio'))
loop.close()
Trio example
~~~~~~~~~~~~
.. code-block:: python
import trio
from kivy.app import async_runTouchApp
from kivy.uix.label import Label
from functools import partial
# use functools.partial() to pass keyword arguments:
async_runTouchApp_func = partial(async_runTouchApp, async_lib='trio')
trio.run(async_runTouchApp_func, Label(text='Hello, World!'))
Interacting with Kivy app from other coroutines
-----------------------------------------------
It is fully safe to interact with any kivy object from other coroutines
running within the same async event loop. This is because they are all running
from the same thread and the other coroutines are only executed when Kivy
is idling.
Similarly, the kivy callbacks may safely interact with objects from other
coroutines running in the same event loop. Normal single threaded rules apply
to both case.
.. versionadded:: 2.0.0
'''
__all__ = ('App', 'runTouchApp', 'async_runTouchApp', 'stopTouchApp')
import os
from inspect import getfile
from os.path import dirname, join, exists, sep, expanduser, isfile
from kivy.config import ConfigParser
from kivy.base import runTouchApp, async_runTouchApp, stopTouchApp
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.resources import resource_find
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty
from kivy.setupconfig import USE_SDL2
class App(EventDispatcher):
''' Application class, see module documentation for more information.
:Events:
`on_start`:
Fired when the application is being started (before the
            :func:`~kivy.base.runTouchApp` call).
`on_stop`:
Fired when the application stops.
`on_pause`:
Fired when the application is paused by the OS.
`on_resume`:
Fired when the application is resumed from pause by the OS. Beware:
you have no guarantee that this event will be fired after the
`on_pause` event has been called.
.. versionchanged:: 1.7.0
Parameter `kv_file` added.
.. versionchanged:: 1.8.0
Parameters `kv_file` and `kv_directory` are now properties of App.
'''
title = StringProperty(None)
'''
Title of your application. You can set this as follows::
class MyApp(App):
def build(self):
self.title = 'Hello world'
.. versionadded:: 1.0.5
.. versionchanged:: 1.8.0
`title` is now a :class:`~kivy.properties.StringProperty`. Don't
set the title in the class as previously stated in the documentation.
.. note::
For Kivy < 1.8.0, you can set this as follows::
class MyApp(App):
title = 'Custom title'
If you want to dynamically change the title, you can do::
from kivy.base import EventLoop
EventLoop.window.title = 'New title'
'''
icon = StringProperty(None)
'''Icon of your application.
The icon can be located in the same directory as your main file. You can
set this as follows::
class MyApp(App):
def build(self):
self.icon = 'myicon.png'
.. versionadded:: 1.0.5
.. versionchanged:: 1.8.0
`icon` is now a :class:`~kivy.properties.StringProperty`. Don't set the
icon in the class as previously stated in the documentation.
.. note::
For Kivy prior to 1.8.0, you need to set this as follows::
class MyApp(App):
icon = 'customicon.png'
        Recommended 256x256 or 1024x1024 for GNU/Linux and Mac OSX
32x32 for Windows7 or less. <= 256x256 for windows 8
256x256 does work (on Windows 8 at least), but is scaled
down and doesn't look as good as a 32x32 icon.
'''
use_kivy_settings = True
'''.. versionadded:: 1.0.7
If True, the application settings will also include the Kivy settings. If
you don't want the user to change any kivy settings from your settings UI,
change this to False.
'''
settings_cls = ObjectProperty(None)
'''.. versionadded:: 1.8.0
The class used to construct the settings panel and
the instance passed to :meth:`build_config`. You should
use either :class:`~kivy.uix.settings.Settings` or one of the provided
subclasses with different layouts
(:class:`~kivy.uix.settings.SettingsWithSidebar`,
:class:`~kivy.uix.settings.SettingsWithSpinner`,
:class:`~kivy.uix.settings.SettingsWithTabbedPanel`,
:class:`~kivy.uix.settings.SettingsWithNoMenu`). You can also create your
own Settings subclass. See the documentation
of :mod:`~kivy.uix.settings.Settings` for more information.
:attr:`~App.settings_cls` is an :class:`~kivy.properties.ObjectProperty`
and defaults to :class:`~kivy.uix.settings.SettingsWithSpinner` which
displays settings panels with a spinner to switch between them. If you set
a string, the :class:`~kivy.factory.Factory` will be used to resolve the
class.
'''
kv_directory = StringProperty(None)
'''Path of the directory where application kv is stored, defaults to None
.. versionadded:: 1.8.0
If a kv_directory is set, it will be used to get the initial kv file. By
default, the file is assumed to be in the same directory as the current App
definition file.
'''
kv_file = StringProperty(None)
'''Filename of the Kv file to load, defaults to None.
.. versionadded:: 1.8.0
If a kv_file is set, it will be loaded when the application starts. The
loading of the "default" kv file will be prevented.
'''
# Return the current running App instance
_running_app = None
__events__ = ('on_start', 'on_stop', 'on_pause', 'on_resume',
'on_config_change', )
# Stored so that we only need to determine this once
_user_data_dir = ""
    def __init__(self, **kwargs):
        # Register this instance so App.get_running_app() can find it.
        App._running_app = self
        # Internal state filled in later: caches for the `directory` and
        # `name` properties, the cached settings panel and the bound window.
        self._app_directory = None
        self._app_name = None
        self._app_settings = None
        self._app_window = None
        super(App, self).__init__(**kwargs)
        # Set to True once the root widget has been built (see _run_prepare).
        self.built = False
        #: Options passed to the __init__ of the App
        self.options = kwargs
        #: Returns an instance of the :class:`~kivy.config.ConfigParser` for
        #: the application configuration. You can use this to query some config
        #: tokens in the :meth:`build` method.
        self.config = None
        #: The *root* widget returned by the :meth:`build` method or by the
        #: :meth:`load_kv` method if the kv file contains a root widget.
        self.root = None
    def build(self):
        '''Initializes the application; it will be called only once.
        If this method returns a widget (tree), it will be used as the root
        widget and added to the window.
        :return:
            None or a root :class:`~kivy.uix.widget.Widget` instance
            if no self.root exists.'''
        if not self.root:
            # No root was set (e.g. by load_kv): fall back to an empty
            # Widget so the window always has something to display.
            return Widget()
    def build_config(self, config):
        '''.. versionadded:: 1.0.7
        This method is called before the application is initialized to
        construct your :class:`~kivy.config.ConfigParser` object. This
        is where you can put any default section / key / value for your
        config. If anything is set, the configuration will be
        automatically saved in the file returned by
        :meth:`get_application_config`.
        :Parameters:
            `config`: :class:`~kivy.config.ConfigParser`
                Use this to add default section / key / value items
        '''
        # Default implementation intentionally does nothing; subclasses
        # override this to register their configuration defaults.
    def build_settings(self, settings):
        '''.. versionadded:: 1.0.7
        This method is called when the user (or you) want to show the
        application settings. It is called once when the settings panel
        is first opened, after which the panel is cached. It may be
        called again if the cached settings panel is removed by
        :meth:`destroy_settings`.
        You can use this method to add settings panels and to
        customise the settings widget e.g. by changing the sidebar
        width. See the module documentation for full details.
        :Parameters:
            `settings`: :class:`~kivy.uix.settings.Settings`
                Settings instance for adding panels
        '''
        # Default implementation intentionally does nothing; subclasses
        # override this to add their own settings panels.
    def load_kv(self, filename=None):
        '''This method is invoked the first time the app is being run if no
        widget tree has been constructed before for this app.
        This method then looks for a matching kv file in the same directory as
        the file that contains the application class.
        For example, say you have a file named main.py that contains::
            class ShowcaseApp(App):
                pass
        This method will search for a file named `showcase.kv` in
        the directory that contains main.py. The name of the kv file has to be
        the lowercase name of the class, without the 'App' postfix at the end
        if it exists.
        You can define rules and a root widget in your kv file::
            <ClassName>: # this is a rule
                ...
            ClassName: # this is a root widget
                ...
        There must be only one root widget. See the :doc:`api-kivy.lang`
        documentation for more information on how to create kv files. If your
        kv file contains a root widget, it will be used as self.root, the root
        widget for the application.
        .. note::
            This function is called from :meth:`run`, therefore, any widget
            whose styling is defined in this kv file and is created before
            :meth:`run` is called (e.g. in `__init__`), won't have its styling
            applied. Note that :meth:`build` is called after :attr:`load_kv`
            has been called.
        '''
        # Detect filename automatically if it was not specified.
        if filename:
            filename = resource_find(filename)
        else:
            # Derive the kv directory from the file defining the App subclass,
            # unless the user set self.kv_directory explicitly.
            try:
                default_kv_directory = dirname(getfile(self.__class__))
                if default_kv_directory == '':
                    default_kv_directory = '.'
            except TypeError:
                # if it's a builtin module.. use the current dir.
                default_kv_directory = '.'
            kv_directory = self.kv_directory or default_kv_directory
            clsname = self.__class__.__name__.lower()
            # Strip the 'app' suffix from the class name unless a kv file
            # named with the full class name actually exists.
            if (clsname.endswith('app') and
                    not isfile(join(kv_directory, '%s.kv' % clsname))):
                clsname = clsname[:-3]
            filename = join(kv_directory, '%s.kv' % clsname)
        # Load KV file
        Logger.debug('App: Loading kv <{0}>'.format(filename))
        rfilename = resource_find(filename)
        if rfilename is None or not exists(rfilename):
            Logger.debug('App: kv <%s> not found' % filename)
            return False
        root = Builder.load_file(rfilename)
        # Only overwrite self.root when the kv file declared a root widget.
        if root:
            self.root = root
        return True
def get_application_name(self):
'''Return the name of the application.
'''
if self.title is not None:
return self.title
clsname = self.__class__.__name__
if clsname.endswith('App'):
clsname = clsname[:-3]
return clsname
def get_application_icon(self):
'''Return the icon of the application.
'''
if not resource_find(self.icon):
return ''
else:
return resource_find(self.icon)
    def get_application_config(self, defaultpath='%(appdir)s/%(appname)s.ini'):
        '''
        Return the filename of your application configuration. Depending
        on the platform, the application file will be stored in
        different locations:
            - on iOS: <appdir>/Documents/.<appname>.ini
            - on Android: <user_data_dir>/.<appname>.ini
            - otherwise: <appdir>/<appname>.ini
        When you are distributing your application on Desktops, please
        note that if the application is meant to be installed
        system-wide, the user might not have write-access to the
        application directory. If you want to store user settings, you
        should overload this method and change the default behavior to
        save the configuration file in the user directory. ::
            class TestApp(App):
                def get_application_config(self):
                    return super(TestApp, self).get_application_config(
                        '~/.%(appname)s.ini')
        Some notes:
            - The tilda '~' will be expanded to the user directory.
            - %(appdir)s will be replaced with the application :attr:`directory`
            - %(appname)s will be replaced with the application :attr:`name`
        .. versionadded:: 1.0.7
        .. versionchanged:: 1.4.0
            Customized the defaultpath for iOS and Android platforms. Added a
            defaultpath parameter for desktop OS's (not applicable to iOS
            and Android.)
        .. versionchanged:: 1.11.0
            Changed the Android version to make use of the
            :attr:`~App.user_data_dir` and added a missing dot to the iOS
            config file name.
        '''
        if platform == 'android':
            # Android ignores `defaultpath` entirely: the writable
            # user_data_dir is always used (early return below).
            return join(self.user_data_dir, '.{0}.ini'.format(self.name))
        elif platform == 'ios':
            defaultpath = '~/Documents/.%(appname)s.ini'
        elif platform == 'win':
            # Convert the template's '/' separators to the native separator.
            defaultpath = defaultpath.replace('/', sep)
        return expanduser(defaultpath) % {
            'appname': self.name, 'appdir': self.directory}
    @property
    def root_window(self):
        '''.. versionadded:: 1.9.0
        Returns the root window instance used by :meth:`run`.
        '''
        # Bound in _run_prepare() once the EventLoop window exists.
        return self._app_window
def load_config(self):
'''(internal) This function is used for returning a ConfigParser with
the application configuration. It's doing 3 things:
#. Creating an instance of a ConfigParser
#. Loading the default configuration by calling
:meth:`build_config`, then
#. If it exists, it loads the application configuration file,
otherwise it creates one.
:return:
:class:`~kivy.config.ConfigParser` instance
'''
try:
config = ConfigParser.get_configparser('app')
except KeyError:
config = None
if config is None:
config = ConfigParser(name='app')
self.config = config
self.build_config(config)
# if no sections are created, that's mean the user don't have
# configuration.
if len(config.sections()) == 0:
return
# ok, the user have some sections, read the default file if exist
# or write it !
filename = self.get_application_config()
if filename is None:
return config
Logger.debug('App: Loading configuration <{0}>'.format(filename))
if exists(filename):
try:
config.read(filename)
except:
Logger.error('App: Corrupted config file, ignored.')
config.name = ''
try:
config = ConfigParser.get_configparser('app')
except KeyError:
config = None
if config is None:
config = ConfigParser(name='app')
self.config = config
self.build_config(config)
pass
else:
Logger.debug('App: First configuration, create <{0}>'.format(
filename))
config.filename = filename
config.write()
return config
@property
def directory(self):
'''.. versionadded:: 1.0.7
Return the directory where the application lives.
'''
if self._app_directory is None:
try:
self._app_directory = dirname(getfile(self.__class__))
if self._app_directory == '':
self._app_directory = '.'
except TypeError:
# if it's a builtin module.. use the current dir.
self._app_directory = '.'
return self._app_directory
    def _get_user_data_dir(self):
        # Determine and return the user_data_dir.
        data_dir = ""
        if platform == 'ios':
            # iOS sandbox: per-app Documents folder.
            data_dir = expanduser(join('~/Documents', self.name))
        elif platform == 'android':
            # Ask the Android Context for the app-private files dir via
            # pyjnius; this directory is always writable by the app.
            from jnius import autoclass, cast
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            context = cast('android.content.Context', PythonActivity.mActivity)
            file_p = cast('java.io.File', context.getFilesDir())
            data_dir = file_p.getAbsolutePath()
        elif platform == 'win':
            data_dir = os.path.join(os.environ['APPDATA'], self.name)
        elif platform == 'macosx':
            data_dir = '~/Library/Application Support/{}'.format(self.name)
            data_dir = expanduser(data_dir)
        else:  # _platform == 'linux' or anything else...:
            data_dir = os.environ.get('XDG_CONFIG_HOME', '~/.config')
            data_dir = expanduser(join(data_dir, self.name))
        if not exists(data_dir):
            # NOTE(review): os.mkdir does not create missing parent
            # directories (e.g. an absent XDG_CONFIG_HOME path) — confirm
            # whether os.makedirs would be more appropriate here.
            os.mkdir(data_dir)
        return data_dir
    @property
    def user_data_dir(self):
        '''
        .. versionadded:: 1.7.0
        Returns the path to the directory in the users file system which the
        application can use to store additional data.
        Different platforms have different conventions with regards to where
        the user can store data such as preferences, saved games and settings.
        This function implements these conventions. The <app_name> directory
        is created when the property is called, unless it already exists.
        On iOS, `~/Documents/<app_name>` is returned (which is inside the
        app's sandbox).
        On Windows, `%APPDATA%/<app_name>` is returned.
        On OS X, `~/Library/Application Support/<app_name>` is returned.
        On Linux, `$XDG_CONFIG_HOME/<app_name>` is returned.
        On Android, `Context.GetFilesDir
        <https://developer.android.com/reference/android/content/\
Context.html#getFilesDir()>`_ is returned.
        .. versionchanged:: 1.11.0
            On Android, this function previously returned
            `/sdcard/<app_name>`. This folder became read-only by default
            in Android API 26 and the user_data_dir has therefore been moved
            to a writeable location.
        '''
        # Computed once; the empty-string class-attribute default marks
        # "not yet determined".
        if self._user_data_dir == "":
            self._user_data_dir = self._get_user_data_dir()
        return self._user_data_dir
@property
def name(self):
'''.. versionadded:: 1.0.7
Return the name of the application based on the class name.
'''
if self._app_name is None:
clsname = self.__class__.__name__
if clsname.endswith('App'):
clsname = clsname[:-3]
self._app_name = clsname.lower()
return self._app_name
    def _run_prepare(self):
        '''(internal) Shared setup for :meth:`run` and :meth:`async_run`:
        load the configuration and kv file, build the root widget, attach it
        to the window and dispatch `on_start`.
        '''
        if not self.built:
            self.load_config()
            self.load_kv(filename=self.kv_file)
            root = self.build()
            if root:
                self.root = root
        if self.root:
            if not isinstance(self.root, Widget):
                Logger.critical('App.root must be an _instance_ of Widget')
                raise Exception('Invalid instance in App.root')
            from kivy.core.window import Window
            Window.add_widget(self.root)
        # Check if the window is already created
        from kivy.base import EventLoop
        window = EventLoop.window
        if window:
            self._app_window = window
            window.set_title(self.get_application_name())
            icon = self.get_application_icon()
            if icon:
                window.set_icon(icon)
            self._install_settings_keys(window)
        else:
            Logger.critical("Application: No window is created."
                            " Terminating application run.")
            return
        # Only dispatched when a window exists (early return above).
        self.dispatch('on_start')
    def run(self):
        '''Launches the app in standalone mode.
        '''
        self._run_prepare()
        # Blocks inside runTouchApp() until the event loop exits, then the
        # shared shutdown path runs.
        runTouchApp()
        self._stop()
    async def async_run(self, async_lib=None):
        '''Identical to :meth:`run`, but is a coroutine and can be
        scheduled in a running async event loop.
        See :mod:`kivy.app` for example usage.
        .. versionadded:: 2.0.0
        '''
        self._run_prepare()
        # Yields control to the surrounding event loop while the app runs.
        await async_runTouchApp(async_lib=async_lib)
        self._stop()
    def stop(self, *largs):
        '''Stop the application.
        If you use this method, the whole application will stop by issuing
        a call to :func:`~kivy.base.stopTouchApp`.
        Except on Android, set Android state to stop, Kivy state then follows.
        '''
        if platform == 'android':
            # Let the OS tear the activity down; Kivy's shutdown then
            # follows the Android lifecycle (see docstring).
            from android import mActivity
            mActivity.finishAndRemoveTask()
        else:
            self._stop()
def _stop(self, *largs):
self.dispatch('on_stop')
stopTouchApp()
# Clear the window children
if self._app_window:
for child in self._app_window.children:
self._app_window.remove_widget(child)
App._running_app = None
    def pause(self, *largs):
        '''Pause the application.
        On Android set OS state to pause, Kivy app state follows.
        No functionality on other OS.
        .. versionadded:: 2.2.0
        '''
        if platform == 'android':
            # Ask the OS to background the task; the Kivy pause handling
            # then follows (on_pause / on_resume, see module docs).
            from android import mActivity
            mActivity.moveTaskToBack(True)
        else:
            Logger.info('App.pause() is not available on this OS.')
    def on_start(self):
        '''Event handler for the `on_start` event which is fired after
        initialization (after build() has been called) but before the
        application has started running.
        '''
        # Default handler does nothing; override in subclasses as needed.
        pass
    def on_stop(self):
        '''Event handler for the `on_stop` event which is fired when the
        application has finished running (i.e. the window is about to be
        closed).
        '''
        # Default handler does nothing; override in subclasses as needed.
        pass
    def on_pause(self):
        '''Event handler called when Pause mode is requested. You should
        return True if your app can go into Pause mode, otherwise
        return False and your application will be stopped.
        You cannot control when the application is going to go into this mode.
        It's determined by the Operating System and mostly used for mobile
        devices (android/ios) and for resizing.
        The default return value is True.
        .. versionadded:: 1.1.0
        .. versionchanged:: 1.10.0
            The default return value is now True.
        '''
        # Pausable by default (since 1.10.0); override to veto pausing.
        return True
    def on_resume(self):
        '''Event handler called when your application is resuming from
        the Pause mode.
        .. versionadded:: 1.1.0
        .. warning::
            When resuming, the OpenGL Context might have been damaged / freed.
            This is where you can reconstruct some of your OpenGL state
            e.g. FBO content.
        '''
        # Default handler does nothing; override in subclasses as needed.
        pass
    @staticmethod
    def get_running_app():
        '''Return the currently running application instance.
        .. versionadded:: 1.1.0
        '''
        # Set in App.__init__ and cleared again in App._stop().
        return App._running_app
    def on_config_change(self, config, section, key, value):
        '''Event handler fired when a configuration token has been changed by
        the settings page.
        .. versionchanged:: 1.10.1
            Added corresponding ``on_config_change`` event.
        '''
        # Default handler does nothing; override in subclasses as needed.
        pass
def open_settings(self, *largs):
'''Open the application settings panel. It will be created the very
first time, or recreated if the previously cached panel has been
removed by :meth:`destroy_settings`. The settings panel will be
displayed with the
:meth:`display_settings` method, which by default adds the
settings panel to the Window attached to your application. You
should override that method if you want to display the
settings panel differently.
:return:
True if the settings has been opened.
'''
if self._app_settings is None:
self._app_settings = self.create_settings()
displayed = self.display_settings(self._app_settings)
if displayed:
return True
return False
def display_settings(self, settings):
'''.. versionadded:: 1.8.0
Display the settings panel. By default, the panel is drawn directly
on top of the window. You can define other behaviour by overriding
this method, such as adding it to a ScreenManager or Popup.
You should return True if the display is successful, otherwise False.
:Parameters:
`settings`: :class:`~kivy.uix.settings.Settings`
You can modify this object in order to modify the settings
display.
'''
win = self._app_window
if not win:
raise Exception('No windows are set on the application, you cannot'
' open settings yet.')
if settings not in win.children:
win.add_widget(settings)
return True
return False
def close_settings(self, *largs):
'''Close the previously opened settings panel.
:return:
True if the settings has been closed.
'''
win = self._app_window
settings = self._app_settings
if win is None or settings is None:
return
if settings in win.children:
win.remove_widget(settings)
return True
return False
    def create_settings(self):
        '''Create the settings panel. This method will normally
        be called only one time per
        application life-time and the result is cached internally,
        but it may be called again if the cached panel is removed
        by :meth:`destroy_settings`.
        By default, it will build a settings panel according to
        :attr:`settings_cls`, call :meth:`build_settings`, add a Kivy panel if
        :attr:`use_kivy_settings` is True, and bind to
        on_close/on_config_change.
        If you want to plug your own way of doing settings, without the Kivy
        panel or close/config change events, this is the method you want to
        overload.
        .. versionadded:: 1.8.0
        '''
        # Resolve the settings class: default to SettingsWithSpinner, or
        # look up a string name through the Factory.
        if self.settings_cls is None:
            from kivy.uix.settings import SettingsWithSpinner
            self.settings_cls = SettingsWithSpinner
        elif isinstance(self.settings_cls, string_types):
            self.settings_cls = Factory.get(self.settings_cls)
        s = self.settings_cls()
        # Let the subclass add its own panels.
        self.build_settings(s)
        if self.use_kivy_settings:
            s.add_kivy_panel()
        # Forward panel events to the app-level handlers.
        s.bind(on_close=self.close_settings,
               on_config_change=self._on_config_change)
        return s
def destroy_settings(self):
'''.. versionadded:: 1.8.0
Dereferences the current settings panel if one
exists. This means that when :meth:`App.open_settings` is next
run, a new panel will be created and displayed. It doesn't
affect any of the contents of the panel, but lets you (for
instance) refresh the settings panel layout if you have
changed the settings widget in response to a screen size
change.
If you have modified :meth:`~App.open_settings` or
:meth:`~App.display_settings`, you should be careful to
correctly detect if the previous settings widget has been
destroyed.
'''
if self._app_settings is not None:
self._app_settings = None
#
# privates
#
    def _on_config_change(self, *largs):
        # Re-dispatch the settings panel's 'on_config_change' as the
        # app-level event, dropping the first argument (the dispatching
        # Settings object).
        self.dispatch('on_config_change', *largs[1:])
    def _install_settings_keys(self, window):
        # Listen for keyboard input so F1 (or the platform's menu key) can
        # toggle the settings panel; see _on_keyboard_settings.
        window.bind(on_keyboard=self._on_keyboard_settings)
    def _on_keyboard_settings(self, window, *largs):
        # Window 'on_keyboard' handler: toggles the settings panel.
        key = largs[0]
        setting_key = 282  # F1
        # android hack, if settings key is pygame K_MENU
        if platform == 'android' and not USE_SDL2:
            import pygame
            setting_key = pygame.K_MENU
        if key == setting_key:
            # toggle settings panel
            if not self.open_settings():
                self.close_settings()
            # Returning True marks the key event as consumed.
            return True
        if key == 27:
            # 27 is the Escape keycode: close the panel if it is open.
            return self.close_settings()
    def on_title(self, instance, title):
        # Property observer: keep the window title in sync with App.title.
        if self._app_window:
            self._app_window.set_title(title)
    def on_icon(self, instance, icon):
        # Property observer: resolve the new icon path and apply it to the
        # window.
        if self._app_window:
            self._app_window.set_icon(self.get_application_icon())
| mit | d92953a18eddf800ce2fbf83b5d932a8 | 34.068425 | 79 | 0.625723 | 4.274746 | false | true | false | false |
kivy/kivy | examples/miscellaneous/shapedwindow.py | 11 | 2550 | from kivy.config import Config
Config.set('graphics', 'shaped', 1)
from kivy.resources import resource_find
default_shape = Config.get('kivy', 'window_shape')
alpha_shape = resource_find('data/logo/kivy-icon-512.png')
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import (
BooleanProperty,
StringProperty,
ListProperty,
)
Builder.load_string('''
#:import win kivy.core.window.Window
<Root>:
orientation: 'vertical'
BoxLayout:
Button:
text: 'default_shape'
on_release: app.shape_image = app.default_shape
Button:
text: 'alpha_shape'
on_release: app.shape_image = app.alpha_shape
BoxLayout:
ToggleButton:
group: 'mode'
text: 'default'
state: 'down'
on_release: win.shape_mode = 'default'
ToggleButton:
group: 'mode'
text: 'binalpha'
on_release: win.shape_mode = 'binalpha'
ToggleButton:
group: 'mode'
text: 'reversebinalpha'
on_release: win.shape_mode = 'reversebinalpha'
ToggleButton:
group: 'mode'
text: 'colorkey'
on_release: win.shape_mode = 'colorkey'
BoxLayout:
ToggleButton:
group: 'cutoff'
text: 'cutoff True'
state: 'down'
on_release: win.shape_cutoff = True
ToggleButton:
group: 'cutoff'
text: 'cutoff False'
on_release: win.shape_cutoff = False
BoxLayout:
ToggleButton:
group: 'colorkey'
text: '1, 1, 1, 1'
state: 'down'
on_release: win.shape_color_key = [1, 1, 1, 1]
ToggleButton:
group: 'colorkey'
text: '0, 0, 0, 1'
on_release: win.shape_color_key = [0, 0, 0, 1]
''')
class Root(BoxLayout):
    '''Root layout of the demo; its widget tree is declared in the Builder
    string loaded above.'''
    pass
class ShapedWindow(App):
    '''Demo app that switches the window shape between the default shape
    image and the Kivy icon (buttons are defined in the kv string).'''
    # Path of the current shape image; force_dispatch so selecting the same
    # image again still fires on_shape_image.
    shape_image = StringProperty('', force_dispatch=True)
    def on_shape_image(self, instance, value):
        # The 512x512 icon gets a matching window size; anything else
        # restores the 800x600 window with the default shape.
        if 'kivy-icon' in value:
            Window.size = (512, 512)
            Window.shape_image = self.alpha_shape
        else:
            Window.size = (800, 600)
            Window.shape_image = self.default_shape
    def build(self):
        # Expose both candidate shape paths for the kv buttons.
        self.default_shape = default_shape
        self.alpha_shape = alpha_shape
        return Root()
if __name__ == '__main__':
ShapedWindow().run()
| mit | ef217907fc91887710b862da08a47312 | 25.020408 | 59 | 0.560392 | 3.766617 | false | false | false | false |
kivy/kivy | kivy/core/image/__init__.py | 1 | 32041 | '''
Image
=====
Core classes for loading images and converting them to a
:class:`~kivy.graphics.texture.Texture`. The raw image data can be keep in
memory for further access.
.. versionchanged:: 1.11.0
Add support for argb and abgr image data
In-memory image loading
-----------------------
.. versionadded:: 1.9.0
Official support for in-memory loading. Not all the providers support it,
but currently SDL2, pygame, pil and imageio work.
To load an image with a filename, you would usually do::
from kivy.core.image import Image as CoreImage
im = CoreImage("image.png")
You can also load the image data directly from a memory block. Instead of
passing the filename, you'll need to pass the data as a BytesIO object
together with an "ext" parameter. Both are mandatory::
import io
from kivy.core.image import Image as CoreImage
data = io.BytesIO(open("image.png", "rb").read())
im = CoreImage(data, ext="png")
By default, the image will not be cached as our internal cache requires a
filename. If you want caching, add a filename that represents your file (it
will be used only for caching)::
import io
from kivy.core.image import Image as CoreImage
data = io.BytesIO(open("image.png", "rb").read())
im = CoreImage(data, ext="png", filename="image.png")
Saving an image
---------------
A CoreImage can be saved to a file::
from kivy.core.image import Image as CoreImage
image = CoreImage(...)
image.save("/tmp/test.png")
Or you can get the bytes (new in `1.11.0`):
import io
from kivy.core.image import Image as CoreImage
data = io.BytesIO()
image = CoreImage(...)
image.save(data, fmt="png")
png_bytes = data.read()
'''
import re
from base64 import b64decode
import imghdr
__all__ = ('Image', 'ImageLoader', 'ImageData')
from kivy.event import EventDispatcher
from kivy.core import core_register_libs
from kivy.logger import Logger
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.atlas import Atlas
from kivy.resources import resource_find
from kivy.utils import platform
from kivy.compat import string_types
from kivy.setupconfig import USE_SDL2
import zipfile
from io import BytesIO
# late binding
Texture = TextureRegion = None
# register image caching only for keep_data=True
Cache.register('kv.image', timeout=60)
Cache.register('kv.atlas')
class ImageData(object):
    '''Container for images and mipmap images.
    The container will always have at least the mipmap level 0. Each level
    is stored in :attr:`mipmaps` as a ``[width, height, data, rowlength]``
    list keyed by the level number.
    '''
    __slots__ = ('fmt', 'mipmaps', 'source', 'flip_vertical', 'source_image')
    # Pixel/texture formats accepted by __init__.
    _supported_fmts = ('rgb', 'bgr', 'rgba', 'bgra', 'argb', 'abgr',
                       's3tc_dxt1', 's3tc_dxt3', 's3tc_dxt5', 'pvrtc_rgb2',
                       'pvrtc_rgb4', 'pvrtc_rgba2', 'pvrtc_rgba4', 'etc1_rgb8')

    def __init__(self, width, height, fmt, data, source=None,
                 flip_vertical=True, source_image=None,
                 rowlength=0):
        assert fmt in ImageData._supported_fmts
        #: Decoded image format, one of a available texture format
        self.fmt = fmt
        #: Data for each mipmap.
        self.mipmaps = {}
        self.add_mipmap(0, width, height, data, rowlength)
        #: Image source, if available
        self.source = source
        #: Indicate if the texture will need to be vertically flipped
        self.flip_vertical = flip_vertical
        # the original image, which we might need to save if it is a memoryview
        self.source_image = source_image

    def release_data(self):
        '''Drop the pixel payload of every mipmap level and the original
        source image, keeping size/format metadata intact.'''
        mm = self.mipmaps
        for item in mm.values():
            item[2] = None
        self.source_image = None

    @property
    def width(self):
        '''Image width in pixels.
        (If the image is mipmapped, it will use the level 0)
        '''
        return self.mipmaps[0][0]

    @property
    def height(self):
        '''Image height in pixels.
        (If the image is mipmapped, it will use the level 0)
        '''
        return self.mipmaps[0][1]

    @property
    def data(self):
        '''Image data.
        (If the image is mipmapped, it will use the level 0)
        '''
        return self.mipmaps[0][2]

    @property
    def rowlength(self):
        '''Image rowlength.
        (If the image is mipmapped, it will use the level 0)
        .. versionadded:: 1.9.0
        '''
        return self.mipmaps[0][3]

    @property
    def size(self):
        '''Image (width, height) in pixels.
        (If the image is mipmapped, it will use the level 0)
        '''
        mm = self.mipmaps[0]
        return mm[0], mm[1]

    @property
    def have_mipmap(self):
        # True when any level beyond the mandatory level 0 was added.
        return len(self.mipmaps) > 1

    def __repr__(self):
        return ('<ImageData width=%d height=%d fmt=%s '
                'source=%r with %d images>' % (
                    self.width, self.height, self.fmt,
                    self.source, len(self.mipmaps)))

    def add_mipmap(self, level, width, height, data, rowlength):
        '''Add a image for a specific mipmap level.
        .. versionadded:: 1.0.7
        '''
        self.mipmaps[level] = [int(width), int(height), data, rowlength]

    def get_mipmap(self, level):
        '''Get the mipmap image at a specific level if it exists
        .. versionadded:: 1.0.7
        '''
        if level == 0:
            return (self.width, self.height, self.data, self.rowlength)
        # Check for the level itself rather than comparing against the dict
        # length: levels are dict keys and need not be contiguous, so the
        # old `level < len(self.mipmaps)` check rejected valid sparse levels.
        assert level in self.mipmaps
        return self.mipmaps[level]

    def iterate_mipmaps(self):
        '''Iterate over all mipmap images available.
        .. versionadded:: 1.0.7
        '''
        # Iteration requires contiguous levels 0..n-1; a gap is an error.
        mm = self.mipmaps
        for x in range(len(mm)):
            item = mm.get(x, None)
            if item is None:
                raise Exception('Invalid mipmap level, found empty one')
            yield x, item[0], item[1], item[2], item[3]
class ImageLoaderBase(object):
    '''Base class to implement an image loader provider.

    A provider subclasses this, implements :meth:`load` (returning a list
    of :class:`ImageData`) and registers itself via
    :meth:`ImageLoader.register`.
    '''
    # NOTE(review): populate() assigns '_textures', which is *not* listed
    # here (only the unused '_texture' is). This only works because the
    # provider subclasses do not declare __slots__ themselves and thus
    # carry an instance __dict__. Kept unchanged for compatibility.
    __slots__ = ('_texture', '_data', 'filename', 'keep_data',
                 '_mipmap', '_nocache', '_ext', '_inline')

    def __init__(self, filename, **kwargs):
        self._mipmap = kwargs.get('mipmap', False)
        self.keep_data = kwargs.get('keep_data', False)
        self._nocache = kwargs.get('nocache', False)
        self._ext = kwargs.get('ext')
        self._inline = kwargs.get('inline')
        self.filename = filename
        # inline mode loads from a raw data buffer instead of a file
        if self._inline:
            self._data = self.load(kwargs.get('rawdata'))
        else:
            self._data = self.load(filename)
        self._textures = None

    def load(self, filename):
        '''Load an image. Overridden by each provider; this base
        implementation returns None.'''
        return None

    @staticmethod
    def can_save(fmt, is_bytesio=False):
        '''Indicate if the loader can save the Image object.

        .. versionchanged:: 1.11.0
            Parameter `fmt` and `is_bytesio` added
        '''
        return False

    @staticmethod
    def can_load_memory():
        '''Indicate if the loader can load an image by passing raw data.
        '''
        return False

    @staticmethod
    def save(*largs, **kwargs):
        '''Save pixel data to a file. Overridden by providers that
        support saving.'''
        raise NotImplementedError()

    def populate(self):
        '''Create (or fetch from the cache) one texture per loaded frame
        and store them in ``self._textures``.'''
        self._textures = []
        fname = self.filename
        if __debug__:
            Logger.trace('Image: %r, populate to textures (%d)' %
                         (fname, len(self._data)))
        for count in range(len(self._data)):
            # First, check if a texture with the same uid already exists
            # in the cache. Build the uid with the same text type as the
            # filename (str vs bytes) so we never mix types.
            # (Renamed from `chr`, which shadowed the builtin.)
            fname_type = type(fname)
            uid = fname_type(u'%s|%d|%d') % (fname, self._mipmap, count)
            texture = Cache.get('kv.texture', uid)
            # if not, create it and append it to the cache
            if texture is None:
                imagedata = self._data[count]
                source = '{}{}|'.format(
                    'zip|' if fname.endswith('.zip') else '',
                    self._nocache)
                imagedata.source = fname_type(source) + uid
                texture = Texture.create_from_data(
                    imagedata, mipmap=self._mipmap)
                if not self._nocache:
                    Cache.append('kv.texture', uid, texture)
                if imagedata.flip_vertical:
                    texture.flip_vertical()
            # set as our current texture
            self._textures.append(texture)
            # release the raw pixel data unless asked to keep it
            if not self.keep_data:
                self._data[count].release_data()

    @property
    def width(self):
        '''Image width in pixels.
        '''
        return self._data[0].width

    @property
    def height(self):
        '''Image height in pixels.
        '''
        return self._data[0].height

    @property
    def size(self):
        '''Image size (width, height).
        '''
        return (self._data[0].width, self._data[0].height)

    @property
    def texture(self):
        '''Get the image texture (created on the first call).
        '''
        if self._textures is None:
            self.populate()
        if self._textures is None:
            return None
        return self._textures[0]

    @property
    def textures(self):
        '''Get the textures list (for mipmapped image or animated image).

        .. versionadded:: 1.0.8
        '''
        if self._textures is None:
            self.populate()
        return self._textures

    @property
    def nocache(self):
        '''Indicate if the texture will not be stored in the cache.

        .. versionadded:: 1.6.0
        '''
        return self._nocache
class ImageLoader(object):
    '''Registry of image loader providers. Use :meth:`load` to load any
    supported image; it dispatches to the first capable provider.'''
    loaders = []

    @staticmethod
    def zip_loader(filename, **kwargs):
        '''Read images from a zip file.

        .. versionadded:: 1.0.8

        Returns an Image with a list of type ImageData stored in Image._data
        '''
        # Read the zip in memory for faster access. Use `with` so the
        # file handle is always closed (the original leaked it).
        with open(filename, 'rb') as fd:
            _file = BytesIO(fd.read())
        image_data = []
        image = None
        # `with` guarantees the ZipFile is closed even when a loader
        # raises (the original only closed it on the success path).
        with zipfile.ZipFile(_file) as z:
            # iterate over a sorted filename list so animation frames are
            # loaded in a deterministic order
            for zfilename in sorted(z.namelist()):
                try:
                    # read file and store it in mem with fileIO struct
                    # around it
                    tmpfile = BytesIO(z.read(zfilename))
                    ext = zfilename.split('.')[-1].lower()
                    im = None
                    for loader in ImageLoader.loaders:
                        if (ext not in loader.extensions() or
                                not loader.can_load_memory()):
                            continue
                        Logger.debug('Image%s: Load <%s> from <%s>' %
                                     (loader.__name__[11:], zfilename,
                                      filename))
                        try:
                            im = loader(zfilename, ext=ext, rawdata=tmpfile,
                                        inline=True, **kwargs)
                        except Exception:
                            # Loader failed, continue trying. (Narrowed
                            # from a bare except that also swallowed
                            # KeyboardInterrupt/SystemExit.)
                            continue
                        break
                    if im is not None:
                        # append ImageData to local variable before it is
                        # overwritten
                        image_data.append(im._data[0])
                        image = im
                    # else: not an image file, skip to the next entry
                except Exception:
                    Logger.warning('Image: Unable to load image'
                                   '<%s> in zip <%s> trying to continue...'
                                   % (zfilename, filename))
        if len(image_data) == 0:
            raise Exception('no images in zip <%s>' % filename)
        # replace Image._data with the array of all the images in the zip
        image._data = image_data
        image.filename = filename
        return image

    @staticmethod
    def register(defcls):
        '''Register a new loader provider (ImageLoaderBase subclass).'''
        ImageLoader.loaders.append(defcls)

    @staticmethod
    def load(filename, **kwargs):
        '''Load an image from a filename or an ``atlas://`` uri, returning
        a provider instance (or an :class:`Image` for atlas entries).'''
        # atlas ?
        if filename[:8] == 'atlas://':
            # remove the url scheme
            rfn = filename[8:]
            # last field is the ID
            try:
                rfn, uid = rfn.rsplit('/', 1)
            except ValueError:
                raise ValueError(
                    'Image: Invalid %s name for atlas' % filename)
            # search if we already got the atlas loaded
            atlas = Cache.get('kv.atlas', rfn)
            # atlas already loaded, so reupload the missing texture in
            # cache, because when it's not in use, the texture can be
            # removed from the kv.texture cache.
            if atlas:
                texture = atlas[uid]
                fn = 'atlas://%s/%s' % (rfn, uid)
                cid = '{}|{:d}|{:d}'.format(fn, False, 0)
                Cache.append('kv.texture', cid, texture)
                return Image(texture)
            # search with resource
            afn = rfn
            if not afn.endswith('.atlas'):
                afn += '.atlas'
            afn = resource_find(afn)
            if not afn:
                raise Exception('Unable to find %r atlas' % afn)
            atlas = Atlas(afn)
            Cache.append('kv.atlas', rfn, atlas)
            # first time, fill our texture cache.
            for nid, texture in atlas.textures.items():
                fn = 'atlas://%s/%s' % (rfn, nid)
                cid = '{}|{:d}|{:d}'.format(fn, False, 0)
                Cache.append('kv.texture', cid, texture)
            return Image(atlas[uid])
        # extract the extension
        ext = filename.split('.')[-1].lower()
        # prevent url querystrings from leaking into the extension
        if filename.startswith(('http://', 'https://')):
            ext = ext.split('?')[0]
        filename = resource_find(filename)
        # special case. When we are trying to load a "zip" file with
        # images, we will use the special zip_loader in ImageLoader. This
        # might return a sequence of images contained in the zip.
        if ext == 'zip':
            return ImageLoader.zip_loader(filename)
        else:
            im = None
            # get the actual image format instead of the extension when
            # possible
            ext = imghdr.what(filename) or ext
            for loader in ImageLoader.loaders:
                if ext not in loader.extensions():
                    continue
                Logger.debug('Image%s: Load <%s>' %
                             (loader.__name__[11:], filename))
                im = loader(filename, **kwargs)
                break
            if im is None:
                raise Exception('Unknown <%s> type, no loader found.' % ext)
            return im
class Image(EventDispatcher):
    '''Load an image and store the size and texture.
    .. versionchanged:: 1.0.7
        `mipmap` attribute has been added. The `texture_mipmap` and
        `texture_rectangle` have been deleted.
    .. versionchanged:: 1.0.8
        An Image widget can change its texture. A new event 'on_texture' has
        been introduced. New methods for handling sequenced animation have been
        added.
    :Parameters:
        `arg`: can be a string (str), Texture, BytesIO or Image object
            A string path to the image file or data URI to be loaded; or a
            Texture object, which will be wrapped in an Image object; or a
            BytesIO object containing raw image data; or an already existing
            image object, in which case, a real copy of the given image object
            will be returned.
        `keep_data`: bool, defaults to False
            Keep the image data when the texture is created.
        `mipmap`: bool, defaults to False
            Create mipmap for the texture.
        `anim_delay`: float, defaults to .25
            Delay in seconds between each animation frame. Lower values means
            faster animation.
        `ext`: str, only with BytesIO `arg`
            File extension to use in determining how to load raw image data.
        `filename`: str, only with BytesIO `arg`
            Filename to use in the image cache for raw image data.
    '''
    # attributes copied over when constructing an Image from another Image
    copy_attributes = ('_size', '_filename', '_texture', '_image',
                       '_mipmap', '_nocache')
    # matches inline "data:image/<type>[;<options>],<data>" URIs
    data_uri_re = re.compile(r'^data:image/([^;,]*)(;[^,]*)?,(.*)$')
    # Clock event used to drive sequenced (animated) images
    _anim_ev = None
    def __init__(self, arg, **kwargs):
        # this event should be fired on animation of sequenced img's
        self.register_event_type('on_texture')
        super(Image, self).__init__()
        self._mipmap = kwargs.get('mipmap', False)
        self._keep_data = kwargs.get('keep_data', False)
        self._nocache = kwargs.get('nocache', False)
        self._size = [0, 0]
        self._image = None
        self._filename = None
        self._texture = None
        self._anim_available = False
        self._anim_index = 0
        self._anim_delay = 0
        self.anim_delay = kwargs.get('anim_delay', .25)
        # indicator of images having been loaded in cache
        self._iteration_done = False
        # dispatch on the type of `arg`: copy, wrap, load or decode
        if isinstance(arg, Image):
            for attr in Image.copy_attributes:
                self.__setattr__(attr, arg.__getattribute__(attr))
        elif type(arg) in (Texture, TextureRegion):
            if not hasattr(self, 'textures'):
                self.textures = []
            self.textures.append(arg)
            self._texture = arg
            self._size = self.texture.size
        elif isinstance(arg, ImageLoaderBase):
            self.image = arg
        elif isinstance(arg, BytesIO):
            # raw data: an explicit extension is required to pick a loader
            ext = kwargs.get('ext', None)
            if not ext:
                raise Exception('Inline loading require "ext" parameter')
            filename = kwargs.get('filename')
            if not filename:
                self._nocache = True
                filename = '__inline__'
            self.load_memory(arg, ext, filename)
        elif isinstance(arg, string_types):
            # either a "data:image/..." URI or a plain filename
            groups = self.data_uri_re.findall(arg)
            if groups:
                self._nocache = True
                imtype, optstr, data = groups[0]
                options = [o for o in optstr.split(';') if o]
                ext = imtype
                isb64 = 'base64' in options
                if data:
                    if isb64:
                        data = b64decode(data)
                    self.load_memory(BytesIO(data), ext)
            else:
                self.filename = arg
        else:
            raise Exception('Unable to load image type {0!r}'.format(arg))
    def remove_from_cache(self):
        '''Remove the Image from cache. This facilitates re-loading of
        images from disk in case the image content has changed.
        .. versionadded:: 1.3.0
        Usage::
            im = CoreImage('1.jpg')
            # -- do something --
            im.remove_from_cache()
            im = CoreImage('1.jpg')
            # this time image will be re-loaded from disk
        '''
        count = 0
        f = self.filename
        # build uids with the same text type as the filename (str/bytes)
        pat = type(f)(u'%s|%d|%d')
        uid = pat % (f, self._mipmap, count)
        Cache.remove("kv.image", uid)
        # animated images cache one texture per frame index: walk the
        # indices until a gap is found and drop every cached frame
        while Cache.get("kv.texture", uid):
            Cache.remove("kv.texture", uid)
            count += 1
            uid = pat % (f, self._mipmap, count)
    def _anim(self, *largs):
        # advance the animation by one frame and fire `on_texture`
        if not self._image:
            return
        textures = self.image.textures
        # wrap around when the index ran past the frame list
        if self._anim_index >= len(textures):
            self._anim_index = 0
        self._texture = self.image.textures[self._anim_index]
        self.dispatch('on_texture')
        self._anim_index += 1
        self._anim_index %= len(self._image.textures)
    def anim_reset(self, allow_anim):
        '''Reset an animation if available.
        .. versionadded:: 1.0.8
        :Parameters:
            `allow_anim`: bool
                Indicate whether the animation should restart playing or not.
        Usage::
            # start/reset animation
            image.anim_reset(True)
            # or stop the animation
            image.anim_reset(False)
        You can change the animation speed whilst it is playing::
            # Set to 20 FPS
            image.anim_delay = 1 / 20.
        '''
        # stop animation
        if self._anim_ev is not None:
            self._anim_ev.cancel()
            self._anim_ev = None
        # restart it when requested; a negative delay disables animation
        if allow_anim and self._anim_available and self._anim_delay >= 0:
            self._anim_ev = Clock.schedule_interval(self._anim,
                                                    self.anim_delay)
            self._anim()
    def _get_anim_delay(self):
        return self._anim_delay
    def _set_anim_delay(self, x):
        if self._anim_delay == x:
            return
        self._anim_delay = x
        if self._anim_available:
            # reschedule the running animation with the new delay
            if self._anim_ev is not None:
                self._anim_ev.cancel()
                self._anim_ev = None
            if self._anim_delay >= 0:
                self._anim_ev = Clock.schedule_interval(self._anim,
                                                        self._anim_delay)
    anim_delay = property(_get_anim_delay, _set_anim_delay)
    '''Delay between each animation frame. A lower value means faster
    animation.
    .. versionadded:: 1.0.8
    '''
    @property
    def anim_available(self):
        '''Return True if this Image instance has animation available.
        .. versionadded:: 1.0.8
        '''
        return self._anim_available
    @property
    def anim_index(self):
        '''Return the index number of the image currently in the texture.
        .. versionadded:: 1.0.8
        '''
        return self._anim_index
    def _img_iterate(self, *largs):
        # initialize sequenced animation state; runs at most once
        if not self.image or self._iteration_done:
            return
        self._iteration_done = True
        imgcount = len(self.image.textures)
        if imgcount > 1:
            self._anim_available = True
            self.anim_reset(True)
        self._texture = self.image.textures[0]
    def on_texture(self, *largs):
        '''This event is fired when the texture reference or content has
        changed. It is normally used for sequenced images.
        .. versionadded:: 1.0.8
        '''
        pass
    @staticmethod
    def load(filename, **kwargs):
        '''Load an image
        :Parameters:
            `filename`: str
                Filename of the image.
            `keep_data`: bool, defaults to False
                Keep the image data when the texture is created.
        '''
        kwargs.setdefault('keep_data', False)
        return Image(filename, **kwargs)
    def _get_image(self):
        return self._image
    def _set_image(self, image):
        self._image = image
        # mirror the loader's filename and size onto this Image
        if hasattr(image, 'filename'):
            self._filename = image.filename
        if image:
            self._size = (self.image.width, self.image.height)
    image = property(_get_image, _set_image,
                     doc='Get/set the data image object')
    def _get_filename(self):
        return self._filename
    def _set_filename(self, value):
        if value is None or value == self._filename:
            return
        self._filename = value
        # construct uid as a key for Cache
        f = self.filename
        uid = type(f)(u'%s|%d|%d') % (f, self._mipmap, 0)
        # in case of Image have been asked with keep_data
        # check the kv.image cache instead of texture.
        image = Cache.get('kv.image', uid)
        if image:
            # we found an image, yeah ! but reset the texture now.
            self.image = image
            # if image.__class__ is core image then it's a texture
            # from atlas or other sources and has no data so skip
            if (image.__class__ != self.__class__ and
                    not image.keep_data and self._keep_data):
                # cached copy has no pixel data but we need it: drop the
                # cache entry and re-enter so the image is loaded again
                # from disk, this time keeping its data
                self.remove_from_cache()
                self._filename = ''
                self._set_filename(value)
            else:
                self._texture = None
                return
        else:
            # if we already got a texture, it will be automatically reloaded.
            _texture = Cache.get('kv.texture', uid)
            if _texture:
                self._texture = _texture
                return
            # if image not already in cache then load
            tmpfilename = self._filename
            image = ImageLoader.load(
                self._filename, keep_data=self._keep_data,
                mipmap=self._mipmap, nocache=self._nocache)
            self._filename = tmpfilename
            # put the image into the cache if needed
            if isinstance(image, Texture):
                self._texture = image
                self._size = image.size
            else:
                self.image = image
                if not self._nocache:
                    Cache.append('kv.image', uid, self.image)
    filename = property(_get_filename, _set_filename,
                        doc='Get/set the filename of image')
    def load_memory(self, data, ext, filename='__inline__'):
        '''(internal) Method to load an image from raw data.
        '''
        self._filename = filename
        # see if there is a available loader for it
        loaders = [loader for loader in ImageLoader.loaders if
                   loader.can_load_memory() and
                   ext in loader.extensions()]
        if not loaders:
            raise Exception('No inline loader found to load {}'.format(ext))
        image = loaders[0](filename, ext=ext, rawdata=data, inline=True,
                           nocache=self._nocache, mipmap=self._mipmap,
                           keep_data=self._keep_data)
        if isinstance(image, Texture):
            self._texture = image
            self._size = image.size
        else:
            self.image = image
    @property
    def size(self):
        '''Image size (width, height)
        '''
        return self._size
    @property
    def width(self):
        '''Image width
        '''
        return self._size[0]
    @property
    def height(self):
        '''Image height
        '''
        return self._size[1]
    @property
    def texture(self):
        '''Texture of the image'''
        if self.image:
            if not self._iteration_done:
                self._img_iterate()
        return self._texture
    @property
    def nocache(self):
        '''Indicate whether the texture will not be stored in the cache or not.
        .. versionadded:: 1.6.0
        '''
        return self._nocache
    def save(self, filename, flipped=False, fmt=None):
        '''Save image texture to file.
        The filename should have the '.png' extension because the texture data
        read from the GPU is in the RGBA format. '.jpg' might work but has not
        been heavily tested so some providers might break when using it.
        Any other extensions are not officially supported.
        The flipped parameter flips the saved image vertically, and
        defaults to False.
        Example::
            # Save an core image object
            from kivy.core.image import Image
            img = Image('hello.png')
            img.save('hello2.png')
            # Save a texture
            texture = Texture.create(...)
            img = Image(texture)
            img.save('hello3.png')
        .. versionadded:: 1.7.0
        .. versionchanged:: 1.8.0
            Parameter `flipped` added to flip the image before saving, default
            to False.
        .. versionchanged:: 1.11.0
            Parameter `fmt` added to force the output format of the file
            Filename can now be a BytesIO object.
        '''
        is_bytesio = False
        if isinstance(filename, BytesIO):
            is_bytesio = True
            if not fmt:
                raise Exception(
                    "You must specify a format to save into a BytesIO object")
        elif fmt is None:
            fmt = self._find_format_from_filename(filename)
        pixels = None
        size = None
        # pick the first registered loader able to save this format
        loaders = [
            x for x in ImageLoader.loaders
            if x.can_save(fmt, is_bytesio=is_bytesio)
        ]
        if not loaders:
            return False
        loader = loaders[0]
        if self.image:
            # we might have a ImageData object to use
            data = self.image._data[0]
            if data.data is not None:
                if data.fmt in ('rgba', 'rgb'):
                    # fast path, use the "raw" data when keep_data is used
                    size = data.width, data.height
                    pixels = data.data
                else:
                    # the format is not rgba, we need to convert it.
                    # use texture for that.
                    # NOTE(review): Image defines no populate(); this was
                    # likely intended to be self.image.populate() -- confirm
                    # upstream before relying on this branch.
                    self.populate()
        if pixels is None and self._texture:
            # use the texture pixels
            size = self._texture.size
            pixels = self._texture.pixels
        if pixels is None:
            return False
        # infer the pixel format from the buffer length
        l_pixels = len(pixels)
        if l_pixels == size[0] * size[1] * 3:
            pixelfmt = 'rgb'
        elif l_pixels == size[0] * size[1] * 4:
            pixelfmt = 'rgba'
        else:
            raise Exception('Unable to determine the format of the pixels')
        return loader.save(
            filename, size[0], size[1], pixelfmt, pixels, flipped, fmt)
    def _find_format_from_filename(self, filename):
        # map a filename extension to a save format name, or None
        ext = filename.rsplit(".", 1)[-1].lower()
        if ext in {
                'bmp', 'jpe', 'lbm', 'pcx', 'png', 'pnm',
                'tga', 'tiff', 'webp', 'xcf', 'xpm', 'xv'}:
            return ext
        elif ext in ('jpg', 'jpeg'):
            return 'jpg'
        elif ext in ('b64', 'base64'):
            return 'base64'
        return None
    def read_pixel(self, x, y):
        '''For a given local x/y position, return the pixel color at that
        position.
        .. warning::
            This function can only be used with images loaded with the
            keep_data=True keyword. For example::
                m = Image.load('image.png', keep_data=True)
                color = m.read_pixel(150, 150)
        :Parameters:
            `x`: int
                Local x coordinate of the pixel in question.
            `y`: int
                Local y coordinate of the pixel in question.
        '''
        data = self.image._data[0]
        # can't use this function without ImageData
        if data.data is None:
            raise EOFError('Image data is missing, make sure that image is'
                           'loaded with keep_data=True keyword.')
        # check bounds
        x, y = int(x), int(y)
        if not (0 <= x < data.width and 0 <= y < data.height):
            raise IndexError('Position (%d, %d) is out of range.' % (x, y))
        assert data.fmt in ImageData._supported_fmts
        # 3 bytes per pixel for rgb/bgr, 4 for every alpha-carrying format
        size = 3 if data.fmt in ('rgb', 'bgr') else 4
        index = y * data.width * size + x * size
        raw = bytearray(data.data[index:index + size])
        color = [c / 255.0 for c in raw]
        bgr_flag = False
        if data.fmt == 'argb':
            color.reverse()  # bgra
            bgr_flag = True
        elif data.fmt == 'abgr':
            color.reverse()  # rgba
        # conversion for BGR->RGB, BGRA->RGBA format
        if bgr_flag or data.fmt in ('bgr', 'bgra'):
            color[0], color[2] = color[2], color[0]
        return color
def load(filename, **kwargs):
    '''Load an image from *filename* and return an :class:`Image`.

    Convenience wrapper around :meth:`Image.load`; extra keyword
    arguments (e.g. ``keep_data``, ``mipmap``) are forwarded to it.
    '''
    return Image.load(filename, **kwargs)
# Load the image loader providers, in priority order: platform-specific
# first (imageio on macOS/iOS), then the internal tex/dds loaders, then
# SDL2 or pygame depending on the build, and finally ffpyplayer and PIL.
image_libs = []
if platform in ('macosx', 'ios'):
    image_libs += [('imageio', 'img_imageio')]
image_libs += [
    ('tex', 'img_tex'),
    ('dds', 'img_dds')]
if USE_SDL2:
    image_libs += [('sdl2', 'img_sdl2')]
else:
    image_libs += [('pygame', 'img_pygame')]
image_libs += [
    ('ffpy', 'img_ffpyplayer'),
    ('pil', 'img_pil')]
libs_loaded = core_register_libs('image', image_libs)
from os import environ
# Abort early when no provider could be loaded, except during the
# documentation build (KIVY_DOC), where providers are skipped on purpose.
if 'KIVY_DOC' not in environ and not libs_loaded:
    import sys
    Logger.critical('App: Unable to get any Image provider, abort.')
    sys.exit(1)
# resolve binding: replace the None placeholders defined near the top of
# this module with the real classes (late import avoids a cycle).
from kivy.graphics.texture import Texture, TextureRegion
| mit | e689daf26cb7ec2c7e3cdfa0365bbe67 | 30.977046 | 79 | 0.541775 | 4.209828 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/slimit/visitors/ecmavisitor.py | 9 | 12757 | ###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
from slimit import ast
class ECMAVisitor(object):
def __init__(self):
self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_%s' % node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
return 'GEN: %r' % node
def visit_Program(self, node):
return '\n'.join(self.visit(child) for child in node)
def visit_Block(self, node):
s = '{\n'
self.indent_level += 2
s += '\n'.join(
self._make_indent() + self.visit(child) for child in node)
self.indent_level -= 2
s += '\n' + self._make_indent() + '}'
return s
def visit_VarStatement(self, node):
s = 'var %s;' % ', '.join(self.visit(child) for child in node)
return s
def visit_VarDecl(self, node):
output = []
output.append(self.visit(node.identifier))
if node.initializer is not None:
output.append(' = %s' % self.visit(node.initializer))
return ''.join(output)
def visit_Identifier(self, node):
return node.value
def visit_Assign(self, node):
if node.op == ':':
template = '%s%s %s'
else:
template = '%s %s %s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_GetPropAssign(self, node):
template = 'get %s() {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), body, tail)
def visit_SetPropAssign(self, node):
template = 'set %s(%s) {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
if len(node.parameters) > 1:
raise SyntaxError(
'Setter functions must have one argument: %s' % node)
params = ','.join(self.visit(param) for param in node.parameters)
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), params, body, tail)
def visit_Number(self, node):
return node.value
def visit_Comma(self, node):
s = '%s, %s' % (self.visit(node.left), self.visit(node.right))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_EmptyStatement(self, node):
return node.value
def visit_If(self, node):
s = 'if ('
if node.predicate is not None:
s += self.visit(node.predicate)
s += ') '
s += self.visit(node.consequent)
if node.alternative is not None:
s += ' else '
s += self.visit(node.alternative)
return s
def visit_Boolean(self, node):
return node.value
def visit_For(self, node):
s = 'for ('
if node.init is not None:
s += self.visit(node.init)
if node.init is None:
s += ' ; '
elif isinstance(node.init, (ast.Assign, ast.Comma, ast.FunctionCall,
ast.UnaryOp, ast.Identifier, ast.BinOp,
ast.Conditional, ast.Regex, ast.NewExpr)):
s += '; '
else:
s += ' '
if node.cond is not None:
s += self.visit(node.cond)
s += '; '
if node.count is not None:
s += self.visit(node.count)
s += ') ' + self.visit(node.statement)
return s
def visit_ForIn(self, node):
if isinstance(node.item, ast.VarDecl):
template = 'for (var %s in %s) '
else:
template = 'for (%s in %s) '
s = template % (self.visit(node.item), self.visit(node.iterable))
s += self.visit(node.statement)
return s
def visit_BinOp(self, node):
if getattr(node, '_parens', False):
template = '(%s %s %s)'
else:
template = '%s %s %s'
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.postfix:
s += node.op
elif node.op in ('delete', 'void', 'typeof'):
s = '%s %s' % (node.op, s)
else:
s = '%s%s' % (node.op, s)
if getattr(node, '_parens', False):
s = '(%s)' % s
return s
def visit_ExprStatement(self, node):
return '%s;' % self.visit(node.expr)
def visit_DoWhile(self, node):
s = 'do '
s += self.visit(node.statement)
s += ' while (%s);' % self.visit(node.predicate)
return s
def visit_While(self, node):
s = 'while (%s) ' % self.visit(node.predicate)
s += self.visit(node.statement)
return s
def visit_Null(self, node):
return 'null'
def visit_String(self, node):
return node.value
def visit_Continue(self, node):
if node.identifier is not None:
s = 'continue %s;' % self.visit_Identifier(node.identifier)
else:
s = 'continue;'
return s
def visit_Break(self, node):
if node.identifier is not None:
s = 'break %s;' % self.visit_Identifier(node.identifier)
else:
s = 'break;'
return s
def visit_Return(self, node):
if node.expr is None:
return 'return;'
else:
return 'return %s;' % self.visit(node.expr)
def visit_With(self, node):
s = 'with (%s) ' % self.visit(node.expr)
s += self.visit(node.statement)
return s
def visit_Label(self, node):
s = '%s: %s' % (
self.visit(node.identifier), self.visit(node.statement))
return s
def visit_Switch(self, node):
s = 'switch (%s) {\n' % self.visit(node.expr)
self.indent_level += 2
for case in node.cases:
s += self._make_indent() + self.visit_Case(case)
if node.default is not None:
s += self.visit_Default(node.default)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def visit_Case(self, node):
s = 'case %s:\n' % self.visit(node.expr)
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if elements:
s += elements + '\n'
self.indent_level -= 2
return s
def visit_Default(self, node):
s = self._make_indent() + 'default:\n'
self.indent_level += 2
s += '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if node.elements is not None:
s += '\n'
self.indent_level -= 2
return s
def visit_Throw(self, node):
s = 'throw %s;' % self.visit(node.expr)
return s
def visit_Debugger(self, node):
return '%s;' % node.value
def visit_Try(self, node):
s = 'try '
s += self.visit(node.statements)
if node.catch is not None:
s += ' ' + self.visit(node.catch)
if node.fin is not None:
s += ' ' + self.visit(node.fin)
return s
def visit_Catch(self, node):
s = 'catch (%s) %s' % (
self.visit(node.identifier), self.visit(node.elements))
return s
def visit_Finally(self, node):
s = 'finally %s' % self.visit(node.elements)
return s
def visit_FuncDecl(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
s = 'function %s(%s) {\n%s' % (
self.visit(node.identifier),
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
return s
def visit_FuncExpr(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
ident = node.identifier
ident = '' if ident is None else ' %s' % self.visit(ident)
header = 'function%s(%s)'
if getattr(node, '_parens', False):
header = '(' + header
s = (header + ' {\n%s') % (
ident,
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
if getattr(node, '_parens', False):
s += ')'
return s
def visit_Conditional(self, node):
if getattr(node, '_parens', False):
template = '(%s ? %s : %s)'
else:
template = '%s ? %s : %s'
s = template % (
self.visit(node.predicate),
self.visit(node.consequent), self.visit(node.alternative))
return s
def visit_Regex(self, node):
if getattr(node, '_parens', False):
return '(%s)' % node.value
else:
return node.value
def visit_NewExpr(self, node):
s = 'new %s(%s)' % (
self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args)
)
return s
def visit_DotAccessor(self, node):
if getattr(node, '_parens', False):
template = '(%s.%s)'
else:
template = '%s.%s'
s = template % (self.visit(node.node), self.visit(node.identifier))
return s
def visit_BracketAccessor(self, node):
s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr))
return s
def visit_FunctionCall(self, node):
s = '%s(%s)' % (self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_Object(self, node):
s = '{\n'
self.indent_level += 2
s += ',\n'.join(self._make_indent() + self.visit(prop)
for prop in node.properties)
self.indent_level -= 2
if node.properties:
s += '\n'
s += self._make_indent() + '}'
return s
def visit_Array(self, node):
s = '['
length = len(node.items) - 1
for index, item in enumerate(node.items):
if isinstance(item, ast.Elision):
s += ','
elif index != length:
s += self.visit(item) + ','
else:
s += self.visit(item)
s += ']'
return s
def visit_This(self, node):
return 'this'
| mit | 44bcd2b386c7b0caf1f317d7df5fdc9c | 31.133501 | 79 | 0.519401 | 3.836692 | false | false | false | false |
kivy/kivy | kivy/tools/gles_compat/subset_gles.py | 12 | 4834 | '''
Common GLES Subset Extraction Script
====================================
In Kivy, our goal is to use OpenGL ES 2.0 (GLES2) for all drawing on all
platforms. The problem is that GLES2 is not a proper subset of any OpenGL
Desktop (GL) version prior to version 4.1.
However, to keep all our drawing cross-platform compatible, we're
restricting the Kivy drawing core to a real subset of GLES2 that is
available on all platforms.
This script therefore parses the GL and GL Extension (GLEXT) headers and
compares them with the GLES2 header. It then generates a header that only
contains symbols that are common to GLES2 and at least either GL or GLEXT.
However, since GLES2 doesn't support double values, we also need to do some
renaming, because functions in GL that took doubles as arguments now take
floats in GLES2, with their function name being suffixed with 'f'.
Furthermore, sometimes the pure symbol name doesn't match because there
might be an _EXT or _ARB or something akin to that at the end of a symbol
name. In that case, we take the symbol from the original header and add
a #define directive to redirect to that symbol from the symbol name without
extension.
'''
from __future__ import print_function
gl = open("/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/" +
"OpenGL.framework/Versions/A/Headers/gl.h", 'r')
glext = open("/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/" +
"OpenGL.framework/Versions/A/Headers/glext.h", 'r')
gles = open("gl2.h", 'r')
def add_defines_to_set(header):
symbols = []
lineno = 0
for line in header:
symbol = None
hexcode = None
lineno += 1
line = line.strip()
try:
elements = line.split()
if line.startswith("#define"):
symbol = elements[1]
for element in elements:
if element.startswith("0x"):
hexcode = element
elif line.startswith("typedef"):
symbol = elements[-1]
else:
for element in elements:
if element.startswith("gl"):
symbol = element
if symbol:
symbols.append((symbol, lineno, line, hexcode))
except Exception as e:
print('error:', lineno, ':', line)
print(e)
return symbols
def extract_common_symbols(symbols1, symbols2, already_extracted):
for symbol1, lineno1, line1, hexcode1 in symbols1:
for symbol2, lineno2, line2, hexcode2 in symbols2:
if symbol1 in already_extracted or symbol2 in already_extracted:
continue
if symbol1 == symbol2 + 'f':
# There is no `double` type in GLES; Functions that were using
# a double were renamed with the suffix 'f'.
print("// Different Name; Redefine")
print(line2)
print("#define %s %s" % (symbol1, symbol2))
elif symbol1 == symbol2:
already_extracted.append(symbol1)
print(line1)
if symbol1 == 'GLclampf;':
# See explanation about doubles on GLES above.
print('typedef GLclampf GLclampd;')
elif hexcode1 and hexcode2 and hexcode1 == hexcode2:
already_extracted.append(symbol1)
already_extracted.append(symbol2)
print("// Different Name; Redefine")
print(line2)
print("#define %s %s" % (symbol1, symbol2))
# Generate ------------------------------------------------
# pipe to kivy/kivy/graphics/common_subset.h
gl_symbols = add_defines_to_set(gl)
glext_symbols = add_defines_to_set(glext)
gles_symbols = add_defines_to_set(gles)
print('// GLES 2.0 Header file, generated for Kivy')
print('// Check kivy/kivy/tools/gles_compat/subset_gles.py')
print('#pragma once')
print('#include "gl2platform.h"')
print('#ifdef __cplusplus')
print('extern "C" {')
print('#endif')
# Don't add the same symbol more than once
already_extracted = []
print('\n// Subset common to GLES and GL: ===================================')
extract_common_symbols(gles_symbols, gl_symbols, already_extracted)
print('\n// Subset common to GLES and GLEXT: ================================')
extract_common_symbols(gles_symbols, glext_symbols, already_extracted)
print()
print('// What follows was manually extracted from the GLES2 headers because')
print('// it was not present in any other header.', end=' ')
print('''
#define GL_SHADER_BINARY_FORMATS 0x8DF8
#define GL_RGB565 0x8D62
''')
print('#ifdef __cplusplus')
print('}')
print('#endif')
print('\n')
| mit | 99c92334646033ff532c6aabcac97012 | 37.672 | 79 | 0.602193 | 4.100085 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/urllib3/filepost.py | 147 | 2511 | # urllib3/filepost.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
Our embarassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
Iterate over fields.
.. deprecated ::
The addition of `~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
`~urllib3.fields.RequestField` objects, instead.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
| mit | 907691b0aa85dc3b273dc7bedca6acdb | 23.861386 | 85 | 0.64317 | 3.851227 | false | false | false | false |
kivy/kivy | kivy/core/clipboard/__init__.py | 6 | 4556 | '''
Clipboard
=========
Core class for accessing the Clipboard. If we are not able to access the
system clipboard, a fake one will be used.
Usage example:
.. code-block:: kv
#:import Clipboard kivy.core.clipboard.Clipboard
Button:
on_release:
self.text = Clipboard.paste()
Clipboard.copy('Data')
'''
__all__ = ('ClipboardBase', 'Clipboard')
from kivy import Logger
from kivy.core import core_select_lib
from kivy.utils import platform
from kivy.setupconfig import USE_SDL2
class ClipboardBase(object):
def get(self, mimetype):
'''Get the current data in clipboard, using the mimetype if possible.
You not use this method directly. Use :meth:`paste` instead.
'''
pass
def put(self, data, mimetype):
'''Put data on the clipboard, and attach a mimetype.
You should not use this method directly. Use :meth:`copy` instead.
'''
pass
def get_types(self):
'''Return a list of supported mimetypes
'''
return []
def _ensure_clipboard(self):
''' Ensure that the clipboard has been properly initialised.
'''
if hasattr(self, '_clip_mime_type'):
return
if platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 little endian encoding
self._encoding = 'utf-16-le'
elif platform == 'linux':
self._clip_mime_type = 'text/plain;charset=utf-8'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
.. versionadded:: 1.9.0
'''
if data:
self._copy(data)
def paste(self):
''' Get text from the system clipboard and return it a usable string.
.. versionadded:: 1.9.0
'''
return self._paste()
def _copy(self, data):
self._ensure_clipboard()
if not isinstance(data, bytes):
data = data.encode(self._encoding)
self.put(data, self._clip_mime_type)
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = self.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
if isinstance(data, bytes):
data = data.decode(self._encoding, 'ignore')
# remove null strings mostly a windows issue
data = data.replace(u'\x00', u'')
return data
return u''
# load clipboard implementation
_clipboards = []
if platform == 'android':
_clipboards.append(
('android', 'clipboard_android', 'ClipboardAndroid'))
elif platform == 'macosx':
_clipboards.append(
('nspaste', 'clipboard_nspaste', 'ClipboardNSPaste'))
elif platform == 'win':
_clipboards.append(
('winctypes', 'clipboard_winctypes', 'ClipboardWindows'))
elif platform == 'linux':
_clipboards.append(
('xclip', 'clipboard_xclip', 'ClipboardXclip'))
_clipboards.append(
('xsel', 'clipboard_xsel', 'ClipboardXsel'))
_clipboards.append(
('dbusklipper', 'clipboard_dbusklipper', 'ClipboardDbusKlipper'))
_clipboards.append(
('gtk3', 'clipboard_gtk3', 'ClipboardGtk3'))
if USE_SDL2:
_clipboards.append(
('sdl2', 'clipboard_sdl2', 'ClipboardSDL2'))
else:
_clipboards.append(
('pygame', 'clipboard_pygame', 'ClipboardPygame'))
_clipboards.append(
('dummy', 'clipboard_dummy', 'ClipboardDummy'))
Clipboard = core_select_lib('clipboard', _clipboards, True)
CutBuffer = None
if platform == 'linux':
_cutbuffers = [
('xclip', 'clipboard_xclip', 'ClipboardXclip'),
('xsel', 'clipboard_xsel', 'ClipboardXsel'),
]
if Clipboard.__class__.__name__ in (c[2] for c in _cutbuffers):
CutBuffer = Clipboard
else:
CutBuffer = core_select_lib('cutbuffer', _cutbuffers, True,
basemodule='clipboard')
if CutBuffer:
Logger.info('CutBuffer: cut buffer support enabled')
| mit | 087423d1b4b422747535c9c9d0fae014 | 28.019108 | 78 | 0.59065 | 3.841484 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/urllib3/connection.py | 89 | 3386 | # urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
ssl = None
HTTPSConnection = DummyConnection
class BaseSSLError(BaseException):
pass
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
try:
sock = socket.create_connection(
address=(self.host, self.port),
timeout=self.timeout,
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
if self._tunnel_host:
self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
if ssl:
HTTPSConnection = VerifiedHTTPSConnection
| mit | 6fb09f627364e8e4ef3e6032a24af111 | 30.64486 | 78 | 0.617247 | 4.44357 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/oauthlib/oauth2/rfc6749/__init__.py | 99 | 1728 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import functools
import logging
from .errors import TemporarilyUnavailableError, ServerError
from .errors import FatalClientError, OAuth2Error
log = logging.getLogger(__name__)
class BaseEndpoint(object):
def __init__(self):
self._available = True
self._catch_errors = False
@property
def available(self):
return self._available
@available.setter
def available(self, available):
self._available = available
@property
def catch_errors(self):
return self._catch_errors
@catch_errors.setter
def catch_errors(self, catch_errors):
self._catch_errors = catch_errors
def catch_errors_and_unavailability(f):
@functools.wraps(f)
def wrapper(endpoint, uri, *args, **kwargs):
if not endpoint.available:
e = TemporarilyUnavailableError()
log.info('Endpoint unavailable, ignoring request %s.' % uri)
return {}, e.json, 503
if endpoint.catch_errors:
try:
return f(endpoint, uri, *args, **kwargs)
except OAuth2Error:
raise
except FatalClientError:
raise
except Exception as e:
error = ServerError()
log.warning(
'Exception caught while processing request, %s.' % e)
return {}, error.json, 500
else:
return f(endpoint, uri, *args, **kwargs)
return wrapper
| mit | 7d197e2a5754470ede296e1cc406e12c | 25.181818 | 73 | 0.601273 | 4.396947 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/ply/yacc.py | 462 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style."  Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__    = "3.4"
__tabversion__ = "3.2"       # Table version

#-----------------------------------------------------------------------------
#                     === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug   = 1                # Debugging mode.  If set, yacc generates a
                               # 'parser.out' file in the current directory

debug_file  = 'parser.out'     # Default name of the debugging file
tab_module  = 'parsetab'       # Default name of the table module
default_lr  = 'LALR'           # Default LR table generation method
error_count = 3                # Number of symbols that must be shifted to leave recovery mode
yaccdevel   = 0                # Set to True if developing yacc.  This turns off optimized
                               # implementations of certain functions.

resultlimit = 40               # Size limit of results when running in debug mode.
pickle_protocol = 0            # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility shim: the function code object moved from ``f.func_code``
# (Python 2) to ``f.__code__`` (Python 3).
if sys.version_info[0] >= 3:
    def func_code(f):
        """Return the code object of function *f* (Python 3 attribute name)."""
        return f.__code__
else:
    def func_code(f):
        """Return the code object of function *f* (Python 2 attribute name)."""
        return f.func_code
# Compatibility: ``sys.maxint`` was removed in Python 3; use ``sys.maxsize``
# as the fallback there.
MAXINT = getattr(sys, 'maxint', sys.maxsize)
# Python 2.x/3.0 compatibility.
def load_ply_lex():
    """Import and return the companion lex module for this Python version.

    Under Python 2 the module is importable as a plain ``lex``; under
    Python 3 it lives in the ``ply`` package.
    """
    if sys.version_info[0] >= 3:
        import ply.lex as lex
    else:
        import lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a ``logging`` logger that writes to a file object.

    PLY uses this by default (e.g. for the parser.out file); users wanting
    richer behavior can pass a real logging object instead.
    """
    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels funnel through here: %-format the message and append
        # a newline, exactly like the public methods did individually.
        self.f.write(prefix + (msg % args) + "\n")

    def debug(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    info = debug

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that silently discards all output.

    Every attribute access (``debug``, ``info``, ``warning``, ...) returns
    the logger itself, and calling it also returns the logger, so any chain
    of logging calls becomes a no-op.
    """
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r, limit=None):
    """Format a parse result for debug-mode output.

    Parameters
    ----------
    r : object
        The value produced by a grammar rule.
    limit : int, optional
        Maximum length of the repr before truncation.  Defaults to the
        module-level ``resultlimit`` setting, preserving prior behavior.

    Returns
    -------
    str
        A string of the form ``<type @ 0xADDR> (repr)``.
    """
    if limit is None:
        limit = resultlimit
    repr_str = repr(r)
    # Multi-line reprs are re-repr'd so the output stays on one line.
    if '\n' in repr_str:
        repr_str = repr(repr_str)
    if len(repr_str) > limit:
        repr_str = repr_str[:limit] + " ..."
    result = "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), repr_str)
    return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Format one parser-stack entry for debug output.

    Short values are shown as their (single-line) repr; anything 16
    characters or longer is abbreviated to ``<type @ 0xADDR>``.
    """
    text = repr(r)
    if '\n' in text:
        text = repr(text)
    if len(text) >= 16:
        return "<%s @ 0x%x>" % (type(r).__name__, id(r))
    return text
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """A grammar symbol held on the parsing stack.

    Attributes set during parsing (see the comments above) include
    ``type`` (the grammar symbol name), ``value``, ``lineno``/``endlineno``
    and ``lexpos``/``endlexpos``.
    """
    def __str__(self): return self.type
    def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols actually passed to each grammar rule.

    Index lookup and assignment read/write the ``.value`` attribute of the
    underlying YaccSymbol objects.  ``lineno()``/``linespan()`` and
    ``lexpos()``/``lexspan()`` return positional information for an item
    (0 when not recorded).
    """
    def __init__(self, s, stack=None):
        self.slice = s          # symbols matched by the current production
        self.stack = stack      # full parser symbol stack (for negative indices)
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        # Bug fix: Python 3 no longer calls __getslice__, so p[i:j] lands
        # here with a slice object; handle it explicitly for both versions.
        if isinstance(n, slice):
            return [s.value for s in self.slice[n]]
        elif n >= 0:
            return self.slice[n].value
        else:
            # Negative indices reach back into the parser stack.
            return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 only; retained for backward compatibility.
        return [s.value for s in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        """Return the starting line number of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        """Set the starting line number of item n."""
        self.slice[n].lineno = lineno

    def linespan(self, n):
        """Return a (startline, endline) tuple for item n."""
        startline = getattr(self.slice[n], "lineno", 0)
        endline = getattr(self.slice[n], "endlineno", startline)
        return startline, endline

    def lexpos(self, n):
        """Return the starting lexer position of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        """Return a (startpos, endpos) tuple of lexer positions for item n."""
        startpos = getattr(self.slice[n], "lexpos", 0)
        endpos = getattr(self.slice[n], "endlexpos", startpos)
        return startpos, endpos

    def error(self):
        """Signal a syntax error from within a grammar rule."""
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    def __init__(self,lrtab,errorf):
        # lrtab: the generated LR table object; supplies the production
        # list plus the action and goto tables used by the parse loop.
        # errorf: user-supplied error handler (p_error), or None.
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
    def errok(self):
        # Signal that error recovery has completed; the parse loop checks
        # self.errorok to decide whether to resume normal parsing.
        self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Debug-instrumented LR parse loop.

        Behaviorally identical to the optimized engines, but reports every
        state, stack, action, and result through *debug* (a PlyLogger-like
        object).  Per the maintenance note above, all engine changes must be
        made here first and then copied to the optimized variants.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            # --! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            # --! DEBUG

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"

            # --! DEBUG
            debug.debug('Stack : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result

            if t == None:

                # --! DEBUG
                debug.error('Error : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized LR parsing engine (tracking-capable variant).

        This is the debug parse() method above with the #--! DEBUG
        instrumentation stripped for speed.  The `debug` argument is accepted
        only for interface compatibility and is never read here.

        input     - optional input string handed to the lexer
        lexer     - lexer object; when omitted, one is obtained via load_ply_lex()
        tracking  - if true, line/lexpos info is propagated onto reduced symbols
        tokenfunc - alternative token source (defaults to lexer.token)

        Returns the semantic value of the start symbol, or None on a fatal
        parse error / EOF during error recovery.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                 # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions          # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None)    # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack = [ ]                   # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack          # Put in the production
        errtoken = None                  # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    # Accept: return the value of the start symbol
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None             # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized LR parsing engine with position tracking removed.

        Copy of parseopt() with the #--! TRACKING sections stripped.  Both
        `debug` and `tracking` are accepted only for interface compatibility
        and are never read here.

        input     - optional input string handed to the lexer
        lexer     - lexer object; when omitted, one is obtained via load_ply_lex()
        tokenfunc - alternative token source (defaults to lexer.token)

        Returns the semantic value of the start symbol, or None on a fatal
        parse error / EOF during error recovery.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                 # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions          # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None)    # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack = [ ]                   # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack          # Put in the production
        errtoken = None                  # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    # Accept: return the value of the start symbol
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None             # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# Regex matching grammar symbol names.  Note that '-' is accepted in
# addition to the usual identifier characters.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production when LR tables are read from files.

    Carries only the attributes the LR parsing engine actually uses, plus a
    little debugging information.
    """
    # NOTE: the parameter names below shadow builtins (str, len); they are
    # kept as-is for signature compatibility with existing callers.
    def __init__(self,str,name,len,func,file,line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str
    def __str__(self):
        return self.str
    def __repr__(self):
        return "MiniProduction(%s)" % self.str
    # Resolve the stored reduction-function name against pdict.
    def bind(self,pdict):
        if not self.func:
            return
        self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A single parsing position ("dotted rule") within a production.

    For ``expr : expr . PLUS term`` the "." marks how much of the rule has
    been matched.  prod holds the RHS symbols with the "." inserted at
    position lr_index; lookaheads collects LALR lookahead symbols.
    """
    def __init__(self,p,n):
        # Build the dotted RHS by splicing "." into the source production.
        dotted = list(p.prod)
        dotted.insert(n,".")
        self.name = p.name
        self.prod = tuple(dotted)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = { }
        self.len = len(self.prod)
        self.usyms = p.usyms
    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name," ".join(self.prod))
    def __repr__(self):
        return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost member of *symbols* found in *terminals*.

    Used by add_production() to derive a rule's default precedence.
    Returns None when no symbol is a terminal.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
# Exception raised for errors detected while constructing the grammar.
class GrammarError(YaccError): pass
class Grammar(object):
    def __init__(self,terminals):
        """Initialize empty grammar bookkeeping over the given terminal names.

        The 'error' terminal is always registered implicitly.
        """
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar
        self.Prodnames = { }       # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.
        self.Prodmap = { }         # A dictionary that is only used to detect duplicate
                                   # productions.
        self.Terminals = { }       # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.
        for term in terminals:
            self.Terminals[term] = []
        self.Terminals['error'] = []
        self.Nonterminals = { }    # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.
        self.First = { }           # A dictionary of precomputed FIRST(x) symbols
        self.Follow = { }          # A dictionary of precomputed FOLLOW(x) symbols
        self.Precedence = { }      # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = { }  # Precedence rules that were actually used by the grammar.
                                   # This is only used to provide error checking and to generate
                                   # a warning about unused precedence rules.
        self.Start = None          # Starting symbol for the grammar
    def __len__(self):
        # Number of productions, including the reserved slot at index 0.
        return len(self.Productions)
    def __getitem__(self,index):
        # Index directly into the production list (grammar[n] -> Production).
        return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
    def add_production(self,prodname,syms,func=None,file='',line=0):
        """Add a grammar rule `prodname : syms...` and compute its precedence.

        prodname  - name of the nonterminal being defined
        syms      - list of right-hand-side symbols.  NOTE: mutated in place --
                    quoted literal tokens are canonicalized to their character
                    and a trailing '%prec <term>' pair is stripped.
        func      - name of the reduction function for this rule
        file/line - rule location, used only in error messages

        Precedence comes from an explicit %prec, otherwise from the rightmost
        terminal (defaulting to ('right',0)).  Raises GrammarError for
        malformed or duplicate rules.  Returns 0 on success.
        """
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
        # Look for literal tokens
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # eval turns the quoted source text into the literal character
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                    if not c in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))
        # See if the rule is already in the rulemap
        # (NOTE: local 'map' shadows the builtin of the same name)
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))
        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]
        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)
        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p
        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
    def set_start(self,start=None):
        """Set the grammar's start symbol and create the augmented grammar.

        Installs production 0 as S' -> start.  When no explicit start symbol
        is given, the left-hand side of the first defined rule is used.

        Raises GrammarError if the symbol is not a known nonterminal.
        """
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
    """Compute FOLLOW(A) for every nonterminal A; returns self.Follow.

    start -- name of the start symbol; defaults to the LHS of the first
             real production (self.Productions[1]).
    """
    # If already computed, return the result
    if self.Follow:
        return self.Follow
    # If first sets not computed yet, do that first.
    if not self.First:
        self.compute_first()
    # Add '$end' to the follow list of the start symbol
    for k in self.Nonterminals:
        self.Follow[k] = [ ]
    if not start:
        start = self.Productions[1].name
    self.Follow[start] = [ '$end' ]
    # Fixed-point iteration: sweep every production, propagating FIRST of
    # the suffix after each nonterminal (and FOLLOW of the LHS when that
    # suffix is nullable), until a full sweep adds nothing.
    while 1:
        didadd = 0
        for p in self.Productions[1:]:
            # Here is the production set
            for i in range(len(p.prod)):
                B = p.prod[i]
                if B in self.Nonterminals:
                    # Okay. We got a non-terminal in a production
                    fst = self._first(p.prod[i+1:])
                    hasempty = 0
                    for f in fst:
                        if f != '<empty>' and f not in self.Follow[B]:
                            self.Follow[B].append(f)
                            didadd = 1
                        if f == '<empty>':
                            hasempty = 1
                    if hasempty or i == (len(p.prod)-1):
                        # Add elements of follow(a) to follow(b)
                        for f in self.Follow[p.name]:
                            if f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
        if not didadd: break
    return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
#   E -> E PLUS E
#
# Creates the list
#
#  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
    """Build every LR item for every production.

    Each production p gets p.lr_items (all dot positions) and a linked
    list via .lr_next; the final link is None (dot past the end).
    """
    for p in self.Productions:
        lastlri = p
        i = 0
        lr_items = []
        while 1:
            if i > len(p):
                # Dot moved past the end: terminate the linked list.
                lri = None
            else:
                lri = LRItem(p,i)
                # Precompute the list of productions immediately following
                # the dot (empty for terminals / end of production).
                try:
                    lri.lr_after = self.Prodnames[lri.prod[i+1]]
                except (IndexError,KeyError):
                    lri.lr_after = []
                # Symbol immediately before the dot (None at position 0).
                try:
                    lri.lr_before = lri.prod[i-1]
                except IndexError:
                    lri.lr_before = None
            lastlri.lr_next = lri
            if not lri: break
            lr_items.append(lri)
            lastlri = lri
            i += 1
        p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Raised when a cached parser table file has an incompatible version."""
class LRTable(object):
    """Container for LR parsing tables (action, goto, productions, method).

    Table construction lives in the derived class LRGeneratedTable; this
    base class only knows how to load previously generated tables from a
    Python module (read_table) or a pickle file (read_pickle).
    """

    def __init__(self):
        self.lr_action = None        # Action table: state -> {symbol: action}
        self.lr_goto = None          # Goto table:  state -> {nonterminal: state}
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # 'LALR' or 'SLR'

    def read_table(self, module):
        """Load tables from a parsetab module object or importable name.

        Raises VersionError when the cached table was generated by a
        different PLY table version. Returns the table signature.
        """
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            # Import by dotted name; Python 3 exec() needs an explicit
            # namespace to capture the bound module.
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                env = {}
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']
        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")
        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from a pickle file written by pickle_table().

        Raises VersionError on a table-version mismatch. Returns the
        table signature.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        in_f = open(filename, "rb")
        # BUGFIX: close the file even when the version check or an
        # unpickling step raises; previously a VersionError leaked the
        # open file handle.
        try:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError("yacc table file version is out of date")
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
            self.lr_productions = []
            for p in productions:
                self.lr_productions.append(MiniProduction(*p))
        finally:
            in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# SCC-based fixpoint computation (DeRemer & Pennello) of set-valued
# functions of the form:
#
#      F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs:  X  - An input set
#          R  - A relation (callable: x -> iterable of related nodes)
#          FP - Set-valued base function (callable: x -> list)
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Return a dict mapping each x in X to its computed set F(x)."""
    marks = dict.fromkeys(X, 0)     # 0 = unvisited, else DFS depth or MAXINT
    stack = []
    F = {}
    for node in X:
        if marks[node] == 0:
            traverse(node, marks, stack, F, X, R, FP)
    return F

def traverse(x, N, stack, F, X, R, FP):
    """Depth-first helper for digraph(); mutates N, stack and F in place."""
    stack.append(x)
    depth = len(stack)
    N[x] = depth
    F[x] = FP(x)                    # seed with F'(x)
    for y in R(x):                  # propagate from every node related to x
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for member in F.get(y, []):
            if member not in F[x]:
                F[x].append(member)
    if N[x] == depth:
        # x is the root of a strongly connected component: pop the whole
        # SCC and give every member the same (now final) result set.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
# Exception raised for errors detected during LALR table construction.
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self,grammar,method='LALR',log=None):
        """Build LR parse tables for *grammar*.

        grammar -- Grammar object (productions, terminals, nonterminals, ...)
        method  -- table construction algorithm, 'SLR' or 'LALR'
        log     -- optional logger; defaults to a NullLogger

        Raises LALRError for an unsupported method. Table construction
        runs eagerly here via lr_parse_table().
        """
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)
        self.grammar = grammar
        self.lr_method = method
        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log
        # Internal attributes
        self.lr_action = {}                  # Action table
        self.lr_goto = {}                    # Goto table
        self.lr_productions = grammar.Productions   # Copy of grammar Production array
        self.lr_goto_cache = {}              # Cache of computed gotos
        self.lr0_cidhash = {}                # Cache of closures
        self._add_count = 0                  # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []                  # List of conflicts
        self.sr_conflicts = []               # (state, token, resolution) tuples
        self.rr_conflicts = []               # (state, chosen rule, rejected rule) tuples
        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
    """Return the LR(0) closure of the item set *I* as a new list."""
    # Bump the generation counter; items tagged with the current count
    # have already been added during this closure computation, which
    # avoids re-adding them (and detects cycles cheaply).
    self._add_count += 1
    closure = list(I)
    pending = True
    while pending:
        pending = False
        for item in closure:
            for prod in item.lr_after:
                if getattr(prod, "lr0_added", 0) == self._add_count:
                    continue
                # Add B -> . gamma to the closure exactly once.
                closure.append(prod.lr_next)
                prod.lr0_added = self._add_count
                pending = True
    return closure
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
    """Return goto(I, x), memoized so equal goto sets are one object."""
    # First we look for a previously cached entry
    g = self.lr_goto_cache.get((id(I),x),None)
    if g: return g
    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result. lr_goto_cache[x] is a trie keyed by the ids of the
    # advanced items; walking it identifies previously-built goto sets.
    s = self.lr_goto_cache.get(x,None)
    if not s:
        s = { }
        self.lr_goto_cache[x] = s
    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lr_before == x:
            # Item's dot is just before x: advance it and descend the trie.
            s1 = s.get(id(n),None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    # '$end' marks the trie leaf holding the finished (closed) goto set.
    g = s.get('$end',None)
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    self.lr_goto_cache[(id(I),x)] = g
    return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Build and return C, the canonical collection of LR(0) item sets.

    The list index of each item set is its state number; lr0_cidhash
    maps id(itemset) -> state number for fast reverse lookup.
    """
    C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
    i = 0
    for I in C:
        self.lr0_cidhash[id(I)] = i
        i += 1
    # Loop over the items in C and each grammar symbols.
    # C grows while we iterate, so index manually rather than with `for`.
    i = 0
    while i < len(C):
        I = C[i]
        i += 1
        # Collect all of the symbols that could possibly be in the goto(I,X) sets
        asyms = { }
        for ii in I:
            for s in ii.usyms:
                asyms[s] = None
        for x in asyms:
            g = self.lr0_goto(I,x)
            if not g: continue
            # Skip goto sets we've already numbered (lr0_goto memoizes,
            # so identity comparison via id() is sufficient).
            if id(g) in self.lr0_cidhash: continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
#  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
#      McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
    """Return a dict whose keys are the nonterminals that can derive <empty>."""
    nullable = {}
    previous_size = 0
    while True:
        for prod in self.grammar.Productions[1:]:
            # A zero-length production makes its LHS nullable by definition.
            if prod.len == 0:
                nullable[prod.name] = 1
                continue
            for sym in prod.prod:
                if sym not in nullable:
                    break
            else:
                # Every RHS symbol is nullable, so the LHS is too.
                nullable[prod.name] = 1
        # Fixed point: stop once a full sweep discovers nothing new.
        if len(nullable) == previous_size:
            break
        previous_size = len(nullable)
    return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
    """Return the list of unique (state, nonterminal) transitions in C.

    C -- list of LR(0) item sets; the list index is the state number.
    """
    trans = []
    for state, items in enumerate(C):
        for p in items:
            if p.lr_index < p.len - 1:
                # Symbol immediately to the right of the dot.
                t = (state, p.prod[p.lr_index + 1])
                if t[1] in self.grammar.Nonterminals:
                    if t not in trans:
                        trans.append(t)
        # BUGFIX: removed the dead `state = state + 1` from the loop tail;
        # the `for` statement rebinds `state` on each iteration, so the
        # manual increment had no effect.
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
    """Compute DR(p,A): terminals directly readable after transition `trans`.

    C        -- canonical collection of LR(0) item sets
    trans    -- a (state, nonterminal) pair
    nullable -- unused here; kept so all relation helpers share a signature
    """
    # BUGFIX: removed the unused local `dr_set` the original declared and
    # never read.
    state, N = trans
    terms = []
    g = self.lr0_goto(C[state], N)
    for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index + 1]
            if a in self.grammar.Terminals:
                if a not in terms:
                    terms.append(a)
    # This extra bit is to handle the start state: the augmented start
    # transition can always read the end-of-input marker.
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')
    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
    """Return transitions reachable from `trans` across nullable symbols.

    empty -- dict of symbols known to derive the empty string
    """
    rel = []
    state, N = trans
    goto_set = self.lr0_goto(C[state], N)
    next_state = self.lr0_cidhash.get(id(goto_set), -1)
    for item in goto_set:
        if item.lr_index < item.len - 1:
            after_dot = item.prod[item.lr_index + 1]
            # A nullable symbol after the dot means the lookahead "reads
            # through" it into the next state's transition.
            if after_dot in empty:
                rel.append((next_state, after_dot))
    return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ."   We then build a relationship between this final
# state and the starting state.   These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
# if the following holds:
#
#       B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string.  State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
    """Return (lookdict, includedict) for all nonterminal transitions.

    C        -- canonical collection of LR(0) item sets
    trans    -- list of (state, nonterminal) transitions
    nullable -- dict of nullable nonterminals
    """
    lookdict = {}          # Dictionary of lookback relations
    includedict = {}       # Dictionary of include relations
    # Make a dictionary of non-terminal transitions for O(1) membership tests
    dtrans = {}
    for t in trans:
        dtrans[t] = 1
    # Loop over all transitions and compute lookbacks and includes
    for state,N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue
            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]
                # Check to see if this symbol and state are a non-terminal transition
                if (j,t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals: break      # No forget it
                        if not p.prod[li] in nullable: break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j,t))
                g = self.lr0_goto(C[j],t)               # Go to next set
                j = self.lr0_cidhash.get(id(g),-1)      # Go to next state
            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This look is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j,r))
        for i in includes:
            if not i in includedict: includedict[i] = []
            includedict[i].append((state,N))
        lookdict[(state,N)] = lookb
    return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs:  C        =  Set of LR(0) items
#          ntrans   = Set of nonterminal transitions
#          nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute Read() sets for all nonterminal transitions via digraph()."""
    base = lambda x: self.dr_relation(C, x, nullable)        # F'(x) = DR(x)
    related = lambda x: self.reads_relation(C, x, nullable)  # x R y = READS
    return digraph(ntrans, related, base)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
#      Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
#           ntrans     = Set of nonterminal transitions
#           readsets   = Readset (previously computed)
#           inclsets   = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute LALR FOLLOW sets from the read and includes relations."""
    base = lambda x: readsets[x]                 # F'(x) = Read(x)
    related = lambda x: inclsets.get(x, [])      # x R y = INCLUDES
    return digraph(ntrans, related, base)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs:    lookbacks         -  Set of lookback relations
#            followset         -  Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach computed follow symbols to productions as per-state lookaheads."""
    for trans, lb in lookbacks.items():
        follow = followset.get(trans, [])
        # Each (state, production) in the lookback gets the transition's
        # follow symbols merged into its per-state lookahead list.
        for state, p in lb:
            state_las = p.lookaheads.setdefault(state, [])
            for a in follow:
                if a not in state_las:
                    state_las.append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Run the full DeRemer/Pennello pipeline to attach LALR lookaheads."""
    # 1. Which nonterminals can derive the empty string?
    empty_set = self.compute_nullable_nonterminals()
    # 2. All (state, nonterminal) transitions of the LR(0) machine.
    nt_trans = self.find_nonterminal_transitions(C)
    # 3. Read sets over those transitions.
    read_sets = self.compute_read_sets(C, nt_trans, empty_set)
    # 4. Lookback / includes relations.
    lookback, includes = self.compute_lookback_includes(C, nt_trans, empty_set)
    # 5. LALR FOLLOW sets from reads + includes.
    follow_sets = self.compute_follow_sets(nt_trans, read_sets, includes)
    # 6. Attach everything to the productions.
    self.add_lookaheads(lookback, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Build self.lr_action and self.lr_goto for every LR(0) state.

    Action encoding: positive = shift to that state, negative = reduce by
    that rule number, 0 = accept, None = explicit error (nonassoc).
    Shift/reduce conflicts are resolved with the precedence table
    (default: shift); reduce/reduce conflicts favor the earlier rule.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto                    # Goto array
    action = self.lr_action                # Action array
    log = self.log                         # Logger for output
    actionp = { }                          # Action production array (temporary)
    log.info("Parsing method: %s", self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ]                      # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")
        for p in I:
            if p.len == p.lr_index + 1:
                # Dot at the end of the production: reduce (or accept).
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # nonassoc at the same level: make it an error.
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1]                    # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                #   -  if precedence of reduce rule is higher, we reduce.
                                #   -  if precedence of reduce is same and left assoc, we reduce.
                                #   -  otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")
        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
    """Write the generated tables as an importable parsetab-style module.

    modulename -- dotted module name for the generated table file
    outputdir  -- directory in which to create the .py file
    signature  -- grammar signature used for staleness checks on reload

    I/O errors are reported to stderr rather than raised.
    """
    basemodulename = modulename.split(".")[-1]
    filename = os.path.join(outputdir,basemodulename) + ".py"
    try:
        f = open(filename,"w")
        f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
        # Change smaller to 0 to go back to original tables
        smaller = 1
        # Factor out names to try and make smaller
        if smaller:
            # Invert state->{name: value} into name->([states],[values])
            # so repeated terminal names are emitted only once.
            items = { }
            for s,nd in self.lr_action.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write("\n_lr_action_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")
            # NOTE(review): leading whitespace inside this template appears
            # to have been garbled in this copy of the source; it is part of
            # the *generated* module and must form valid Python. Restored
            # here -- verify against upstream PLY.
            f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")
        else:
            f.write("\n_lr_action = { ");
            for k,v in self.lr_action.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");
        if smaller:
            # Factor out names to try and make smaller
            items = { }
            for s,nd in self.lr_goto.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write("\n_lr_goto_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")
            # NOTE(review): same template-indentation caveat as above.
            f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
        else:
            f.write("\n_lr_goto = { ");
            for k,v in self.lr_goto.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");
        # Write production table
        f.write("_lr_productions = [\n")
        for p in self.lr_productions:
            if p.func:
                f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                f.write("  (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
        f.write("]\n")
        f.close()
    except IOError:
        e = sys.exc_info()[1]
        sys.stderr.write("Unable to create '%s'\n" % filename)
        sys.stderr.write(str(e)+"\n")
        return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=""):
    """Serialize the generated tables to *filename* with pickle.

    Layout (in order): table version, method, signature, action table,
    goto table, production list as plain tuples. Must stay in sync with
    LRTable.read_pickle().
    """
    try:
        import cPickle as pickle     # Python 2 fast path
    except ImportError:
        import pickle
    outf = open(filename, "wb")
    # BUGFIX: close the output file even if a dump fails; previously an
    # exception while pickling leaked the open file handle.
    try:
        pickle.dump(__tabversion__, outf, pickle_protocol)
        pickle.dump(self.lr_method, outf, pickle_protocol)
        pickle.dump(signature, outf, pickle_protocol)
        pickle.dump(self.lr_action, outf, pickle_protocol)
        pickle.dump(self.lr_goto, outf, pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str, p.name, p.len, p.func, p.file, p.line))
            else:
                outp.append((str(p), p.name, p.len, None, None, None))
        pickle.dump(outp, outf, pickle_protocol)
    finally:
        outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return globals+locals of the frame *levels* steps up the call stack."""
    # Raise and catch a throwaway exception purely to obtain a traceback,
    # whose frame gives us a handle on the call stack (avoids relying on
    # the CPython-specific sys._getframe).
    try:
        raise RuntimeError
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        frame = tb.tb_frame
    while levels > 0:
        frame = frame.f_back
        levels -= 1
    ldict = frame.f_globals.copy()
    # Merge locals on top of globals when they differ (i.e. the caller is
    # inside a function rather than at module level).
    if frame.f_globals != frame.f_locals:
        ldict.update(frame.f_locals)
    return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse rule specifications out of a docstring.

    doc  -- the rule docstring ("name : sym sym\\n | sym ...")
    file -- source filename (for error messages and result tuples)
    line -- line number at which the docstring starts

    Returns a list of (file, lineno, prodname, syms) tuples.
    Raises SyntaxError on malformed rules.
    """
    grammar = []
    last_prodname = None
    for offset, raw in enumerate(doc.splitlines(), 1):
        lineno = line + offset
        parts = raw.split()
        if not parts:
            continue
        try:
            if parts[0] == '|':
                # Continuation ("|" alternative) of the previous rule.
                if not last_prodname:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, lineno))
                prodname = last_prodname
                syms = parts[1:]
            else:
                prodname = parts[0]
                last_prodname = prodname
                syms = parts[2:]
                assign = parts[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, lineno))
            grammar.append((file, lineno, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. a one-token line: parts[1] raises IndexError.
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file, lineno, raw.strip()))
    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    def __init__(self,pdict,log=None):
        """Collect parser-building info from *pdict* (a caller namespace).

        pdict -- dictionary of symbols (usually a module or caller dict)
        log   -- optional logger; defaults to a PlyLogger on stderr
        """
        self.pdict = pdict
        self.start = None        # start symbol name ('start' entry in pdict)
        self.error_func = None   # user p_error handler, if any
        self.tokens = None       # token list from pdict
        self.files = {}          # source files containing rule functions
        self.grammar = []        # collected grammar rules
        self.error = 0           # set to 1 when validation finds a problem
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log
# Get all of the basic information
def get_all(self):
    """Harvest start symbol, error handler, tokens, precedence and rules."""
    for collect in (self.get_start, self.get_error_func, self.get_tokens,
                    self.get_precedence, self.get_pfunctions):
        collect()
# Validate all of the information
def validate_all(self):
    """Run every validation pass; returns the accumulated error flag."""
    for check in (self.validate_start, self.validate_error_func,
                  self.validate_tokens, self.validate_precedence,
                  self.validate_pfunctions, self.validate_files):
        check()
    return self.error
# Compute a signature over the grammar
def signature(self):
    """Return an MD5 digest of the grammar spec (start symbol, precedence,
    tokens and rule docstrings), used to detect stale cached tables."""
    try:
        from hashlib import md5
    except ImportError:
        from md5 import md5       # ancient Pythons only
    digest = md5()
    try:
        if self.start:
            digest.update(self.start.encode('latin-1'))
        if self.prec:
            flat = "".join(["".join(entry) for entry in self.prec])
            digest.update(flat.encode('latin-1'))
        if self.tokens:
            digest.update(" ".join(self.tokens).encode('latin-1'))
        for pfunc in self.pfuncs:
            doc = pfunc[3]
            if doc:
                digest.update(doc.encode('latin-1'))
    except (TypeError, ValueError):
        # Malformed declarations are reported by the validators; the
        # signature just degrades to whatever was hashed so far.
        pass
    return digest.digest()
# -----------------------------------------------------------------------------
# validate_files()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
    """Warn about duplicated p_rulename() definitions in the source files."""
    # Match "def p_funcname(" (possibly indented).
    rule_def = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
    for filename in self.files.keys():
        base, ext = os.path.splitext(filename)
        if ext != '.py':
            return 1     # No idea. Assume it's okay.
        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            # Source unavailable (interactive session, zip import, ...):
            # nothing to check.
            continue
        seen_at = {}
        for lineno, text in enumerate(lines, 1):
            m = rule_def.match(text)
            if not m:
                continue
            name = m.group(1)
            first = seen_at.get(name)
            if first:
                self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename, lineno, name, first)
            else:
                seen_at[name] = lineno
# Get the start symbol
def get_start(self):
    """Fetch the user-declared 'start' symbol (None if absent)."""
    self.start = self.pdict.get('start')

# Validate the start symbol
def validate_start(self):
    """Flag a non-string 'start' declaration."""
    if self.start is not None and not isinstance(self.start, str):
        self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
    """Fetch the user-supplied p_error() handler (None if absent)."""
    self.error_func = self.pdict.get('p_error')

# Validate the error function
def validate_error_func(self):
    """Check that p_error is a 1-argument function (or 2-arg bound method)."""
    if not self.error_func:
        return
    if isinstance(self.error_func, types.FunctionType):
        ismethod = 0
    elif isinstance(self.error_func, types.MethodType):
        ismethod = 1    # bound methods carry an implicit self argument
    else:
        self.log.error("'p_error' defined, but is not a function or method")
        self.error = 1
        return
    eline = func_code(self.error_func).co_firstlineno
    efile = func_code(self.error_func).co_filename
    self.files[efile] = 1
    if func_code(self.error_func).co_argcount != 1 + ismethod:
        self.log.error("%s:%d: p_error() requires 1 argument", efile, eline)
        self.error = 1
# Get the tokens map
def get_tokens(self):
    """Fetch and minimally type-check the module's token list.

    Sets self.error and logs a diagnostic on failure.
    """
    tokens = self.pdict.get("tokens", None)
    # BUGFIX: the original first guard was `if not tokens:`, which also
    # swallowed an *empty* token list, making the dedicated
    # "tokens is empty" diagnostic below unreachable. Check for a missing
    # declaration explicitly so each message is reachable.
    if tokens is None:
        self.log.error("No token list is defined")
        self.error = 1
        return
    if not isinstance(tokens, (list, tuple)):
        self.log.error("tokens must be a list or tuple")
        self.error = 1
        return
    if not tokens:
        self.log.error("tokens is empty")
        self.error = 1
        return
    self.tokens = tokens

# Validate the tokens
def validate_tokens(self):
    """Reject the reserved token name 'error'; warn on duplicates."""
    if 'error' in self.tokens:
        self.log.error("Illegal token name 'error'. Is a reserved word")
        self.error = 1
        return
    seen = {}
    for tok in self.tokens:
        if tok in seen:
            self.log.warning("Token '%s' multiply defined", tok)
        seen[tok] = 1
# Get the precedence map (if any)
def get_precedence(self):
    """Fetch the raw 'precedence' declaration (validated separately)."""
    self.prec = self.pdict.get("precedence", None)

# Validate and parse the precedence map
def validate_precedence(self):
    """Convert the raw precedence table into (term, assoc, level) tuples.

    Level numbering starts at 1; higher levels bind tighter. Sets
    self.error and logs a diagnostic on any malformed entry.
    """
    preclist = []
    if self.prec:
        if not isinstance(self.prec, (list, tuple)):
            self.log.error("precedence must be a list or tuple")
            self.error = 1
            return
        for level, entry in enumerate(self.prec):
            if not isinstance(entry, (list, tuple)):
                self.log.error("Bad precedence table")
                self.error = 1
                return
            if len(entry) < 2:
                self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)", entry)
                self.error = 1
                return
            assoc = entry[0]
            if not isinstance(assoc, str):
                self.log.error("precedence associativity must be a string")
                self.error = 1
                return
            for term in entry[1:]:
                if not isinstance(term, str):
                    self.log.error("precedence items must be strings")
                    self.error = 1
                    return
                preclist.append((term, assoc, level + 1))
    self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if name[:2] != 'p_': continue
if name == 'p_error': continue
if isinstance(item,(types.FunctionType,types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line,file,name,item.__doc__))
# Sort all of the actions by line number
p_functions.sort()
self.pfuncs = p_functions
    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check every collected p_* rule function and parse its docstring.

        For each entry produced by get_pfunctions(), verifies the argument
        count (1 for plain functions, 2 for bound methods), warns when the
        docstring is missing, and otherwise parses the docstring into
        grammar productions accumulated into self.grammar as
        (funcname, production) pairs.  A second pass flags suspicious
        definitions: p_* names that are not functions, and rule-shaped
        functions that appear to lack the p_ prefix.
        """
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return
        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            # Bound methods carry an implicit 'self', so they need 2 args
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            # Heuristic: a docstring whose second space-split piece is ':'
            # looks like a grammar rule ("name : ..."), so a rule-shaped
            # function without the p_ prefix is probably a mistake.
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    # Missing docstring or too short to inspect -- not a rule
                    pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build and return an LR parser.

    Grammar rules (p_* functions), the token list, and the precedence table
    are harvested from *module* (or from the caller's namespace when no
    module is given), validated, turned into a Grammar, and compiled into
    parse tables.  Previously generated tables are reused when their
    embedded signature matches the current grammar specification.

    :param method: table-construction algorithm name (e.g. 'LALR')
    :param debug: when true, emit a debugging log of the grammar/tables
    :param module: module object to scan for grammar definitions; defaults
                   to the caller's module
    :param tabmodule: module name used for the generated table file
    :param start: start symbol, overriding the module-level 'start' binding
    :param check_recursion: when true, check for unreachable symbols and
                            infinite recursion
    :param optimize: when true, reuse existing tables without comparing
                     signatures
    :param write_tables: when true, write the generated tables to disk
                         (forced off when *picklefile* is given)
    :param debugfile: name of the debugging output file
    :param outputdir: directory in which table files are written
    :param debuglog: logger for debug output (defaults to a file logger or
                     a null logger depending on *debug*)
    :param errorlog: logger for errors and warnings (defaults to stderr)
    :param picklefile: when given, read/write the tables as a pickle
    :raises YaccError: if the grammar specification contains errors
    :returns: an LRParser instance; also rebinds the module-global `parse`
    """
    global parse        # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        # No module given: reflect over the caller's frame (2 levels up)
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse cached tables when optimizing or when the grammar is unchanged
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        # Table file came from an incompatible version; fall through and
        # regenerate.  NOTE(review): sys.exc_info() here returns the full
        # 3-tuple (no [1] as in the other handlers), so the whole tuple is
        # stringified in the warning -- confirm whether that is intended.
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Missing or unreadable tables are normal on a first run; fall
        # through and regenerate them from scratch.
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    # Summarize unused token/rule counts (singular vs. plural wording)
    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    # Precedence declared for a symbol that never appears is a spec error
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar,method,debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)

        # Deduplicate: each (state, rule, rejected) triple is reported once
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1

        # Rules that lose every conflict are never reduced; warn once per rule
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)

    parse = parser.parse
    return parser
| mit | dfeed359d68cb0707b23a11749d6f3f6 | 38.222222 | 176 | 0.464472 | 4.74578 | false | false | false | false |
kivy/kivy | kivy/core/spelling/spelling_enchant.py | 4 | 1507 | '''
Enchant Spelling
================
Implementation spelling backend based on enchant.
.. warning:: pyenchant doesn't have dedicated build anymore for Windows/x64.
See https://github.com/kivy/kivy/issues/5816 for more information
'''
import enchant
from kivy.core.spelling import SpellingBase, NoSuchLangError
from kivy.compat import PY2
class SpellingEnchant(SpellingBase):
'''
Spelling backend based on the enchant library.
'''
def __init__(self, language=None):
self._language = None
super(SpellingEnchant, self).__init__(language)
def select_language(self, language):
try:
self._language = enchant.Dict(language)
except enchant.DictNotFoundError:
err = 'Enchant Backend: No language for "%s"' % (language, )
raise NoSuchLangError(err)
def list_languages(self):
# Note: We do NOT return enchant.list_dicts because that also returns
# the enchant dict objects and not only the language identifiers.
return enchant.list_languages()
def check(self, word):
if not word:
return None
return self._language.check(word)
def suggest(self, fragment):
suggestions = self._language.suggest(fragment)
# Don't show suggestions that are invalid
suggestions = [s for s in suggestions if self.check(s)]
if PY2:
suggestions = [s.decode('utf-8') for s in suggestions]
return suggestions
| mit | 87b332cb09fc1eefa76521abb3dee222 | 29.14 | 79 | 0.64499 | 4.14011 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/hackerFiles.py | 1 | 2750 | import webapp2
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import urllib
import json
import logging
import hacker_page
import operator
#Todo: shouldn't be called this.
class ChangeHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self, secret, key):
hacker = hacker_page.getHacker(secret)
if hacker is None:
logging.error("Attempted to change hacker's uploaded" + key + " but no hacker with key: " + secret)
return self.redirect('/')
newFiles = map(lambda f: f.key(), self.get_uploads(key))
multipleFileUpload = self.request.get('multiple') == "true"
if multipleFileUpload:
existingFiles = getattr(hacker, key, [])
value = existingFiles + newFiles
elif len(newFiles) > 0:
existingFile = getattr(hacker, key, None)
if existingFile:
blobstore.delete(existingFile)
value = newFiles[0]
else:
value = None
setattr(hacker, key, value)
hacker_page.putHacker(hacker)
downloadLinks = map(getDownloadLink, newFiles)
fileNames = getFileNames(newFiles)
self.response.write(json.dumps({"downloadLinks": downloadLinks, "fileNames" : fileNames}))
def getDownloadLink(blobKey):
return '/__serve/' + str(blobKey)
def newURL(secret, key):
return blobstore.create_upload_url('/secret/__change/' + secret + '/' + key)
class DeleteFileHandler(webapp2.RequestHandler):
def post(self, secret):
hacker = hacker_page.getHacker(secret)
if hacker is None:
logging.error("Attempted to change hacker's uploaded" + key + " but no hacker with key: " + secret)
return self.response.write("failure")
key = self.request.get('key')
files = getattr(hacker, key, [])
files.remove(self.request.get('blobKey'))
setattr(hacker, key, files)
blobstore.delete(self.request.get('blobKey'))
hacker_page.putHacker(hacker)
def getFileName(blobKey):
if blobKey is None:
return None
info = blobstore.BlobInfo.get(blobKey)
if info is None:
return None
return info.filename
def getFileNames(blobKeys):
if blobKeys is None:
return None
return map(getFileName, blobKeys)
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, resource):
resource = str(urllib.unquote(resource))
blob_info = blobstore.BlobInfo.get(resource)
self.send_blob(blob_info)
class NewURLHandler(webapp2.RequestHandler):
def get(self, secret, key):
url = newURL(secret, key);
self.response.write(json.dumps({"newURL": url}))
| mit | 45ff476c3b44007bf29a1bc05d752a01 | 30.976744 | 111 | 0.656 | 3.782669 | false | false | false | false |
kivy/kivy | kivy/input/postproc/retaintouch.py | 80 | 3224 | '''
Retain Touch
============
Reuse touch to counter lost finger behavior
'''
__all__ = ('InputPostprocRetainTouch', )
from kivy.config import Config
from kivy.vector import Vector
import time
class InputPostprocRetainTouch(object):
'''
InputPostprocRetainTouch is a post-processor to delay the 'up' event of a
touch, to reuse it under certains conditions. This module is designed to
prevent lost finger touches on some hardware/setups.
Retain touch can be configured in the Kivy config file::
[postproc]
retain_time = 100
retain_distance = 50
The distance parameter is in the range 0-1000 and time is in milliseconds.
'''
def __init__(self):
self.timeout = Config.getint('postproc', 'retain_time') / 1000.0
self.distance = Config.getint('postproc', 'retain_distance') / 1000.0
self._available = []
self._links = {}
def process(self, events):
# check if module is disabled
if self.timeout == 0:
return events
d = time.time()
for etype, touch in events[:]:
if not touch.is_touch:
continue
if etype == 'end':
events.remove((etype, touch))
if touch.uid in self._links:
selection = self._links[touch.uid]
selection.ud.__pp_retain_time__ = d
self._available.append(selection)
del self._links[touch.uid]
else:
touch.ud.__pp_retain_time__ = d
self._available.append(touch)
elif etype == 'update':
if touch.uid in self._links:
selection = self._links[touch.uid]
selection.x = touch.x
selection.y = touch.y
selection.sx = touch.sx
selection.sy = touch.sy
events.remove((etype, touch))
events.append((etype, selection))
else:
pass
elif etype == 'begin':
# new touch, found the nearest one
selection = None
selection_distance = 99999
for touch2 in self._available:
touch_distance = Vector(touch2.spos).distance(touch.spos)
if touch_distance > self.distance:
continue
if touch2.__class__ != touch.__class__:
continue
if touch_distance < selection_distance:
# eligible for continuation
selection_distance = touch_distance
selection = touch2
if selection is None:
continue
self._links[touch.uid] = selection
self._available.remove(selection)
events.remove((etype, touch))
for touch in self._available[:]:
t = touch.ud.__pp_retain_time__
if d - t > self.timeout:
self._available.remove(touch)
events.append(('end', touch))
return events
| mit | b7266c5ccd06c1dea025312eb12cde39 | 33.666667 | 78 | 0.509305 | 4.826347 | false | true | false | false |
kivy/kivy | examples/widgets/compound_selection.py | 21 | 3036 | from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.app import runTouchApp
class SelectableGrid(FocusBehavior, CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
def print_selection(*l):
print('selected: ', [x.text for x in self.selected_nodes])
self.bind(selected_nodes=print_selection)
def keyboard_on_key_down(self, window, keycode, text, modifiers):
if super(SelectableGrid, self).keyboard_on_key_down(
window, keycode, text, modifiers):
return True
if self.select_with_key_down(window, keycode, text, modifiers):
return True
return False
def keyboard_on_key_up(self, window, keycode):
if super(SelectableGrid, self).keyboard_on_key_up(window, keycode):
return True
if self.select_with_key_up(window, keycode):
return True
return False
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node != last_node:
return node, idx
items = list(enumerate(self.get_selectable_nodes()))
'''If self.nodes_order_reversed (the default due to using
self.children which is reversed), the index is counted from the
starts of the selectable nodes, like normal but the nodes are traversed
in the reverse order.
'''
# start searching after the last selected node
if not self.nodes_order_reversed:
items = items[last_node_idx + 1:] + items[:last_node_idx + 1]
else:
items = items[:last_node_idx][::-1] + items[last_node_idx:][::-1]
for i, child in items:
if child.text.startswith(key):
return child, i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
| mit | 4ebd3aff39cf52b192a276640c36d2bf | 35.578313 | 79 | 0.614625 | 3.852792 | false | false | false | false |
kivy/kivy | examples/canvas/lines.py | 5 | 10144 | '''
Line (SmoothLine) Experiment
============================
This demonstrates the experimental and unfinished SmoothLine feature
for fast line drawing. You should see a multi-segment
path at the top of the screen, and sliders and buttons along the bottom.
You can click to add new points to the segment, change the transparency
and width of the line, or hit 'Animate' to see a set of sine and cosine
animations. The Cap and Joint buttons don't work: SmoothLine has not
implemented these features yet.
'''
from kivy.app import App
from kivy.properties import OptionProperty, NumericProperty, ListProperty, \
BooleanProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.clock import Clock
from math import cos, sin
Builder.load_string('''
<LinePlayground>:
canvas:
Color:
rgba: .4, .4, 1, root.alpha
Line:
points: self.points
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
dash_length: self.dash_length
dash_offset: self.dash_offset
dashes: self.dashes
Color:
rgba: .8, .8, .8, root.alpha_controlline
Line:
points: self.points
close: self.close
dash_length: self.dash_length
dash_offset: self.dash_offset
dashes: self.dashes
Color:
rgba: 1, .4, .4, root.alpha
Line:
points: self.points2
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
dash_length: self.dash_length
dash_offset: self.dash_offset
dashes: self.dashes
GridLayout:
cols: 2
size_hint: 1, None
height: 44 * 5
GridLayout:
cols: 2
Label:
text: 'Alpha'
Slider:
value: root.alpha
on_value: root.alpha = float(args[1])
min: 0.
max: 1.
Label:
text: 'Alpha Control Line'
Slider:
value: root.alpha_controlline
on_value: root.alpha_controlline = float(args[1])
min: 0.
max: 1.
Label:
text: 'Width'
Slider:
value: root.linewidth
on_value: root.linewidth = args[1]
min: 1
max: 40
Label:
text: 'Cap'
GridLayout:
rows: 1
ToggleButton:
group: 'cap'
text: 'none'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'round'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'square'
on_press: root.cap = self.text
Label:
text: 'Joint'
GridLayout:
rows: 1
ToggleButton:
group: 'joint'
text: 'none'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'round'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'miter'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'bevel'
on_press: root.joint = self.text
Label:
text: 'Close'
ToggleButton:
text: 'Close line'
on_press: root.close = self.state == 'down'
Label:
text: 'Dashes'
GridLayout:
rows: 1
ToggleButton:
group: 'dashes'
text: 'none'
state: 'down'
allow_no_selection: False
size_hint_x: None
width: self.texture_size[0]
padding_x: '5dp'
on_state:
if self.state == 'down': root.dashes = []
if self.state == 'down': root.dash_length = 1
if self.state == 'down': root.dash_offset = 0
ToggleButton:
id: constant
group: 'dashes'
text: 'Constant: '
allow_no_selection: False
size_hint_x: None
width: self.texture_size[0]
padding_x: '5dp'
on_state:
if self.state == 'down': root.dashes = []
if self.state == 'down': root.dash_length = \
int(dash_len.text or 1)
if self.state == 'down': root.dash_offset = \
int(dash_offset.text or 0)
Label:
text: 'len'
size_hint_x: None
width: self.texture_size[0]
padding_x: '5dp'
TextInput:
id: dash_len
size_hint_x: None
width: '30dp'
input_filter: 'int'
multiline: False
text: '1'
on_text: if constant.state == 'down': \
root.dash_length = int(self.text or 1)
Label:
text: 'offset'
size_hint_x: None
width: self.texture_size[0]
padding_x: '5dp'
TextInput:
id: dash_offset
size_hint_x: None
width: '30dp'
input_filter: 'int'
multiline: False
text: '0'
on_text: if constant.state == 'down': \
root.dash_offset = int(self.text or 0)
ToggleButton:
id: dash_list
group: 'dashes'
text: 'List: '
allow_no_selection: False
size_hint_x: None
width: self.texture_size[0]
padding_x: '5dp'
on_state:
if self.state == 'down': root.dashes = list(map(lambda\
x: int(x or 0), dash_list_in.text.split(',')))
if self.state == 'down': root.dash_length = 1
if self.state == 'down': root.dash_offset = 0
TextInput:
id: dash_list_in
size_hint_x: None
width: '180dp'
multiline: False
text: '4,3,10,15'
on_text: if dash_list.state == 'down': root.dashes = \
list(map(lambda x: int(x or 0), self.text.split(',')))
AnchorLayout:
GridLayout:
cols: 1
size_hint: None, None
size: self.minimum_size
ToggleButton:
size_hint: None, None
size: 100, 44
text: 'Animate'
on_state: root.animate(self.state == 'down')
Button:
size_hint: None, None
size: 100, 44
text: 'Clear'
on_press: root.points = root.points2 = []
''')
class LinePlayground(FloatLayout):
alpha_controlline = NumericProperty(1.0)
alpha = NumericProperty(0.5)
close = BooleanProperty(False)
points = ListProperty([(500, 500),
[300, 300, 500, 300],
[500, 400, 600, 400]])
points2 = ListProperty([])
joint = OptionProperty('none', options=('round', 'miter', 'bevel', 'none'))
cap = OptionProperty('none', options=('round', 'square', 'none'))
linewidth = NumericProperty(10.0)
dt = NumericProperty(0)
dash_length = NumericProperty(1)
dash_offset = NumericProperty(0)
dashes = ListProperty([])
_update_points_animation_ev = None
def on_touch_down(self, touch):
if super(LinePlayground, self).on_touch_down(touch):
return True
touch.grab(self)
self.points.append(touch.pos)
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
self.points[-1] = touch.pos
return True
return super(LinePlayground, self).on_touch_move(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
return True
return super(LinePlayground, self).on_touch_up(touch)
def animate(self, do_animation):
if do_animation:
self._update_points_animation_ev = Clock.schedule_interval(
self.update_points_animation, 0)
elif self._update_points_animation_ev is not None:
self._update_points_animation_ev.cancel()
def update_points_animation(self, dt):
cy = self.height * 0.6
cx = self.width * 0.1
w = self.width * 0.8
step = 20
points = []
points2 = []
self.dt += dt
for i in range(int(w / step)):
x = i * step
points.append(cx + x)
points.append(cy + cos(x / w * 8. + self.dt) * self.height * 0.2)
points2.append(cx + x)
points2.append(cy + sin(x / w * 8. + self.dt) * self.height * 0.2)
self.points = points
self.points2 = points2
class TestLineApp(App):
def build(self):
return LinePlayground()
if __name__ == '__main__':
TestLineApp().run()
| mit | a9455366a21dd9dd9ae314a0661f1bb2 | 33.27027 | 79 | 0.451203 | 4.42197 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests_oauthlib/oauth1_session.py | 6 | 16132 | from __future__ import unicode_literals
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import logging
from oauthlib.common import add_params_to_uri
from oauthlib.common import urldecode as _urldecode
from oauthlib.oauth1 import (
SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
)
import requests
from . import OAuth1
import sys
if sys.version > "3":
unicode = str
log = logging.getLogger(__name__)
def urldecode(body):
"""Parse query or json to python dictionary"""
try:
return _urldecode(body)
except:
import json
return json.loads(body)
class TokenRequestDenied(ValueError):
def __init__(self, message, status_code):
super(TokenRequestDenied, self).__init__(message)
self.status_code = status_code
class TokenMissing(ValueError):
def __init__(self, message, response):
super(TokenMissing, self).__init__(message)
self.response = response
class VerifierMissing(ValueError):
pass
class OAuth1Session(requests.Session):
"""Request signing and convenience methods for the oauth dance.
What is the difference between OAuth1Session and OAuth1?
OAuth1Session actually uses OAuth1 internally and it's purpose is to assist
in the OAuth workflow through convenience methods to prepare authorization
URLs and parse the various token and redirection responses. It also provide
rudimentary validation of responses.
An example of the OAuth workflow using a basic CLI app and Twitter.
>>> # Credentials obtained during the registration.
>>> client_key = 'client key'
>>> client_secret = 'secret'
>>> callback_uri = 'https://127.0.0.1/callback'
>>>
>>> # Endpoints found in the OAuth provider API documentation
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>>
>>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri)
>>>
>>> # First step, fetch the request token.
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'kjerht2309u',
'oauth_token_secret': 'lsdajfh923874',
}
>>>
>>> # Second step. Follow this link and authorize
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
>>>
>>> # Third step. Fetch the access token
>>> redirect_response = raw_input('Paste the full redirect URL here.')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> # Done. You can now make OAuth requests.
>>> status_url = 'http://api.twitter.com/1/statuses/update.json'
>>> new_status = {'status': 'hello world!'}
>>> oauth_session.post(status_url, data=new_status)
<Response [200]>
"""
def __init__(self, client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None,
verifier=None,
client_class=None,
force_include_body=False,
**kwargs):
"""Construct the OAuth 1 session.
:param client_key: A client specific identifier.
:param client_secret: A client specific secret used to create HMAC and
plaintext signatures.
:param resource_owner_key: A resource owner key, also referred to as
request token or access token depending on
when in the workflow it is used.
:param resource_owner_secret: A resource owner secret obtained with
either a request or access token. Often
referred to as token secret.
:param callback_uri: The URL the user is redirect back to after
authorization.
:param signature_method: Signature methods determine how the OAuth
signature is created. The three options are
oauthlib.oauth1.SIGNATURE_HMAC (default),
oauthlib.oauth1.SIGNATURE_RSA and
oauthlib.oauth1.SIGNATURE_PLAIN.
:param signature_type: Signature type decides where the OAuth
parameters are added. Either in the
Authorization header (default) or to the URL
query parameters or the request body. Defined as
oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
oauthlib.oauth1.SIGNATURE_TYPE_BODY
respectively.
:param rsa_key: The private RSA key as a string. Can only be used with
signature_method=oauthlib.oauth1.SIGNATURE_RSA.
:param verifier: A verifier string to prove authorization was granted.
:param client_class: A subclass of `oauthlib.oauth1.Client` to use with
`requests_oauthlib.OAuth1` instead of the default
:param force_include_body: Always include the request body in the
signature creation.
:param **kwargs: Additional keyword arguments passed to `OAuth1`
"""
super(OAuth1Session, self).__init__()
self._client = OAuth1(client_key,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
callback_uri=callback_uri,
signature_method=signature_method,
signature_type=signature_type,
rsa_key=rsa_key,
verifier=verifier,
client_class=client_class,
force_include_body=force_include_body,
**kwargs)
self.auth = self._client
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
if self._client.client.signature_method == SIGNATURE_RSA:
# RSA only uses resource_owner_key
return bool(self._client.client.resource_owner_key)
else:
# other methods of authentication use all three pieces
return (
bool(self._client.client.client_secret) and
bool(self._client.client.resource_owner_key) and
bool(self._client.client.resource_owner_secret)
)
def authorization_url(self, url, request_token=None, **kwargs):
"""Create an authorization URL by appending request_token and optional
kwargs to url.
This is the second step in the OAuth 1 workflow. The user should be
redirected to this authorization URL, grant access to you, and then
be redirected back to you. The redirection back can either be specified
during client registration or by supplying a callback URI per request.
:param url: The authorization endpoint URL.
:param request_token: The previously obtained request token.
:param kwargs: Optional parameters to append to the URL.
:returns: The authorization URL with new parameters embedded.
An example using a registered default callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
>>> oauth_session.authorization_url(authorization_url, foo='bar')
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'
An example using an explicit callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
"""
kwargs['oauth_token'] = request_token or self._client.client.resource_owner_key
log.debug('Adding parameters %s to url %s', kwargs, url)
return add_params_to_uri(url, kwargs.items())
def fetch_request_token(self, url, realm=None):
"""Fetch a request token.
This is the first step in the OAuth 1 workflow. A request token is
obtained by making a signed post request to url. The token is then
parsed from the application/x-www-form-urlencoded response and ready
to be used to construct an authorization url.
:param url: The request token endpoint URL.
:param realm: A list of realms to request access to.
:returns: The response in dict format.
Note that a previously set callback_uri will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
self._client.client.realm = ' '.join(realm) if realm else None
token = self._fetch_token(url)
log.debug('Resetting callback_uri and realm (not needed in next phase).')
self._client.client.callback_uri = None
self._client.client.realm = None
return token
def fetch_access_token(self, url, verifier=None):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier
if not getattr(self._client.client, 'verifier', None):
raise VerifierMissing('No client verifier has been set.')
token = self._fetch_token(url)
log.debug('Resetting verifier attribute, should not be used anymore.')
self._client.client.verifier = None
return token
def parse_authorization_response(self, url):
"""Extract parameters from the post authorization redirect response URL.
:param url: The full URL that resulted from the user being redirected
back from the OAuth provider to you, the client.
:returns: A dict of parameters extracted from the URL.
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
"""
log.debug('Parsing token from query part of url %s', url)
token = dict(urldecode(urlparse(url).query))
log.debug('Updating internal client token attribute.')
self._populate_attributes(token)
return token
def _populate_attributes(self, token):
if 'oauth_token' in token:
self._client.client.resource_owner_key = token['oauth_token']
else:
raise TokenMissing(
'Response does not contain a token: {resp}'.format(resp=token),
token,
)
if 'oauth_token_secret' in token:
self._client.client.resource_owner_secret = (
token['oauth_token_secret'])
if 'oauth_verifier' in token:
self._client.client.verifier = token['oauth_verifier']
def _fetch_token(self, url):
log.debug('Fetching token from %s using client %s', url, self._client.client)
r = self.post(url)
if r.status_code >= 400:
error = "Token request failed with code %s, response was '%s'."
raise TokenRequestDenied(error % (r.status_code, r.text), r.status_code)
log.debug('Decoding token from response "%s"', r.text)
try:
token = dict(urldecode(r.text))
except ValueError as e:
error = ("Unable to decode token from token response. "
"This is commonly caused by an unsuccessful request where"
" a non urlencoded error message is returned. "
"The decoding error was %s""" % e)
raise ValueError(error)
log.debug('Obtained token %s', token)
log.debug('Updating internal client attributes from token data.')
self._populate_attributes(token)
return token
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec.
"""
if 'Authorization' in prepared_request.headers:
# If we get redirected to a new host, we should strip out
# any authentication headers.
prepared_request.headers.pop('Authorization', True)
prepared_request.prepare_auth(self.auth)
return
| mit | 56b8b55d6f438b6d2b4f566cf47fa99c | 42.48248 | 145 | 0.623915 | 4.237457 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/cssutils/helper.py | 4 | 3982 | """cssutils helper TEST
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: errorhandler.py 1234 2008-05-22 20:26:12Z cthedot $'
import os
import re
import sys
import urllib
class Deprecated(object):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
It accepts a single paramter ``msg`` which is shown with the warning.
It should contain information which function or method to use instead.
"""
def __init__(self, msg):
self.msg = msg
def __call__(self, func):
def newFunc(*args, **kwargs):
import warnings
warnings.warn("Call to deprecated method %r. %s" %
(func.__name__, self.msg),
category=DeprecationWarning,
stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
# simple escapes, all non unicodes
_simpleescapes = re.compile(ur'(\\[^0-9a-fA-F])').sub
def normalize(x):
"""
normalizes x, namely:
- remove any \ before non unicode sequences (0-9a-zA-Z) so for
x=="c\olor\" return "color" (unicode escape sequences should have
been resolved by the tokenizer already)
- lowercase
"""
if x:
def removeescape(matchobj):
return matchobj.group(0)[1:]
x = _simpleescapes(removeescape, x)
return x.lower()
else:
return x
def path2url(path):
"""Return file URL of `path`"""
return u'file:' + urllib.pathname2url(os.path.abspath(path))
def pushtoken(token, tokens):
"""Return new generator starting with token followed by all tokens in
``tokens``"""
# TODO: may use itertools.chain?
yield token
for t in tokens:
yield t
def string(value):
"""
Serialize value with quotes e.g.::
``a \'string`` => ``'a \'string'``
"""
# \n = 0xa, \r = 0xd, \f = 0xc
value = value.replace(u'\n', u'\\a ').replace(
u'\r', u'\\d ').replace(
u'\f', u'\\c ').replace(
u'"', u'\\"')
if value.endswith(u'\\'):
value = value[:-1] + u'\\\\'
return u'"%s"' % value
def stringvalue(string):
"""
Retrieve actual value of string without quotes. Escaped
quotes inside the value are resolved, e.g.::
``'a \'string'`` => ``a 'string``
"""
return string.replace(u'\\'+string[0], string[0])[1:-1]
_match_forbidden_in_uri = re.compile(ur'''.*?[\(\)\s\;,'"]''', re.U).match
def uri(value):
"""
Serialize value by adding ``url()`` and with quotes if needed e.g.::
``"`` => ``url("\"")``
"""
if _match_forbidden_in_uri(value):
value = string(value)
return u'url(%s)' % value
def urivalue(uri):
"""
Return actual content without surrounding "url(" and ")"
and removed surrounding quotes too including contained
escapes of quotes, e.g.::
``url("\"")`` => ``"``
"""
uri = uri[uri.find('(')+1:-1].strip()
if uri and (uri[0] in '\'"') and (uri[0] == uri[-1]):
return stringvalue(uri)
else:
return uri
#def normalnumber(num):
# """
# Return normalized number as string.
# """
# sign = ''
# if num.startswith('-'):
# sign = '-'
# num = num[1:]
# elif num.startswith('+'):
# num = num[1:]
#
# if float(num) == 0.0:
# return '0'
# else:
# if num.find('.') == -1:
# return sign + str(int(num))
# else:
# a, b = num.split('.')
# if not a:
# a = '0'
# return '%s%s.%s' % (sign, int(a), b)
| mit | e324ab58e56905c1041591c8665984ba | 27.065693 | 74 | 0.510799 | 3.62 | false | false | false | false |
kivy/kivy | kivy/effects/kinetic.py | 3 | 5778 | '''
Kinetic effect
==============
.. versionadded:: 1.7.0
The :class:`KineticEffect` is the base class that is used to compute the
velocity out of a movement. When the movement is finished, the effect will
compute the position of the movement according to the velocity, and reduce the
velocity with a friction. The movement stop until the velocity is 0.
Conceptually, the usage could be::
>>> effect = KineticEffect()
>>> effect.start(10)
>>> effect.update(15)
>>> effect.update(30)
>>> effect.stop(48)
Over the time, you will start a movement of a value, update it, and stop the
movement. At this time, you'll get the movement value into
:attr:`KineticEffect.value`. On the example i've typed manually, the computed
velocity will be::
>>> effect.velocity
3.1619100231163046
After multiple clock interaction, the velocity will decrease according to
:attr:`KineticEffect.friction`. The computed value will be stored in
:attr:`KineticEffect.value`. The output of this `value` could be::
46.30038145219605
54.58302451968686
61.9229016256196
# ...
'''
__all__ = ('KineticEffect', )
from time import time
from kivy.event import EventDispatcher
from kivy.properties import NumericProperty, BooleanProperty
from kivy.clock import Clock
class KineticEffect(EventDispatcher):
'''Kinetic effect class. See module documentation for more information.
'''
velocity = NumericProperty(0)
'''Velocity of the movement.
:attr:`velocity` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
friction = NumericProperty(0.05)
'''Friction to apply on the velocity
:attr:`friction` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.05.
'''
value = NumericProperty(0)
'''Value (during the movement and computed) of the effect.
:attr:`value` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
is_manual = BooleanProperty(False)
'''Indicate if a movement is in progress (True) or not (False).
:attr:`is_manual` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
max_history = NumericProperty(5)
'''Save up to `max_history` movement value into the history. This is used
for correctly calculating the velocity according to the movement.
:attr:`max_history` is a :class:`~kivy.properties.NumericProperty` and
defaults to 5.
'''
min_distance = NumericProperty(.1)
'''The minimal distance for a movement to have nonzero velocity.
.. versionadded:: 1.8.0
:attr:`min_distance` is :class:`~kivy.properties.NumericProperty` and
defaults to 0.1.
'''
min_velocity = NumericProperty(.5)
'''Velocity below this quantity is normalized to 0. In other words,
any motion whose velocity falls below this number is stopped.
.. versionadded:: 1.8.0
:attr:`min_velocity` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.5.
'''
std_dt = NumericProperty(0.017)
''' std_dt
correction update_velocity if dt is not constant
.. versionadded:: 2.0.0
:attr:`std_dt` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.017.
'''
def __init__(self, **kwargs):
self.history = []
self.trigger_velocity_update = Clock.create_trigger(
self.update_velocity, 0)
super(KineticEffect, self).__init__(**kwargs)
def apply_distance(self, distance):
if abs(distance) < self.min_distance:
self.velocity = 0
self.value += distance
def start(self, val, t=None):
'''Start the movement.
:Parameters:
`val`: float or int
Value of the movement
`t`: float, defaults to None
Time when the movement happen. If no time is set, it will use
time.time()
'''
self.is_manual = True
t = t or time()
self.velocity = 0
self.history = [(t, val)]
def update(self, val, t=None):
'''Update the movement.
See :meth:`start` for the arguments.
'''
t = t or time()
distance = val - self.history[-1][1]
self.apply_distance(distance)
self.history.append((t, val))
if len(self.history) > self.max_history:
self.history.pop(0)
def stop(self, val, t=None):
'''Stop the movement.
See :meth:`start` for the arguments.
'''
self.is_manual = False
t = t or time()
distance = val - self.history[-1][1]
self.apply_distance(distance)
newest_sample = (t, val)
old_sample = self.history[0]
for sample in self.history:
if (newest_sample[0] - sample[0]) < 10. / 60.:
break
old_sample = sample
distance = newest_sample[1] - old_sample[1]
duration = abs(newest_sample[0] - old_sample[0])
self.velocity = (distance / max(duration, 0.0001))
self.trigger_velocity_update()
def cancel(self):
'''Cancel a movement. This can be used in case :meth:`stop` cannot be
called. It will reset :attr:`is_manual` to False, and compute the
movement if the velocity is > 0.
'''
self.is_manual = False
self.trigger_velocity_update()
def update_velocity(self, dt):
'''(internal) Update the velocity according to the frametime and
friction.
'''
if abs(self.velocity) <= self.min_velocity:
self.velocity = 0
return
self.velocity -= self.velocity * self.friction * dt / self.std_dt
self.apply_distance(self.velocity * dt)
self.trigger_velocity_update()
| mit | 738b5304b235175928ef411b5fc8f647 | 28.937824 | 78 | 0.626168 | 3.862299 | false | false | false | false |
kivy/kivy | kivy/uix/recycleboxlayout.py | 21 | 6178 | """
RecycleBoxLayout
================
.. versionadded:: 1.10.0
.. warning::
This module is highly experimental, its API may change in the future and
the documentation is not complete at this time.
The RecycleBoxLayout is designed to provide a
:class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the
:class:`~kivy.uix.recycleview.RecycleView` widget. Please refer to the
:mod:`~kivy.uix.recycleview` module documentation for more information.
"""
from kivy.uix.recyclelayout import RecycleLayout
from kivy.uix.boxlayout import BoxLayout
__all__ = ('RecycleBoxLayout', )
class RecycleBoxLayout(RecycleLayout, BoxLayout):
_rv_positions = None
def __init__(self, **kwargs):
super(RecycleBoxLayout, self).__init__(**kwargs)
self.funbind('children', self._trigger_layout)
def _update_sizes(self, changed):
horizontal = self.orientation == 'horizontal'
padding_left, padding_top, padding_right, padding_bottom = self.padding
padding_x = padding_left + padding_right
padding_y = padding_top + padding_bottom
selfw = self.width
selfh = self.height
layout_w = max(0, selfw - padding_x)
layout_h = max(0, selfh - padding_y)
cx = self.x + padding_left
cy = self.y + padding_bottom
view_opts = self.view_opts
remove_view = self.remove_view
for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),
(shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),
(shwn_max, shhn_max), ph, phn) in changed:
if (horizontal and
(shw != shnw or w != wn or shw_min != shwn_min or
shw_max != shwn_max) or
not horizontal and
(shh != shnh or h != hn or shh_min != shhn_min or
shh_max != shhn_max)):
return True
remove_view(widget, index)
opt = view_opts[index]
if horizontal:
wo, ho = opt['size']
if shnh is not None:
_, h = opt['size'] = [wo, shnh * layout_h]
else:
h = ho
xo, yo = opt['pos']
for key, value in phn.items():
posy = value * layout_h
if key == 'y':
yo = posy + cy
elif key == 'top':
yo = posy - h
elif key == 'center_y':
yo = posy - (h / 2.)
opt['pos'] = [xo, yo]
else:
wo, ho = opt['size']
if shnw is not None:
w, _ = opt['size'] = [shnw * layout_w, ho]
else:
w = wo
xo, yo = opt['pos']
for key, value in phn.items():
posx = value * layout_w
if key == 'x':
xo = posx + cx
elif key == 'right':
xo = posx - w
elif key == 'center_x':
xo = posx - (w / 2.)
opt['pos'] = [xo, yo]
return False
def compute_layout(self, data, flags):
super(RecycleBoxLayout, self).compute_layout(data, flags)
changed = self._changed_views
if (changed is None or
changed and not self._update_sizes(changed)):
return
self.clear_layout()
self._rv_positions = None
if not data:
l, t, r, b = self.padding
self.minimum_size = l + r, t + b
return
view_opts = self.view_opts
n = len(view_opts)
for i, x, y, w, h in self._iterate_layout(
[(opt['size'], opt['size_hint'], opt['pos_hint'],
opt['size_hint_min'], opt['size_hint_max']) for
opt in reversed(view_opts)]):
opt = view_opts[n - i - 1]
shw, shh = opt['size_hint']
opt['pos'] = x, y
wo, ho = opt['size']
# layout won't/shouldn't change previous size if size_hint is None
# which is what w/h being None means.
opt['size'] = [(wo if shw is None else w),
(ho if shh is None else h)]
spacing = self.spacing
pos = self._rv_positions = [None, ] * len(data)
if self.orientation == 'horizontal':
pos[0] = self.x
last = pos[0] + self.padding[0] + view_opts[0]['size'][0] + \
spacing / 2.
for i, val in enumerate(view_opts[1:], 1):
pos[i] = last
last += val['size'][0] + spacing
else:
last = pos[-1] = \
self.y + self.height - self.padding[1] - \
view_opts[0]['size'][1] - spacing / 2.
n = len(view_opts)
for i, val in enumerate(view_opts[1:], 1):
last -= spacing + val['size'][1]
pos[n - 1 - i] = last
def get_view_index_at(self, pos):
calc_pos = self._rv_positions
if not calc_pos:
return 0
x, y = pos
if self.orientation == 'horizontal':
if x >= calc_pos[-1] or len(calc_pos) == 1:
return len(calc_pos) - 1
ix = 0
for val in calc_pos[1:]:
if x < val:
return ix
ix += 1
else:
if y >= calc_pos[-1] or len(calc_pos) == 1:
return 0
iy = 0
for val in calc_pos[1:]:
if y < val:
return len(calc_pos) - iy - 1
iy += 1
assert False
def compute_visible_views(self, data, viewport):
if self._rv_positions is None or not data:
return []
x, y, w, h = viewport
at_idx = self.get_view_index_at
if self.orientation == 'horizontal':
a, b = at_idx((x, y)), at_idx((x + w, y))
else:
a, b = at_idx((x, y + h)), at_idx((x, y))
return list(range(a, b + 1))
| mit | bd3cd1cfd23d2fcf025eb1fcf2748187 | 32.759563 | 79 | 0.46439 | 3.78786 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/urllib3/__init__.py | 649 | 1701 | # urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
| mit | d3165d298047502c3746f4b39bf746aa | 28.327586 | 84 | 0.715461 | 3.901376 | false | false | false | false |
kivy/kivy | kivy/tools/pep8checker/pep8kivy.py | 4 | 3343 | import sys
from os import walk
from os.path import isdir, join, normpath
import pep8
pep8_ignores = (
'E125', # continuation line does not
# distinguish itself from next logical line
'E126', # continuation line over-indented for hanging indent
'E127', # continuation line over-indented for visual indent
'E128', # continuation line under-indented for visual indent
'E402', # module level import not at top of file
'E741', # ambiguous variable name
'E731', # do not assign a lambda expression, use a def
'W503', # allow putting binary operators after line split
)
class KivyStyleChecker(pep8.Checker):
def __init__(self, filename):
pep8.Checker.__init__(self, filename, ignore=pep8_ignores)
def report_error(self, line_number, offset, text, check):
return pep8.Checker.report_error(
self, line_number, offset, text, check)
if __name__ == '__main__':

    print("DEPRECATED: Use pre-commit.com framework instead: ",
          "pip install pre-commit && make hook")

    def usage():
        # Print CLI help and abort with a non-zero exit code.
        print('Usage: python pep8kivy.py <file_or_folder_to_check>*')
        print('Folders will be checked recursively.')
        sys.exit(1)

    if len(sys.argv) < 2:
        usage()
    # NOTE(review): this compares the argv *list* to an int, so the branch
    # is never taken and every invocation falls through to the else.
    # Naively "fixing" it to ``len(sys.argv) == 2`` would assign a plain
    # string to ``targets`` and make the loop below iterate characters, so
    # the dead branch is accidentally protective. Consider
    # ``targets = [sys.argv[-1]]`` if cleaning this up.
    elif sys.argv == 2:
        targets = sys.argv[-1]
    else:
        targets = sys.argv[-1].split()

    def check(fn):
        # Run the Kivy pep8 checker on a single file; returns the number of
        # style violations found.
        try:
            checker = KivyStyleChecker(fn)
        except IOError:
            # File couldn't be opened, so was deleted apparently.
            # Don't check deleted files.
            return 0
        return checker.check_all()

    errors = 0
    exclude_dirs = [
        'kivy/lib',
        'kivy/deps',
        'kivy/tools/pep8checker',
        'coverage',
        'doc'
    ]
    exclude_dirs = [normpath(i) for i in exclude_dirs]
    exclude_files = [
        'kivy/gesture.py',
        'kivy/tools/stub-gl-debug.py',
        'kivy/modules/webdebugger.py',
        'kivy/modules/_webdebugger.py'
    ]
    exclude_files = [normpath(i) for i in exclude_files]

    for target in targets:
        if isdir(target):
            for dirpath, dirnames, filenames in walk(target):
                cont = False
                dpath = normpath(dirpath)
                # Skip any directory under an excluded prefix.
                for pat in exclude_dirs:
                    if dpath.startswith(pat):
                        cont = True
                        break
                if cont:
                    continue

                for filename in filenames:
                    if not filename.endswith('.py'):
                        continue
                    cont = False
                    complete_filename = join(dirpath, filename)
                    # Skip individually excluded files.
                    for pat in exclude_files:
                        if complete_filename.endswith(pat):
                            cont = True
                    if cont:
                        continue

                    errors += check(complete_filename)

        else:
            # Got a single file to check
            for pat in exclude_dirs + exclude_files:
                if pat in target:
                    break
            else:
                if target.endswith('.py'):
                    errors += check(target)

    if errors:
        print("Error: {} style guide violation(s) encountered.".format(errors))
        sys.exit(1)
| mit | 68e2fb39ac4e2ff7f97bc3128c937800 | 29.669725 | 79 | 0.535746 | 4.375654 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/packages/charade/utf8prober.py | 207 | 2728 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
| mit | e6f7668111be1277c540b6e71f0f4837 | 33.894737 | 69 | 0.615469 | 4.337043 | false | false | false | false |
kivy/kivy | kivy/core/video/video_gstplayer.py | 5 | 4006 | '''
Video Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of a VideoBase with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`
This player is the preferred player, using Gstreamer 1.0, working on both
Python 2 and 3.
'''
try:
from kivy.lib.gstplayer import GstPlayer, get_gst_version
except ImportError:
from kivy.core import handle_win_lib_import_error
handle_win_lib_import_error(
'VideoGstplayer', 'gst', 'kivy.lib.gstplayer._gstplayer')
raise
from kivy.graphics.texture import Texture
from kivy.core.video import VideoBase
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.compat import PY2
from threading import Lock
from functools import partial
from os.path import realpath
from weakref import ref
if PY2:
from urllib import pathname2url
else:
from urllib.request import pathname2url
Logger.info('VideoGstplayer: Using Gstreamer {}'.format(
'.'.join(map(str, get_gst_version()))))
def _on_gstplayer_buffer(video, width, height, data):
video = video()
# if we still receive the video but no more player, remove it.
if not video:
return
with video._buffer_lock:
video._buffer = (width, height, data)
def _on_gstplayer_message(mtype, message):
if mtype == 'error':
Logger.error('VideoGstplayer: {}'.format(message))
elif mtype == 'warning':
Logger.warning('VideoGstplayer: {}'.format(message))
elif mtype == 'info':
Logger.info('VideoGstplayer: {}'.format(message))
class VideoGstplayer(VideoBase):
def __init__(self, **kwargs):
self.player = None
self._buffer = None
self._buffer_lock = Lock()
super(VideoGstplayer, self).__init__(**kwargs)
def _on_gst_eos_sync(self):
Clock.schedule_once(self._do_eos, 0)
def load(self):
Logger.debug('VideoGstplayer: Load <{}>'.format(self._filename))
uri = self._get_uri()
wk_self = ref(self)
self.player_callback = partial(_on_gstplayer_buffer, wk_self)
self.player = GstPlayer(uri, self.player_callback,
self._on_gst_eos_sync, _on_gstplayer_message)
self.player.load()
def unload(self):
if self.player:
self.player.unload()
self.player = None
with self._buffer_lock:
self._buffer = None
self._texture = None
def stop(self):
super(VideoGstplayer, self).stop()
self.player.stop()
def pause(self):
super(VideoGstplayer, self).pause()
self.player.pause()
def play(self):
super(VideoGstplayer, self).play()
self.player.set_volume(self.volume)
self.player.play()
def seek(self, percent, precise=True):
self.player.seek(percent)
def _get_position(self):
return self.player.get_position()
def _get_duration(self):
return self.player.get_duration()
def _set_volume(self, value):
self._volume = value
if self.player:
self.player.set_volume(self._volume)
def _update(self, dt):
buf = None
with self._buffer_lock:
buf = self._buffer
self._buffer = None
if buf is not None:
self._update_texture(buf)
self.dispatch('on_frame')
def _update_texture(self, buf):
width, height, data = buf
# texture is not allocated yet, create it first
if not self._texture:
self._texture = Texture.create(size=(width, height),
colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
self._texture.blit_buffer(
data, size=(width, height), colorfmt='rgb')
def _get_uri(self):
uri = self.filename
if not uri:
return
if '://' not in uri:
uri = 'file:' + pathname2url(realpath(uri))
return uri
| mit | 820b6726918fe8c864d18282b2a5ac09 | 27.614286 | 78 | 0.603844 | 3.811608 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/raffle.py | 1 | 1032 | import webapp2
from template import template
from social_import import Post
import random
import datetime
from google.appengine.ext import ndb
class RaffleHandler(webapp2.RequestHandler):
def get(self):
self.response.write(template("raffle.html", {"service": self.request.get('service')}))
def post(self):
service = self.request.get('service')
params = {"result": True, "service": service}
if service == 'twitter':
post = draw_post('twitter/search/#hackatbrown')
params['url'] = 'http://twitter.com/{0}/status/{1}'.format(post.poster, post.id)
params['text'] = post.text
else:
post = draw_post('instagram/tag/hackatbrown')
params['url'] = post.instagram_link
params['name'] = post.poster
self.response.write(template("raffle.html", params))
def draw_post(feed):
after = datetime.datetime.now() - datetime.timedelta(days=3)
q = Post.query(ndb.AND(Post.feed == feed, Post.date >= after))
count = q.count()
i = int(random.random() * count)
print 'INDEX', i
return q.fetch(offset=i, limit=1)[0]
| mit | 8d85d3b79ac9bab82cd3206298c16758 | 32.290323 | 88 | 0.700581 | 3.071429 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/htmlmin/main.py | 1 | 7729 | """
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import cgi
import re
from . import parser
def minify(input,
remove_comments=False,
remove_empty_space=False,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
keep_pre=False,
pre_tags=parser.PRE_TAGS,
pre_attr='pre'):
"""Minifies HTML in one shot.
:param input: A string containing the HTML to be minified.
:param remove_comments: Remove comments found in HTML. Individual comments can
be maintained by putting a ``!`` as the first character inside the comment.
Thus::
<!-- FOO --> <!--! BAR -->
Will become simply::
<!-- BAR -->
The added exclamation is removed.
:param remove_empty_space: Remove empty space found in HTML between an opening
and a closing tag and when it contains a newline or carriage return. If
whitespace is found that is only spaces and/or tabs, it will be turned into
a single space. Be careful, this can have unintended consequences.
:param remove_all_empty_space: A more extreme version of
``remove_empty_space``, this removes all empty whitespace found between
tags. This is almost gauranteed to break your HTML unless you are very
careful.
nothing
:param reduce_boolean_attributes: Where allowed by the HTML5 specification,
attributes such as 'disabled' and 'readonly' will have their value removed,
so 'disabled="true"' will simply become 'disabled'. This is generally a
good option to turn on except when JavaScript relies on the values.
:param remove_optional_attribute_quotes: When True, optional quotes around
attributes are removed. When False, all attribute quotes are left intact.
Defaults to True.
:param keep_pre: By default, htmlmin uses the special attribute ``pre`` to
allow you to demarcate areas of HTML that should not be minified. It removes
this attribute as it finds it. Setting this value to ``True`` tells htmlmin
to leave the attribute in the output.
:param pre_tags: A list of tag names that should never be minified. You are
free to change this list as you see fit, but you will probably want to
include ``pre`` and ``textarea`` if you make any changes to the list. Note
that ``<script>`` and ``<style>`` tags are never minimized.
:param pre_attr: Specifies the attribute that, when found in an HTML tag,
indicates that the content of the tag should not be minified. Defaults to
``pre``.
:return: A string containing the minified HTML.
If you are going to be minifying multiple HTML documents, each with the same
settings, consider using :class:`.Minifier`.
"""
minifier = parser.HTMLMinParser(
remove_comments=remove_comments,
remove_empty_space=remove_empty_space,
remove_all_empty_space=remove_all_empty_space,
reduce_empty_attributes=reduce_empty_attributes,
reduce_boolean_attributes=reduce_boolean_attributes,
remove_optional_attribute_quotes=remove_optional_attribute_quotes,
keep_pre=keep_pre,
pre_tags=pre_tags,
pre_attr=pre_attr)
minifier.feed(input)
minifier.close()
return minifier.result
class Minifier(object):
"""An object that supports HTML Minification.
Options are passed into this class at initialization time and are then
persisted across each use of the instance. If you are going to be minifying
multiple peices of HTML, this will be more efficient than using
:class:`htmlmin.minify`.
See :class:`htmlmin.minify` for an explanation of options.
"""
def __init__(self,
remove_comments=False,
remove_empty_space=False,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
keep_pre=False,
pre_tags=parser.PRE_TAGS,
pre_attr='pre'):
"""Initialize the Minifier.
See :class:`htmlmin.minify` for an explanation of options.
"""
self._parser = parser.HTMLMinParser(
remove_comments=remove_comments,
remove_empty_space=remove_empty_space,
remove_all_empty_space=remove_all_empty_space,
reduce_empty_attributes=reduce_empty_attributes,
reduce_boolean_attributes=reduce_boolean_attributes,
remove_optional_attribute_quotes=remove_optional_attribute_quotes,
keep_pre=keep_pre,
pre_tags=pre_tags,
pre_attr=pre_attr)
def minify(self, *input):
"""Runs HTML through the minifier in one pass.
:param input: HTML to be fed into the minimizer. Multiple chunks of HTML
can be provided, and they are fed in sequentially as if they were
concatenated.
:returns: A string containing the minified HTML.
This is the simplest way to use an existing ``Minifier`` instance. This
method takes in HTML and minfies it, returning the result. Note that this
method resets the internal state of the parser before it does any work. If
there is pending HTML in the buffers, it will be lost.
"""
self._parser.reset()
self.input(*input)
return self.finalize()
def input(self, *input):
"""Feed more HTML into the input stream
:param input: HTML to be fed into the minimizer. Multiple chunks of HTML
can be provided, and they are fed in sequentially as if they were
concatenated. You can also call this method multiple times to achieve
the same effect.
"""
for i in input:
self._parser.feed(i)
@property
def output(self):
"""Retrieve the minified output generated thus far.
"""
return self._parser.result
def finalize(self):
"""Finishes current input HTML and returns mininified result.
This method flushes any remaining input HTML and returns the minified
result. It resets the state of the internal parser in the process so that
new HTML can be minified. Be sure to call this method before you reuse
the ``Minifier`` instance on a new HTML document.
"""
self._parser.close()
result = self._parser.result
self._parser.reset()
return result
| mit | e0e291a7a7a8dc651614f2cf0bdb498b | 40.778378 | 80 | 0.711476 | 4.313058 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/slimit/ast.py | 9 | 10841 | ###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
class Node(object):
def __init__(self, children=None):
self._children_list = [] if children is None else children
def __iter__(self):
for child in self.children():
if child is not None:
yield child
def children(self):
return self._children_list
def to_ecma(self):
# Can't import at module level as ecmavisitor depends
# on ast module...
from slimit.visitors.ecmavisitor import ECMAVisitor
visitor = ECMAVisitor()
return visitor.visit(self)
class Program(Node):
pass
class Block(Node):
pass
class Boolean(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Null(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Number(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Identifier(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class String(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Regex(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Array(Node):
def __init__(self, items):
self.items = items
def children(self):
return self.items
class Object(Node):
def __init__(self, properties=None):
self.properties = [] if properties is None else properties
def children(self):
return self.properties
class NewExpr(Node):
def __init__(self, identifier, args=None):
self.identifier = identifier
self.args = [] if args is None else args
def children(self):
return [self.identifier, self.args]
class FunctionCall(Node):
def __init__(self, identifier, args=None):
self.identifier = identifier
self.args = [] if args is None else args
def children(self):
return [self.identifier] + self.args
class BracketAccessor(Node):
def __init__(self, node, expr):
self.node = node
self.expr = expr
def children(self):
return [self.node, self.expr]
class DotAccessor(Node):
def __init__(self, node, identifier):
self.node = node
self.identifier = identifier
def children(self):
return [self.node, self.identifier]
class Assign(Node):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class GetPropAssign(Node):
def __init__(self, prop_name, elements):
"""elements - function body"""
self.prop_name = prop_name
self.elements = elements
def children(self):
return [self.prop_name] + self.elements
class SetPropAssign(Node):
def __init__(self, prop_name, parameters, elements):
"""elements - function body"""
self.prop_name = prop_name
self.parameters = parameters
self.elements = elements
def children(self):
return [self.prop_name] + self.parameters + self.elements
class VarStatement(Node):
pass
class VarDecl(Node):
def __init__(self, identifier, initializer=None):
self.identifier = identifier
self.identifier._mangle_candidate = True
self.initializer = initializer
def children(self):
return [self.identifier, self.initializer]
class UnaryOp(Node):
def __init__(self, op, value, postfix=False):
self.op = op
self.value = value
self.postfix = postfix
def children(self):
return [self.value]
class BinOp(Node):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class Conditional(Node):
"""Conditional Operator ( ? : )"""
def __init__(self, predicate, consequent, alternative):
self.predicate = predicate
self.consequent = consequent
self.alternative = alternative
def children(self):
return [self.predicate, self.consequent, self.alternative]
class If(Node):
def __init__(self, predicate, consequent, alternative=None):
self.predicate = predicate
self.consequent = consequent
self.alternative = alternative
def children(self):
return [self.predicate, self.consequent, self.alternative]
class DoWhile(Node):
def __init__(self, predicate, statement):
self.predicate = predicate
self.statement = statement
def children(self):
return [self.predicate, self.statement]
class While(Node):
def __init__(self, predicate, statement):
self.predicate = predicate
self.statement = statement
def children(self):
return [self.predicate, self.statement]
class For(Node):
def __init__(self, init, cond, count, statement):
self.init = init
self.cond = cond
self.count = count
self.statement = statement
def children(self):
return [self.init, self.cond, self.count, self.statement]
class ForIn(Node):
def __init__(self, item, iterable, statement):
self.item = item
self.iterable = iterable
self.statement = statement
def children(self):
return [self.item, self.iterable, self.statement]
class Continue(Node):
def __init__(self, identifier=None):
self.identifier = identifier
def children(self):
return [self.identifier]
class Break(Node):
def __init__(self, identifier=None):
self.identifier = identifier
def children(self):
return [self.identifier]
class Return(Node):
def __init__(self, expr=None):
self.expr = expr
def children(self):
return [self.expr]
class With(Node):
def __init__(self, expr, statement):
self.expr = expr
self.statement = statement
def children(self):
return [self.expr, self.statement]
class Switch(Node):
def __init__(self, expr, cases, default=None):
self.expr = expr
self.cases = cases
self.default = default
def children(self):
return [self.expr] + self.cases + [self.default]
class Case(Node):
def __init__(self, expr, elements):
self.expr = expr
self.elements = elements if elements is not None else []
def children(self):
return [self.expr] + self.elements
class Default(Node):
def __init__(self, elements):
self.elements = elements if elements is not None else []
def children(self):
return self.elements
class Label(Node):
def __init__(self, identifier, statement):
self.identifier = identifier
self.statement = statement
def children(self):
return [self.identifier, self.statement]
class Throw(Node):
def __init__(self, expr):
self.expr = expr
def children(self):
return [self.expr]
class Try(Node):
def __init__(self, statements, catch=None, fin=None):
self.statements = statements
self.catch = catch
self.fin = fin
def children(self):
return [self.statements] + [self.catch, self.fin]
class Catch(Node):
def __init__(self, identifier, elements):
self.identifier = identifier
# CATCH identifiers are subject to name mangling. we need to mark them.
self.identifier._mangle_candidate = True
self.elements = elements
def children(self):
return [self.identifier, self.elements]
class Finally(Node):
def __init__(self, elements):
self.elements = elements
def children(self):
return self.elements
class Debugger(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class FuncBase(Node):
def __init__(self, identifier, parameters, elements):
self.identifier = identifier
self.parameters = parameters if parameters is not None else []
self.elements = elements if elements is not None else []
self._init_ids()
def _init_ids(self):
# function declaration/expression name and parameters are identifiers
# and therefore are subject to name mangling. we need to mark them.
if self.identifier is not None:
self.identifier._mangle_candidate = True
for param in self.parameters:
param._mangle_candidate = True
def children(self):
return [self.identifier] + self.parameters + self.elements
class FuncDecl(FuncBase):
pass
# The only difference is that function expression might not have an identifier
class FuncExpr(FuncBase):
pass
class Comma(Node):
def __init__(self, left, right):
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class EmptyStatement(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class ExprStatement(Node):
def __init__(self, expr):
self.expr = expr
def children(self):
return [self.expr]
class Elision(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class This(Node):
def __init__(self):
pass
def children(self):
return []
| mit | 808191e1ff020efbfd8c2faf86046f41 | 25.122892 | 79 | 0.619777 | 4.177649 | false | false | false | false |
kivy/kivy | examples/miscellaneous/shapecollisions.py | 13 | 13863 | # This is a simple demo for advanced collisions and mesh creation from a set
# of points. Its purpose is only to give an idea on how to make complex stuff.
# Check garden.collider for better performance.
from math import cos, sin, pi, sqrt
from random import random, randint
from itertools import combinations
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.graphics import Color, Mesh, Point
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (
ListProperty,
StringProperty,
ObjectProperty,
NumericProperty
)
# Cloud polygon, 67 vertices + custom origin [150, 50]
cloud_poly = [
150, 50,
109.7597, 112.9600, 115.4326, 113.0853, 120.1966, 111.9883,
126.0889, 111.9570, 135.0841, 111.9570, 138.5944, 112.5525,
145.7403, 115.5301, 150.5357, 120.3256, 155.5313, 125.5938,
160.8438, 130.5000, 165.7813, 132.5000, 171.8125, 132.3438,
177.5000, 128.4688, 182.1531, 121.4990, 185.1438, 114.0406,
185.9181, 108.5649, 186.2226, 102.5978, 187.8059, 100.2231,
193.2257, 100.1622, 197.6712, 101.8671, 202.6647, 104.1809,
207.1102, 105.8858, 214.2351, 105.0333, 219.3747, 102.8301,
224.0413, 98.7589, 225.7798, 93.7272, 226.0000, 86.8750,
222.9375, 81.0625, 218.3508, 76.0867, 209.8301, 70.8090,
198.7806, 66.1360, 189.7651, 62.2327, 183.6082, 56.6252,
183.2784, 50.5778, 190.9155, 42.7294, 196.8470, 36.1343,
197.7339, 29.9272, 195.5720, 23.4430, 191.2500, 15.9803,
184.0574, 9.5882, 175.8811, 3.9951, 165.7992, 3.4419,
159.0369, 7.4370, 152.5205, 14.8125, 147.4795, 24.2162,
142.4385, 29.0103, 137.0287, 30.9771, 127.1560, 27.4818,
119.1371, 20.0388, 112.1820, 11.3690, 104.6541, 7.1976,
97.2080, 6.2979, 88.9437, 9.8149, 80.3433, 17.3218,
76.5924, 26.5452, 78.1678, 37.0432, 83.5068, 47.1104,
92.8529, 58.3561, 106.3021, 69.2978, 108.9615, 73.9329,
109.0375, 80.6955, 104.4713, 88.6708, 100.6283, 95.7483,
100.1226, 101.5114, 102.8532, 107.2745, 105.6850, 110.9144,
109.7597, 112.9600
]
class BaseShape(Widget):
'''(internal) Base class for moving with touches or calls.'''
# keep references for offset
_old_pos = ListProperty([0, 0])
_old_touch = ListProperty([0, 0])
_new_touch = ListProperty([0, 0])
# shape properties
name = StringProperty('')
poly = ListProperty([])
shape = ObjectProperty()
poly_len = NumericProperty(0)
shape_len = NumericProperty(0)
debug_collider = ObjectProperty()
debug_collider_len = NumericProperty(0)
def __init__(self, **kwargs):
'''Create a shape with size [100, 100]
and give it a label if it's named.
'''
super(BaseShape, self).__init__(**kwargs)
self.size_hint = (None, None)
self.add_widget(Label(text=self.name))
def move_label(self, x, y, *args):
'''Move label with shape name as the only child.'''
self.children[0].pos = [x, y]
def move_collider(self, offset_x, offset_y, *args):
'''Move debug collider when the shape moves.'''
points = self.debug_collider.points[:]
for i in range(0, self.debug_collider_len, 2):
points[i] += offset_x
points[i + 1] += offset_y
self.debug_collider.points = points
def on_debug_collider(self, instance, value):
'''Recalculate length of collider points' array.'''
self.debug_collider_len = len(value.points)
def on_poly(self, instance, value):
'''Recalculate length of polygon points' array.'''
self.poly_len = len(value)
def on_shape(self, instance, value):
'''Recalculate length of Mesh vertices' array.'''
self.shape_len = len(value.vertices)
def on_pos(self, instance, pos):
'''Move polygon and its Mesh on each position change.
This event is above all and changes positions of the other
children-like components, so that a simple::
shape.pos = (100, 200)
would move everything, not just the widget itself.
'''
# position changed by touch
offset_x = self._new_touch[0] - self._old_touch[0]
offset_y = self._new_touch[1] - self._old_touch[1]
# position changed by call (shape.pos = X)
if not offset_x and not offset_y:
offset_x = pos[0] - self._old_pos[0]
offset_y = pos[1] - self._old_pos[1]
self._old_pos = pos
# move polygon points by offset
for i in range(0, self.poly_len, 2):
self.poly[i] += offset_x
self.poly[i + 1] += offset_y
# stick label to bounding box (widget)
if self.name:
self.move_label(*pos)
# move debug collider if available
if self.debug_collider is not None:
self.move_collider(offset_x, offset_y)
# return if no Mesh available
if self.shape is None:
return
# move Mesh vertices by offset
points = self.shape.vertices[:]
for i in range(0, self.shape_len, 2):
points[i] += offset_x
points[i + 1] += offset_y
self.shape.vertices = points
def on_touch_move(self, touch, *args):
'''Move shape with dragging.'''
# grab single touch for shape
if touch.grab_current is not self:
return
# get touches
x, y = touch.pos
new_pos = [x, y]
self._new_touch = new_pos
self._old_touch = [touch.px, touch.py]
# get offsets, move & trigger on_pos event
offset_x = self._new_touch[0] - self._old_touch[0]
offset_y = self._new_touch[1] - self._old_touch[1]
self.pos = [self.x + offset_x, self.y + offset_y]
def shape_collide(self, x, y, *args):
'''Point to polygon collision through a list of points.'''
# ignore if no polygon area is set
poly = self.poly
if not poly:
return False
n = self.poly_len
inside = False
p1x = poly[0]
p1y = poly[1]
# compare point pairs via PIP algo, too long, read
# https://en.wikipedia.org/wiki/Point_in_polygon
for i in range(0, n + 2, 2):
p2x = poly[i % n]
p2y = poly[(i + 1) % n]
if y > min(p1y, p2y) and y <= max(p1y, p2y) and x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
class RegularShape(BaseShape):
'''Starting from center and creating edges around for i.e.:
regular triangles, squares, regular pentagons, up to "circle".
'''
def __init__(self, edges=3, color=None, **kwargs):
super(RegularShape, self).__init__(**kwargs)
if edges < 3:
raise Exception('Not enough edges! (3+ only)')
color = color or [random() for i in range(3)]
rad_edge = (pi * 2) / float(edges)
r_x = self.width / 2.0
r_y = self.height / 2.0
poly = []
vertices = []
for i in range(edges):
# get points within a circle with radius of [r_x, r_y]
x = cos(rad_edge * i) * r_x + self.center_x
y = sin(rad_edge * i) * r_y + self.center_y
poly.extend([x, y])
# add UV layout zeros for Mesh, see Mesh docs
vertices.extend([x, y, 0, 0])
# draw Mesh shape from generated poly points
with self.canvas:
Color(rgba=(color[0], color[1], color[2], 0.6))
self.shape = Mesh(
pos=self.pos,
vertices=vertices,
indices=list(range(edges)),
mode='triangle_fan'
)
self.poly = poly
def on_touch_down(self, touch, *args):
if self.shape_collide(*touch.pos):
touch.grab(self)
class MeshShape(BaseShape):
'''Starting from a custom origin and custom points, draw
a convex Mesh shape with both touch and shape collisions.
.. note::
To get the points, use e.g. Pen tool from your favorite
graphics editor and export it to a human readable format.
'''
def __init__(self, color=None, **kwargs):
super(MeshShape, self).__init__(**kwargs)
color = color or [random() for i in range(3)]
min_x = 10000
min_y = 10000
max_x = 0
max_y = 0
# first point has to be the center of the convex shape's mass,
# that's where the triangle fan starts from
poly = [
50, 50, 0, 0, 100, 0, 100, 100, 0, 100
] if not self.poly else self.poly
# make the polygon smaller to fit 100x100 bounding box
poly = [round(p / 1.5, 4) for p in poly]
poly_len = len(poly)
# create list of vertices & get edges of the polygon
vertices = []
vertices_len = 0
for i in range(0, poly_len, 2):
min_x = poly[i] if poly[i] < min_x else min_x
min_y = poly[i + 1] if poly[i + 1] < min_y else min_y
max_x = poly[i] if poly[i] > max_x else max_x
max_y = poly[i + 1] if poly[i + 1] > max_y else max_y
# add UV layout zeros for Mesh
vertices_len += 4
vertices.extend([poly[i], poly[i + 1], 0, 0])
# get center of poly from edges
poly_center_x, poly_center_y = [
(max_x - min_x) / 2.0,
(max_y - min_y) / 2.0
]
# get distance from the widget's center and push the points to
# the widget's origin, so that min_x and min_y for the poly would
# result in 0 i.e.: points moved as close as possible to [0, 0]
# -> No editor gives poly points moved to the origin directly
dec_x = (self.center_x - poly_center_x) - min_x
dec_y = (self.center_y - poly_center_y) - min_y
# move polygon points to the bounding box (touch)
for i in range(0, poly_len, 2):
poly[i] += dec_x
poly[i + 1] += dec_y
# move mesh points to the bounding box (image)
# has to contain the same points as polygon
for i in range(0, vertices_len, 4):
vertices[i] += dec_x
vertices[i + 1] += dec_y
# draw Mesh shape from generated poly points
with self.canvas:
Color(rgba=(color[0], color[1], color[2], 0.6))
self.shape = Mesh(
pos=self.pos,
vertices=vertices,
indices=list(range(int(poly_len / 2.0))),
mode='triangle_fan'
)
# debug polygon points with Line to see the origin point
# and intersections with the other points
# Line(points=poly)
self.poly = poly
def on_touch_down(self, touch, *args):
if self.shape_collide(*touch.pos):
touch.grab(self)
class Collisions(App):
def __init__(self, **kwargs):
super(Collisions, self).__init__(**kwargs)
# register an event for collision
self.register_event_type('on_collision')
def collision_circles(self, shapes=None, distance=100, debug=False, *args):
'''Simple circle <-> circle collision between the shapes i.e. there's
a simple line between the centers of the two shapes and the collision
is only about measuring distance -> 1+ radii intersections.
'''
# get all combinations from all available shapes
if not hasattr(self, 'combins'):
self.combins = list(combinations(shapes, 2))
for com in self.combins:
x = (com[0].center_x - com[1].center_x) ** 2
y = (com[0].center_y - com[1].center_y) ** 2
if sqrt(x + y) <= distance:
# dispatch a custom event if the objects collide
self.dispatch('on_collision', (com[0], com[1]))
# draw collider only if debugging
if not debug:
return
# add circle collider only if the shape doesn't have one
for shape in shapes:
if shape.debug_collider is not None:
continue
d = distance / 2.0
cx, cy = shape.center
points = [(cx + d * cos(i), cy + d * sin(i)) for i in range(44)]
points = [p for ps in points for p in ps]
with shape.canvas:
Color(rgba=(0, 1, 0, 1))
shape.debug_collider = Point(points=points)
def on_collision(self, pair, *args):
'''Dispatched when objects collide, gives back colliding objects
as a "pair" argument holding their instances.
'''
print('Collision {} x {}'.format(pair[0].name, pair[1].name))
def build(self):
# the environment for all 2D shapes
scene = FloatLayout()
# list of 2D shapes, starting with regular ones
shapes = [
RegularShape(
name='Shape {}'.format(x), edges=x
) for x in range(3, 13)
]
shapes.append(MeshShape(name='DefaultMesh'))
shapes.append(MeshShape(name='Cloud', poly=cloud_poly))
shapes.append(MeshShape(
name='3QuarterCloud',
poly=cloud_poly[:110]
))
# move shapes to some random position
for shape in shapes:
shape.pos = [randint(50, i - 50) for i in Window.size]
scene.add_widget(shape)
# check for simple collisions between the shapes
Clock.schedule_interval(
lambda *t: self.collision_circles(shapes, debug=True), 0.1)
return scene
if __name__ == '__main__':
Collisions().run()
| mit | 4cc14ef623e23fbb6f88bfcf092f5b6e | 33.919395 | 79 | 0.56842 | 3.421273 | false | false | false | false |
hackatbrown/2015.hackatbrown.org | hack-at-brown-2015/requests/api.py | 638 | 4333 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
| mit | 27fb2eac722656259a3485cd33586038 | 35.108333 | 136 | 0.667898 | 3.896583 | false | false | false | false |
andymccurdy/redis-py | benchmarks/basic_operations.py | 4 | 4867 | import time
from argparse import ArgumentParser
from functools import wraps
import redis
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"-n", type=int, help="Total number of requests (default 100000)", default=100000
)
parser.add_argument(
"-P",
type=int,
help=("Pipeline <numreq> requests." " Default 1 (no pipeline)."),
default=1,
)
parser.add_argument(
"-s",
type=int,
help="Data size of SET/GET value in bytes (default 2)",
default=2,
)
args = parser.parse_args()
return args
def run():
args = parse_args()
r = redis.Redis()
r.flushall()
set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
get_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
get_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
incr(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
lpush(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
lrange_300(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
lpop(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
hmset(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
def timer(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.monotonic()
ret = func(*args, **kwargs)
duration = time.monotonic() - start
if "num" in kwargs:
count = kwargs["num"]
else:
count = args[1]
print(f"{func.__name__} - {count} Requests")
print(f"Duration = {duration}")
print(f"Rate = {count/duration}")
print()
return ret
return wrapper
@timer
def set_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
set_data = "a".ljust(data_size, "0")
for i in range(num):
conn.set(f"set_str:{i}", set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def set_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
set_data = 10 ** (data_size - 1)
for i in range(num):
conn.set(f"set_int:{i}", set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def get_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
for i in range(num):
conn.get(f"set_str:{i}")
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def get_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
for i in range(num):
conn.get(f"set_int:{i}")
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def incr(conn, num, pipeline_size, *args, **kwargs):
if pipeline_size > 1:
conn = conn.pipeline()
for i in range(num):
conn.incr("incr_key")
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def lpush(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
set_data = 10 ** (data_size - 1)
for i in range(num):
conn.lpush("lpush_key", set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def lrange_300(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
for i in range(num):
conn.lrange("lpush_key", i, i + 300)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def lpop(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
for i in range(num):
conn.lpop("lpush_key")
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
@timer
def hmset(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
set_data = {"str_value": "string", "int_value": 123456, "float_value": 123456.0}
for i in range(num):
conn.hmset("hmset_key", set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
if pipeline_size > 1:
conn.execute()
if __name__ == "__main__":
run()
| mit | 71ffcc3b00edcafde96f741b8f33de98 | 24.217617 | 88 | 0.570372 | 3.23819 | false | false | false | false |
andymccurdy/redis-py | tests/test_graph_utils/test_node.py | 4 | 2021 | import pytest
from redis.commands.graph import node
@pytest.fixture
def fixture():
no_args = node.Node()
no_props = node.Node(node_id=1, alias="alias", label="l")
props_only = node.Node(properties={"a": "a", "b": 10})
no_label = node.Node(node_id=1, alias="alias", properties={"a": "a"})
multi_label = node.Node(node_id=1, alias="alias", label=["l", "ll"])
return no_args, no_props, props_only, no_label, multi_label
@pytest.mark.redismod
def test_to_string(fixture):
no_args, no_props, props_only, no_label, multi_label = fixture
assert no_args.to_string() == ""
assert no_props.to_string() == ""
assert props_only.to_string() == '{a:"a",b:10}'
assert no_label.to_string() == '{a:"a"}'
assert multi_label.to_string() == ""
@pytest.mark.redismod
def test_stringify(fixture):
no_args, no_props, props_only, no_label, multi_label = fixture
assert str(no_args) == "()"
assert str(no_props) == "(alias:l)"
assert str(props_only) == '({a:"a",b:10})'
assert str(no_label) == '(alias{a:"a"})'
assert str(multi_label) == "(alias:l:ll)"
@pytest.mark.redismod
def test_comparision(fixture):
no_args, no_props, props_only, no_label, multi_label = fixture
assert node.Node() == node.Node()
assert node.Node(node_id=1) == node.Node(node_id=1)
assert node.Node(node_id=1) != node.Node(node_id=2)
assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="b")
assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="a")
assert node.Node(node_id=1, label="a") == node.Node(node_id=1, label="a")
assert node.Node(node_id=1, label="a") != node.Node(node_id=1, label="b")
assert node.Node(node_id=1, alias="a", label="l") == node.Node(
node_id=1, alias="a", label="l"
)
assert node.Node(alias="a", label="l") != node.Node(alias="a", label="l1")
assert node.Node(properties={"a": 10}) == node.Node(properties={"a": 10})
assert node.Node() != node.Node(properties={"a": 10})
| mit | bc97541947654f6e55e9315330b55c4c | 37.865385 | 78 | 0.617021 | 2.838483 | false | true | false | false |
andymccurdy/redis-py | tests/test_sentinel.py | 2 | 7104 | import socket
import pytest
import redis.sentinel
from redis import exceptions
from redis.sentinel import (
MasterNotFoundError,
Sentinel,
SentinelConnectionPool,
SlaveNotFoundError,
)
@pytest.fixture(scope="module")
def master_ip(master_host):
yield socket.gethostbyname(master_host[0])
class SentinelTestClient:
def __init__(self, cluster, id):
self.cluster = cluster
self.id = id
def sentinel_masters(self):
self.cluster.connection_error_if_down(self)
self.cluster.timeout_if_down(self)
return {self.cluster.service_name: self.cluster.master}
def sentinel_slaves(self, master_name):
self.cluster.connection_error_if_down(self)
self.cluster.timeout_if_down(self)
if master_name != self.cluster.service_name:
return []
return self.cluster.slaves
def execute_command(self, *args, **kwargs):
# wrapper purely to validate the calls don't explode
from redis.client import bool_ok
return bool_ok
class SentinelTestCluster:
def __init__(self, servisentinel_ce_name="mymaster", ip="127.0.0.1", port=6379):
self.clients = {}
self.master = {
"ip": ip,
"port": port,
"is_master": True,
"is_sdown": False,
"is_odown": False,
"num-other-sentinels": 0,
}
self.service_name = servisentinel_ce_name
self.slaves = []
self.nodes_down = set()
self.nodes_timeout = set()
def connection_error_if_down(self, node):
if node.id in self.nodes_down:
raise exceptions.ConnectionError
def timeout_if_down(self, node):
if node.id in self.nodes_timeout:
raise exceptions.TimeoutError
def client(self, host, port, **kwargs):
return SentinelTestClient(self, (host, port))
@pytest.fixture()
def cluster(request, master_ip):
def teardown():
redis.sentinel.Redis = saved_Redis
cluster = SentinelTestCluster(ip=master_ip)
saved_Redis = redis.sentinel.Redis
redis.sentinel.Redis = cluster.client
request.addfinalizer(teardown)
return cluster
@pytest.fixture()
def sentinel(request, cluster):
return Sentinel([("foo", 26379), ("bar", 26379)])
@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("xxx")
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_down.add(("foo", 26379))
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_timeout.add(("foo", 26379))
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
sentinel = Sentinel([("foo", 26379)], min_other_sentinels=1)
# min_other_sentinels
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
cluster.master["num-other-sentinels"] = 2
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
cluster.master["is_odown"] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
cluster.master["is_sdown"] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
assert sentinel.discover_slaves("mymaster") == []
cluster.slaves = [
{"ip": "slave0", "port": 1234, "is_odown": False, "is_sdown": False},
{"ip": "slave1", "port": 1234, "is_odown": False, "is_sdown": False},
]
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
# slave0 -> ODOWN
cluster.slaves[0]["is_odown"] = True
assert sentinel.discover_slaves("mymaster") == [("slave1", 1234)]
# slave1 -> SDOWN
cluster.slaves[1]["is_sdown"] = True
assert sentinel.discover_slaves("mymaster") == []
cluster.slaves[0]["is_odown"] = False
cluster.slaves[1]["is_sdown"] = False
# node0 -> DOWN
cluster.nodes_down.add(("foo", 26379))
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
cluster.nodes_down.clear()
# node0 -> TIMEOUT
cluster.nodes_timeout.add(("foo", 26379))
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
@pytest.mark.onlynoncluster
def test_master_for(cluster, sentinel, master_ip):
master = sentinel.master_for("mymaster", db=9)
assert master.ping()
assert master.connection_pool.master_address == (master_ip, 6379)
# Use internal connection check
master = sentinel.master_for("mymaster", db=9, check_connection=True)
assert master.ping()
@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
cluster.slaves = [
{"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
]
slave = sentinel.slave_for("mymaster", db=9)
assert slave.ping()
@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
cluster.master["is_odown"] = True
slave = sentinel.slave_for("mymaster", db=9)
with pytest.raises(SlaveNotFoundError):
slave.ping()
@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
cluster.slaves = [
{"ip": "slave0", "port": 6379, "is_odown": False, "is_sdown": False},
{"ip": "slave1", "port": 6379, "is_odown": False, "is_sdown": False},
]
pool = SentinelConnectionPool("mymaster", sentinel)
rotator = pool.rotate_slaves()
assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
# Fallback to master
assert next(rotator) == (master_ip, 6379)
with pytest.raises(SlaveNotFoundError):
next(rotator)
@pytest.mark.onlynoncluster
def test_ckquorum(cluster, sentinel):
assert sentinel.sentinel_ckquorum("mymaster")
@pytest.mark.onlynoncluster
def test_flushconfig(cluster, sentinel):
assert sentinel.sentinel_flushconfig()
@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
cluster.master["is_odown"] = True
assert sentinel.sentinel_reset("mymaster")
| mit | 1e4ff325993c21e5de1ba5246a01b67c | 29.358974 | 87 | 0.662866 | 3.513353 | false | true | false | false |
andymccurdy/redis-py | redis/commands/search/querystring.py | 2 | 7503 | def tags(*t):
"""
Indicate that the values should be matched to a tag field
### Parameters
- **t**: Tags to search for
"""
if not t:
raise ValueError("At least one tag must be specified")
return TagValue(*t)
def between(a, b, inclusive_min=True, inclusive_max=True):
"""
Indicate that value is a numeric range
"""
return RangeValue(a, b, inclusive_min=inclusive_min, inclusive_max=inclusive_max)
def equal(n):
"""
Match a numeric value
"""
return between(n, n)
def lt(n):
"""
Match any value less than n
"""
return between(None, n, inclusive_max=False)
def le(n):
"""
Match any value less or equal to n
"""
return between(None, n, inclusive_max=True)
def gt(n):
"""
Match any value greater than n
"""
return between(n, None, inclusive_min=False)
def ge(n):
"""
Match any value greater or equal to n
"""
return between(n, None, inclusive_min=True)
def geo(lat, lon, radius, unit="km"):
"""
Indicate that value is a geo region
"""
return GeoValue(lat, lon, radius, unit)
class Value:
@property
def combinable(self):
"""
Whether this type of value may be combined with other values
for the same field. This makes the filter potentially more efficient
"""
return False
@staticmethod
def make_value(v):
"""
Convert an object to a value, if it is not a value already
"""
if isinstance(v, Value):
return v
return ScalarValue(v)
def to_string(self):
raise NotImplementedError()
def __str__(self):
return self.to_string()
class RangeValue(Value):
combinable = False
def __init__(self, a, b, inclusive_min=False, inclusive_max=False):
if a is None:
a = "-inf"
if b is None:
b = "inf"
self.range = [str(a), str(b)]
self.inclusive_min = inclusive_min
self.inclusive_max = inclusive_max
def to_string(self):
return "[{1}{0[0]} {2}{0[1]}]".format(
self.range,
"(" if not self.inclusive_min else "",
"(" if not self.inclusive_max else "",
)
class ScalarValue(Value):
combinable = True
def __init__(self, v):
self.v = str(v)
def to_string(self):
return self.v
class TagValue(Value):
combinable = False
def __init__(self, *tags):
self.tags = tags
def to_string(self):
return "{" + " | ".join(str(t) for t in self.tags) + "}"
class GeoValue(Value):
def __init__(self, lon, lat, radius, unit="km"):
self.lon = lon
self.lat = lat
self.radius = radius
self.unit = unit
class Node:
def __init__(self, *children, **kwparams):
"""
Create a node
### Parameters
- **children**: One or more sub-conditions. These can be additional
`intersect`, `disjunct`, `union`, `optional`, or any other `Node`
type.
The semantics of multiple conditions are dependent on the type of
query. For an `intersection` node, this amounts to a logical AND,
for a `union` node, this amounts to a logical `OR`.
- **kwparams**: key-value parameters. Each key is the name of a field,
and the value should be a field value. This can be one of the
following:
- Simple string (for text field matches)
- value returned by one of the helper functions
- list of either a string or a value
### Examples
Field `num` should be between 1 and 10
```
intersect(num=between(1, 10)
```
Name can either be `bob` or `john`
```
union(name=("bob", "john"))
```
Don't select countries in Israel, Japan, or US
```
disjunct_union(country=("il", "jp", "us"))
```
"""
self.params = []
kvparams = {}
for k, v in kwparams.items():
curvals = kvparams.setdefault(k, [])
if isinstance(v, (str, int, float)):
curvals.append(Value.make_value(v))
elif isinstance(v, Value):
curvals.append(v)
else:
curvals.extend(Value.make_value(subv) for subv in v)
self.params += [Node.to_node(p) for p in children]
for k, v in kvparams.items():
self.params.extend(self.join_fields(k, v))
def join_fields(self, key, vals):
if len(vals) == 1:
return [BaseNode(f"@{key}:{vals[0].to_string()}")]
if not vals[0].combinable:
return [BaseNode(f"@{key}:{v.to_string()}") for v in vals]
s = BaseNode(f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})")
return [s]
@classmethod
def to_node(cls, obj): # noqa
if isinstance(obj, Node):
return obj
return BaseNode(obj)
@property
def JOINSTR(self):
raise NotImplementedError()
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
pre, post = ("(", ")") if with_parens else ("", "")
return f"{pre}{self.JOINSTR.join(n.to_string() for n in self.params)}{post}"
def _should_use_paren(self, optval):
if optval is not None:
return optval
return len(self.params) > 1
def __str__(self):
return self.to_string()
class BaseNode(Node):
def __init__(self, s):
super().__init__()
self.s = str(s)
def to_string(self, with_parens=None):
return self.s
class IntersectNode(Node):
"""
Create an intersection node. All children need to be satisfied in order for
this node to evaluate as true
"""
JOINSTR = " "
class UnionNode(Node):
"""
Create a union node. Any of the children need to be satisfied in order for
this node to evaluate as true
"""
JOINSTR = "|"
class DisjunctNode(IntersectNode):
"""
Create a disjunct node. In order for this node to be true, all of its
children must evaluate to false
"""
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
ret = super().to_string(with_parens=False)
if with_parens:
return "(-" + ret + ")"
else:
return "-" + ret
class DistjunctUnion(DisjunctNode):
"""
This node is true if *all* of its children are false. This is equivalent to
```
disjunct(union(...))
```
"""
JOINSTR = "|"
class OptionalNode(IntersectNode):
"""
Create an optional node. If this nodes evaluates to true, then the document
will be rated higher in score/rank.
"""
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
ret = super().to_string(with_parens=False)
if with_parens:
return "(~" + ret + ")"
else:
return "~" + ret
def intersect(*args, **kwargs):
return IntersectNode(*args, **kwargs)
def union(*args, **kwargs):
return UnionNode(*args, **kwargs)
def disjunct(*args, **kwargs):
return DisjunctNode(*args, **kwargs)
def disjunct_union(*args, **kwargs):
return DistjunctUnion(*args, **kwargs)
def querystring(*args, **kwargs):
return intersect(*args, **kwargs).to_string()
| mit | 6800760a23f09d2acc6b13fd58e61faa | 22.894904 | 85 | 0.562175 | 3.78557 | false | false | false | false |
andymccurdy/redis-py | redis/commands/json/commands.py | 1 | 13920 | import os
from json import JSONDecodeError, loads
from typing import Dict, List, Optional, Union
from deprecated import deprecated
from redis.exceptions import DataError
from ._util import JsonType
from .decoders import decode_dict_keys
from .path import Path
class JSONCommands:
"""json commands."""
def arrappend(
self, name: str, path: Optional[str] = Path.root_path(), *args: List[JsonType]
) -> List[Union[int, None]]:
"""Append the objects ``args`` to the array under the
``path` in key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrappend
""" # noqa
pieces = [name, str(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command("JSON.ARRAPPEND", *pieces)
def arrindex(
self,
name: str,
path: str,
scalar: int,
start: Optional[int] = 0,
stop: Optional[int] = -1,
) -> List[Union[int, None]]:
"""
Return the index of ``scalar`` in the JSON array under ``path`` at key
``name``.
The search can be limited using the optional inclusive ``start``
and exclusive ``stop`` indices.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrindex
""" # noqa
return self.execute_command(
"JSON.ARRINDEX", name, str(path), self._encode(scalar), start, stop
)
def arrinsert(
self, name: str, path: str, index: int, *args: List[JsonType]
) -> List[Union[int, None]]:
"""Insert the objects ``args`` to the array at index ``index``
under the ``path` in key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrinsert
""" # noqa
pieces = [name, str(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command("JSON.ARRINSERT", *pieces)
def arrlen(
self, name: str, path: Optional[str] = Path.root_path()
) -> List[Union[int, None]]:
"""Return the length of the array JSON value under ``path``
at key``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrlen
""" # noqa
return self.execute_command("JSON.ARRLEN", name, str(path))
def arrpop(
self,
name: str,
path: Optional[str] = Path.root_path(),
index: Optional[int] = -1,
) -> List[Union[str, None]]:
"""Pop the element at ``index`` in the array JSON value under
``path`` at key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrpop
""" # noqa
return self.execute_command("JSON.ARRPOP", name, str(path), index)
def arrtrim(
self, name: str, path: str, start: int, stop: int
) -> List[Union[int, None]]:
"""Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``.
For more information: https://oss.redis.com/redisjson/commands/#jsonarrtrim
""" # noqa
return self.execute_command("JSON.ARRTRIM", name, str(path), start, stop)
def type(self, name: str, path: Optional[str] = Path.root_path()) -> List[str]:
"""Get the type of the JSON value under ``path`` from key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsontype
""" # noqa
return self.execute_command("JSON.TYPE", name, str(path))
def resp(self, name: str, path: Optional[str] = Path.root_path()) -> List:
"""Return the JSON value under ``path`` at key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonresp
""" # noqa
return self.execute_command("JSON.RESP", name, str(path))
def objkeys(
self, name: str, path: Optional[str] = Path.root_path()
) -> List[Union[List[str], None]]:
"""Return the key names in the dictionary JSON value under ``path`` at
key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonobjkeys
""" # noqa
return self.execute_command("JSON.OBJKEYS", name, str(path))
def objlen(self, name: str, path: Optional[str] = Path.root_path()) -> int:
"""Return the length of the dictionary JSON value under ``path`` at key
``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonobjlen
""" # noqa
return self.execute_command("JSON.OBJLEN", name, str(path))
def numincrby(self, name: str, path: str, number: int) -> str:
"""Increment the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``.
For more information: https://oss.redis.com/redisjson/commands/#jsonnumincrby
""" # noqa
return self.execute_command(
"JSON.NUMINCRBY", name, str(path), self._encode(number)
)
@deprecated(version="4.0.0", reason="deprecated since redisjson 1.0.0")
def nummultby(self, name: str, path: str, number: int) -> str:
"""Multiply the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``.
For more information: https://oss.redis.com/redisjson/commands/#jsonnummultby
""" # noqa
return self.execute_command(
"JSON.NUMMULTBY", name, str(path), self._encode(number)
)
def clear(self, name: str, path: Optional[str] = Path.root_path()) -> int:
"""
Empty arrays and objects (to have zero slots/keys without deleting the
array/object).
Return the count of cleared paths (ignoring non-array and non-objects
paths).
For more information: https://oss.redis.com/redisjson/commands/#jsonclear
""" # noqa
return self.execute_command("JSON.CLEAR", name, str(path))
def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int:
"""Delete the JSON value stored at key ``key`` under ``path``.
For more information: https://oss.redis.com/redisjson/commands/#jsondel
"""
return self.execute_command("JSON.DEL", key, str(path))
# forget is an alias for delete
forget = delete
def get(
self, name: str, *args, no_escape: Optional[bool] = False
) -> List[JsonType]:
"""
Get the object stored as a JSON value at key ``name``.
``args`` is zero or more paths, and defaults to root path
```no_escape`` is a boolean flag to add no_escape option to get
non-ascii characters
For more information: https://oss.redis.com/redisjson/commands/#jsonget
""" # noqa
pieces = [name]
if no_escape:
pieces.append("noescape")
if len(args) == 0:
pieces.append(Path.root_path())
else:
for p in args:
pieces.append(str(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command("JSON.GET", *pieces)
except TypeError:
return None
def mget(self, keys: List[str], path: str) -> List[JsonType]:
"""
Get the objects stored as a JSON values under ``path``. ``keys``
is a list of one or more keys.
For more information: https://oss.redis.com/redisjson/commands/#jsonmget
""" # noqa
pieces = []
pieces += keys
pieces.append(str(path))
return self.execute_command("JSON.MGET", *pieces)
def set(
self,
name: str,
path: str,
obj: JsonType,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> Optional[str]:
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
For the purpose of using this within a pipeline, this command is also
aliased to jsonset.
For more information: https://oss.redis.com/redisjson/commands/#jsonset
"""
if decode_keys:
obj = decode_dict_keys(obj)
pieces = [name, str(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception(
"nx and xx are mutually exclusive: use one, the "
"other or neither - but not both"
)
elif nx:
pieces.append("NX")
elif xx:
pieces.append("XX")
return self.execute_command("JSON.SET", *pieces)
def set_file(
self,
name: str,
path: str,
file_name: str,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> Optional[str]:
"""
Set the JSON value at key ``name`` under the ``path`` to the content
of the json file ``file_name``.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
"""
with open(file_name, "r") as fp:
file_content = loads(fp.read())
return self.set(name, path, file_content, nx=nx, xx=xx, decode_keys=decode_keys)
def set_path(
self,
json_path: str,
root_folder: str,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> List[Dict[str, bool]]:
"""
Iterate over ``root_folder`` and set each JSON file to a value
under ``json_path`` with the file name as the key.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
"""
set_files_result = {}
for root, dirs, files in os.walk(root_folder):
for file in files:
file_path = os.path.join(root, file)
try:
file_name = file_path.rsplit(".")[0]
self.set_file(
file_name,
json_path,
file_path,
nx=nx,
xx=xx,
decode_keys=decode_keys,
)
set_files_result[file_path] = True
except JSONDecodeError:
set_files_result[file_path] = False
return set_files_result
def strlen(self, name: str, path: Optional[str] = None) -> List[Union[int, None]]:
"""Return the length of the string JSON value under ``path`` at key
``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonstrlen
""" # noqa
pieces = [name]
if path is not None:
pieces.append(str(path))
return self.execute_command("JSON.STRLEN", *pieces)
def toggle(
self, name: str, path: Optional[str] = Path.root_path()
) -> Union[bool, List[Optional[int]]]:
"""Toggle boolean value under ``path`` at key ``name``.
returning the new value.
For more information: https://oss.redis.com/redisjson/commands/#jsontoggle
""" # noqa
return self.execute_command("JSON.TOGGLE", name, str(path))
def strappend(
self, name: str, value: str, path: Optional[int] = Path.root_path()
) -> Union[int, List[Optional[int]]]:
"""Append to the string JSON value. If two options are specified after
the key name, the path is determined to be the first. If a single
option is passed, then the root_path (i.e Path.root_path()) is used.
For more information: https://oss.redis.com/redisjson/commands/#jsonstrappend
""" # noqa
pieces = [name, str(path), self._encode(value)]
return self.execute_command("JSON.STRAPPEND", *pieces)
def debug(
self,
subcommand: str,
key: Optional[str] = None,
path: Optional[str] = Path.root_path(),
) -> Union[int, List[str]]:
"""Return the memory usage in bytes of a value under ``path`` from
key ``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsondebug
""" # noqa
valid_subcommands = ["MEMORY", "HELP"]
if subcommand not in valid_subcommands:
raise DataError("The only valid subcommands are ", str(valid_subcommands))
pieces = [subcommand]
if subcommand == "MEMORY":
if key is None:
raise DataError("No key specified")
pieces.append(key)
pieces.append(str(path))
return self.execute_command("JSON.DEBUG", *pieces)
@deprecated(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonget(self, *args, **kwargs):
return self.get(*args, **kwargs)
@deprecated(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonmget(self, *args, **kwargs):
return self.mget(*args, **kwargs)
@deprecated(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonset(self, *args, **kwargs):
return self.set(*args, **kwargs)
| mit | 0562824fe64e596f09f22059f74baafd | 34.876289 | 88 | 0.574066 | 3.926657 | false | false | false | false |
andymccurdy/redis-py | tests/test_connection_pool.py | 2 | 29948 | import os
import re
import time
from threading import Thread
from unittest import mock
import pytest
import redis
from redis.connection import ssl_available, to_bool
from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt
from .test_pubsub import wait_for_message
class DummyConnection:
    """Minimal stand-in for a redis connection, used by the pool tests."""

    description_format = "DummyConnection<>"

    def __init__(self, **kwargs):
        # Remember the construction kwargs so tests can inspect them, and
        # record the creating process id the way a real connection does.
        self.kwargs = kwargs
        self.pid = os.getpid()

    def connect(self):
        """No-op: a dummy connection never opens a socket."""
        pass

    def can_read(self):
        """A dummy connection never has pending data to read."""
        return False
class TestConnectionPool:
    """Tests for the non-blocking ``redis.ConnectionPool``."""

    def get_pool(
        self,
        connection_kwargs=None,
        max_connections=None,
        connection_class=redis.Connection,
    ):
        # Fall back to an empty dict so the splat below is always valid.
        kwargs = connection_kwargs or {}
        return redis.ConnectionPool(
            connection_class=connection_class,
            max_connections=max_connections,
            **kwargs,
        )

    def test_connection_creation(self):
        extra = {"foo": "bar", "biz": "baz"}
        pool = self.get_pool(
            connection_kwargs=extra, connection_class=DummyConnection
        )
        conn = pool.get_connection("_")
        # The pool must instantiate the requested class and forward kwargs.
        assert isinstance(conn, DummyConnection)
        assert conn.kwargs == extra

    def test_multiple_connections(self, master_host):
        kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(connection_kwargs=kwargs)
        first = pool.get_connection("_")
        second = pool.get_connection("_")
        # Two outstanding checkouts must be distinct connections.
        assert first != second

    def test_max_connections(self, master_host):
        kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(max_connections=2, connection_kwargs=kwargs)
        pool.get_connection("_")
        pool.get_connection("_")
        # A third checkout past the limit must raise.
        with pytest.raises(redis.ConnectionError):
            pool.get_connection("_")

    def test_reuse_previously_released_connection(self, master_host):
        kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(connection_kwargs=kwargs)
        first = pool.get_connection("_")
        pool.release(first)
        # A released connection should be handed out again.
        assert pool.get_connection("_") == first

    def test_repr_contains_db_info_tcp(self):
        kwargs = {
            "host": "localhost",
            "port": 6379,
            "db": 1,
            "client_name": "test-client",
        }
        pool = self.get_pool(
            connection_kwargs=kwargs, connection_class=redis.Connection
        )
        expected = (
            "ConnectionPool<Connection<"
            "host=localhost,port=6379,db=1,client_name=test-client>>"
        )
        assert repr(pool) == expected

    def test_repr_contains_db_info_unix(self):
        kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
        pool = self.get_pool(
            connection_kwargs=kwargs,
            connection_class=redis.UnixDomainSocketConnection,
        )
        expected = (
            "ConnectionPool<UnixDomainSocketConnection<"
            "path=/abc,db=1,client_name=test-client>>"
        )
        assert repr(pool) == expected
class TestBlockingConnectionPool:
    """Tests for ``redis.BlockingConnectionPool``, which waits when exhausted."""
    def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
        """Build a blocking pool of DummyConnections with the given limits."""
        connection_kwargs = connection_kwargs or {}
        pool = redis.BlockingConnectionPool(
            connection_class=DummyConnection,
            max_connections=max_connections,
            timeout=timeout,
            **connection_kwargs,
        )
        return pool
    def test_connection_creation(self, master_host):
        """The pool instantiates its connection class with its stored kwargs."""
        connection_kwargs = {
            "foo": "bar",
            "biz": "baz",
            "host": master_host[0],
            "port": master_host[1],
        }
        pool = self.get_pool(connection_kwargs=connection_kwargs)
        connection = pool.get_connection("_")
        assert isinstance(connection, DummyConnection)
        assert connection.kwargs == connection_kwargs
    def test_multiple_connections(self, master_host):
        """Two checkouts without a release yield two distinct connections."""
        connection_kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(connection_kwargs=connection_kwargs)
        c1 = pool.get_connection("_")
        c2 = pool.get_connection("_")
        assert c1 != c2
    def test_connection_pool_blocks_until_timeout(self, master_host):
        "When out of connections, block for timeout seconds, then raise"
        connection_kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(
            max_connections=1, timeout=0.1, connection_kwargs=connection_kwargs
        )
        pool.get_connection("_")
        start = time.time()
        with pytest.raises(redis.ConnectionError):
            pool.get_connection("_")
        # we should have waited at least 0.1 seconds
        assert time.time() - start >= 0.1
    def test_connection_pool_blocks_until_conn_available(self, master_host):
        """
        When out of connections, block until another connection is released
        to the pool
        """
        connection_kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(
            max_connections=1, timeout=2, connection_kwargs=connection_kwargs
        )
        c1 = pool.get_connection("_")
        def target():
            # release the pool's only connection after a short delay, from
            # another thread, so the blocked checkout below can proceed
            time.sleep(0.1)
            pool.release(c1)
        start = time.time()
        Thread(target=target).start()
        pool.get_connection("_")
        assert time.time() - start >= 0.1
    def test_reuse_previously_released_connection(self, master_host):
        """A released connection is handed back out on the next checkout."""
        connection_kwargs = {"host": master_host[0], "port": master_host[1]}
        pool = self.get_pool(connection_kwargs=connection_kwargs)
        c1 = pool.get_connection("_")
        pool.release(c1)
        c2 = pool.get_connection("_")
        assert c1 == c2
    def test_repr_contains_db_info_tcp(self):
        """repr() of a TCP pool includes host, port, db and client name."""
        pool = redis.ConnectionPool(
            host="localhost", port=6379, client_name="test-client"
        )
        expected = (
            "ConnectionPool<Connection<"
            "host=localhost,port=6379,db=0,client_name=test-client>>"
        )
        assert repr(pool) == expected
    def test_repr_contains_db_info_unix(self):
        """repr() of a unix-socket pool includes path, db and client name."""
        pool = redis.ConnectionPool(
            connection_class=redis.UnixDomainSocketConnection,
            path="abc",
            client_name="test-client",
        )
        expected = (
            "ConnectionPool<UnixDomainSocketConnection<"
            "path=abc,db=0,client_name=test-client>>"
        )
        assert repr(pool) == expected
class TestConnectionPoolURLParsing:
    """Tests for ``ConnectionPool.from_url`` with ``redis://`` URLs."""
    def test_hostname(self):
        pool = redis.ConnectionPool.from_url("redis://my.host")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "my.host",
        }
    def test_quoted_hostname(self):
        """Percent-encoded characters in the hostname are unquoted."""
        pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "my / host +=+",
        }
    def test_port(self):
        pool = redis.ConnectionPool.from_url("redis://localhost:6380")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "port": 6380,
        }
    @skip_if_server_version_lt("6.0.0")
    def test_username(self):
        pool = redis.ConnectionPool.from_url("redis://myuser:@localhost")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "username": "myuser",
        }
    @skip_if_server_version_lt("6.0.0")
    def test_quoted_username(self):
        """Percent-encoded characters in the username are unquoted."""
        pool = redis.ConnectionPool.from_url(
            "redis://%2Fmyuser%2F%2B name%3D%24+:@localhost"
        )
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "username": "/myuser/+ name=$+",
        }
    def test_password(self):
        pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "password": "mypassword",
        }
    def test_quoted_password(self):
        """Percent-encoded characters in the password are unquoted."""
        pool = redis.ConnectionPool.from_url(
            "redis://:%2Fmypass%2F%2B word%3D%24+@localhost"
        )
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "password": "/mypass/+ word=$+",
        }
    @skip_if_server_version_lt("6.0.0")
    def test_username_and_password(self):
        pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "username": "myuser",
            "password": "mypass",
        }
    def test_db_as_argument(self):
        pool = redis.ConnectionPool.from_url("redis://localhost", db=1)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "db": 1,
        }
    def test_db_in_path(self):
        """A db number in the URL path overrides the db keyword argument."""
        pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "db": 2,
        }
    def test_db_in_querystring(self):
        """A db in the querystring wins over both the path and the kwarg."""
        pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "db": 3,
        }
    def test_extra_typed_querystring_options(self):
        """Known querystring options are coerced to their proper types.

        Note: the empty ``socket_keepalive=`` parses to None and is dropped
        from the resulting kwargs, and ``max_connections`` is consumed by the
        pool itself rather than passed through to connections.
        """
        pool = redis.ConnectionPool.from_url(
            "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10"
            "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10"
        )
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            "host": "localhost",
            "db": 2,
            "socket_timeout": 20.0,
            "socket_connect_timeout": 10.0,
            "retry_on_timeout": True,
        }
        assert pool.max_connections == 10
    def test_boolean_parsing(self):
        """to_bool maps the documented truthy/falsy spellings; empty -> None."""
        for expected, value in (
            (None, None),
            (None, ""),
            (False, 0),
            (False, "0"),
            (False, "f"),
            (False, "F"),
            (False, "False"),
            (False, "n"),
            (False, "N"),
            (False, "No"),
            (True, 1),
            (True, "1"),
            (True, "y"),
            (True, "Y"),
            (True, "Yes"),
        ):
            assert expected is to_bool(value)
    def test_client_name_in_querystring(self):
        pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
        assert pool.connection_kwargs["client_name"] == "test-client"
    def test_invalid_extra_typed_querystring_options(self):
        """Unparseable values for typed options raise ValueError."""
        with pytest.raises(ValueError):
            redis.ConnectionPool.from_url(
                "redis://localhost/2?socket_timeout=_&" "socket_connect_timeout=abc"
            )
    def test_extra_querystring_options(self):
        """Unrecognized querystring options pass through as string kwargs."""
        pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2")
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"}
    def test_calling_from_subclass_returns_correct_instance(self):
        pool = redis.BlockingConnectionPool.from_url("redis://localhost")
        assert isinstance(pool, redis.BlockingConnectionPool)
    def test_client_creates_connection_pool(self):
        pool = redis.Redis.from_url("redis://myhost")
        assert r.connection_pool.connection_class == redis.Connection
        assert r.connection_pool.connection_kwargs == {
            "host": "myhost",
        }
    def test_invalid_scheme_raises_error(self):
        """URLs without a recognized scheme are rejected with a clear message."""
        with pytest.raises(ValueError) as cm:
            redis.ConnectionPool.from_url("localhost")
        assert str(cm.value) == (
            "Redis URL must specify one of the following schemes "
            "(redis://, rediss://, unix://)"
        )
class TestConnectionPoolUnixSocketURLParsing:
    """Tests for ``ConnectionPool.from_url`` with ``unix://`` URLs."""
    def test_defaults(self):
        pool = redis.ConnectionPool.from_url("unix:///socket")
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
        }
    @skip_if_server_version_lt("6.0.0")
    def test_username(self):
        pool = redis.ConnectionPool.from_url("unix://myuser:@/socket")
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "username": "myuser",
        }
    @skip_if_server_version_lt("6.0.0")
    def test_quoted_username(self):
        """Percent-encoded characters in the username are unquoted."""
        pool = redis.ConnectionPool.from_url(
            "unix://%2Fmyuser%2F%2B name%3D%24+:@/socket"
        )
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "username": "/myuser/+ name=$+",
        }
    def test_password(self):
        pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket")
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "password": "mypassword",
        }
    def test_quoted_password(self):
        """Percent-encoded characters in the password are unquoted."""
        pool = redis.ConnectionPool.from_url(
            "unix://:%2Fmypass%2F%2B word%3D%24+@/socket"
        )
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "password": "/mypass/+ word=$+",
        }
    def test_quoted_path(self):
        """Percent-encoded characters in the socket path are unquoted."""
        pool = redis.ConnectionPool.from_url(
            "unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket"
        )
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/my/path/to/../+_+=$ocket",
            "password": "mypassword",
        }
    def test_db_as_argument(self):
        pool = redis.ConnectionPool.from_url("unix:///socket", db=1)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "db": 1,
        }
    def test_db_in_querystring(self):
        """A db in the querystring overrides the db keyword argument."""
        pool = redis.ConnectionPool.from_url("unix:///socket?db=2", db=1)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            "path": "/socket",
            "db": 2,
        }
    def test_client_name_in_querystring(self):
        pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
        assert pool.connection_kwargs["client_name"] == "test-client"
    def test_extra_querystring_options(self):
        """Unrecognized querystring options pass through as string kwargs."""
        pool = redis.ConnectionPool.from_url("unix:///socket?a=1&b=2")
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {"path": "/socket", "a": "1", "b": "2"}
    def test_connection_class_override(self):
        """An explicit connection_class kwarg beats the scheme's default."""
        class MyConnection(redis.UnixDomainSocketConnection):
            pass
        pool = redis.ConnectionPool.from_url(
            "unix:///socket", connection_class=MyConnection
        )
        assert pool.connection_class == MyConnection
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
class TestSSLConnectionURLParsing:
    """Tests for ``ConnectionPool.from_url`` with ``rediss://`` (TLS) URLs."""
    def test_host(self):
        pool = redis.ConnectionPool.from_url("rediss://my.host")
        assert pool.connection_class == redis.SSLConnection
        assert pool.connection_kwargs == {
            "host": "my.host",
        }
    def test_connection_class_override(self):
        """An explicit connection_class kwarg beats the scheme's default."""
        class MyConnection(redis.SSLConnection):
            pass
        pool = redis.ConnectionPool.from_url(
            "rediss://my.host", connection_class=MyConnection
        )
        assert pool.connection_class == MyConnection
    def test_cert_reqs_options(self):
        """ssl_cert_reqs / ssl_check_hostname URL options map to ssl constants."""
        import ssl
        class DummyConnectionPool(redis.ConnectionPool):
            # bypass pooling so every from_url gives a fresh, inspectable
            # connection object without touching the network
            def get_connection(self, *args, **kwargs):
                return self.make_connection()
        pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none")
        assert pool.get_connection("_").cert_reqs == ssl.CERT_NONE
        pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional")
        assert pool.get_connection("_").cert_reqs == ssl.CERT_OPTIONAL
        pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required")
        assert pool.get_connection("_").cert_reqs == ssl.CERT_REQUIRED
        pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False")
        assert pool.get_connection("_").check_hostname is False
        pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True")
        assert pool.get_connection("_").check_hostname is True
class TestConnection:
    """Connection-level error handling tests run against a live server."""
    def test_on_connect_error(self):
        """
        An error in Connection.on_connect should disconnect from the server
        see for details: https://github.com/andymccurdy/redis-py/issues/368
        """
        # this assumes the Redis server being tested against doesn't have
        # 9999 databases ;)
        bad_connection = redis.Redis(db=9999)
        # an error should be raised on connect
        with pytest.raises(redis.RedisError):
            bad_connection.info()
        pool = bad_connection.connection_pool
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock
    @pytest.mark.onlynoncluster
    @skip_if_server_version_lt("2.8.8")
    @skip_if_redis_enterprise()
    def test_busy_loading_disconnects_socket(self, r):
        """
        If Redis raises a LOADING error, the connection should be
        disconnected and a BusyLoadingError raised
        """
        with pytest.raises(redis.BusyLoadingError):
            r.execute_command("DEBUG", "ERROR", "LOADING fake message")
        assert not r.connection._sock
    @pytest.mark.onlynoncluster
    @skip_if_server_version_lt("2.8.8")
    @skip_if_redis_enterprise()
    def test_busy_loading_from_pipeline_immediate_command(self, r):
        """
        BusyLoadingErrors should raise from Pipelines that execute a
        command immediately, like WATCH does.
        """
        pipe = r.pipeline()
        with pytest.raises(redis.BusyLoadingError):
            pipe.immediate_execute_command("DEBUG", "ERROR", "LOADING fake message")
        pool = r.connection_pool
        assert not pipe.connection
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock
    @pytest.mark.onlynoncluster
    @skip_if_server_version_lt("2.8.8")
    @skip_if_redis_enterprise()
    def test_busy_loading_from_pipeline(self, r):
        """
        BusyLoadingErrors should be raised from a pipeline execution
        regardless of the raise_on_error flag.
        """
        pipe = r.pipeline()
        pipe.execute_command("DEBUG", "ERROR", "LOADING fake message")
        with pytest.raises(redis.BusyLoadingError):
            pipe.execute()
        pool = r.connection_pool
        assert not pipe.connection
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock
    @skip_if_server_version_lt("2.8.8")
    @skip_if_redis_enterprise()
    def test_read_only_error(self, r):
        "READONLY errors get turned in ReadOnlyError exceptions"
        with pytest.raises(redis.ReadOnlyError):
            r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
    def test_connect_from_url_tcp(self):
        """from_url over TCP builds a pool whose repr reflects the URL."""
        connection = redis.Redis.from_url("redis://localhost")
        pool = connection.connection_pool
        assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
            "ConnectionPool",
            "Connection",
            "host=localhost,port=6379,db=0",
        )
    def test_connect_from_url_unix(self):
        """from_url over a unix socket builds a pool whose repr reflects the URL."""
        connection = redis.Redis.from_url("unix:///path/to/socket")
        pool = connection.connection_pool
        assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
            "ConnectionPool",
            "UnixDomainSocketConnection",
            "path=/path/to/socket,db=0",
        )
    @skip_if_redis_enterprise()
    def test_connect_no_auth_supplied_when_required(self, r):
        """
        AuthenticationError should be raised when the server requires a
        password but one isn't supplied.
        """
        with pytest.raises(redis.AuthenticationError):
            r.execute_command(
                "DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
            )
    @skip_if_redis_enterprise()
    def test_connect_invalid_password_supplied(self, r):
        "AuthenticationError should be raised when sending the wrong password"
        with pytest.raises(redis.AuthenticationError):
            r.execute_command("DEBUG", "ERROR", "ERR invalid password")
@pytest.mark.onlynoncluster
class TestMultiConnectionClient:
    """Tests for clients created with single_connection_client=False."""
    @pytest.fixture()
    def r(self, request):
        return _get_client(redis.Redis, request, single_connection_client=False)
    def test_multi_connection_command(self, r):
        # no dedicated connection is pinned to the client; each command checks
        # one out from the pool and returns it afterwards
        assert not r.connection
        assert r.set("a", "123")
        assert r.get("a") == b"123"
@pytest.mark.onlynoncluster
class TestHealthCheck:
    """Tests for connection health checks (PING) on clients, pipelines, pubsub.

    Each test forces ``next_health_check`` into the past (or to 0) and then
    asserts -- via a wrapping mock on ``send_command`` -- whether a PING was
    (or was not) issued, and that the next check is rescheduled ``interval``
    seconds out.
    """
    interval = 60
    @pytest.fixture()
    def r(self, request):
        return _get_client(redis.Redis, request, health_check_interval=self.interval)
    def assert_interval_advanced(self, connection):
        """Assert the next health check was rescheduled ~interval seconds out."""
        diff = connection.next_health_check - time.time()
        assert self.interval > diff > (self.interval - 1)
    def test_health_check_runs(self, r):
        r.connection.next_health_check = time.time() - 1
        r.connection.check_health()
        self.assert_interval_advanced(r.connection)
    def test_arbitrary_command_invokes_health_check(self, r):
        # invoke a command to make sure the connection is entirely setup
        r.get("foo")
        r.connection.next_health_check = time.time()
        with mock.patch.object(
            r.connection, "send_command", wraps=r.connection.send_command
        ) as m:
            r.get("foo")
            m.assert_called_with("PING", check_health=False)
        self.assert_interval_advanced(r.connection)
    def test_arbitrary_command_advances_next_health_check(self, r):
        r.get("foo")
        next_health_check = r.connection.next_health_check
        r.get("foo")
        assert next_health_check < r.connection.next_health_check
    def test_health_check_not_invoked_within_interval(self, r):
        r.get("foo")
        with mock.patch.object(
            r.connection, "send_command", wraps=r.connection.send_command
        ) as m:
            r.get("foo")
            ping_call_spec = (("PING",), {"check_health": False})
            assert ping_call_spec not in m.call_args_list
    def test_health_check_in_pipeline(self, r):
        with r.pipeline(transaction=False) as pipe:
            pipe.connection = pipe.connection_pool.get_connection("_")
            pipe.connection.next_health_check = 0
            with mock.patch.object(
                pipe.connection, "send_command", wraps=pipe.connection.send_command
            ) as m:
                responses = pipe.set("foo", "bar").get("foo").execute()
                m.assert_any_call("PING", check_health=False)
                assert responses == [True, b"bar"]
    def test_health_check_in_transaction(self, r):
        with r.pipeline(transaction=True) as pipe:
            pipe.connection = pipe.connection_pool.get_connection("_")
            pipe.connection.next_health_check = 0
            with mock.patch.object(
                pipe.connection, "send_command", wraps=pipe.connection.send_command
            ) as m:
                responses = pipe.set("foo", "bar").get("foo").execute()
                m.assert_any_call("PING", check_health=False)
                assert responses == [True, b"bar"]
    def test_health_check_in_watched_pipeline(self, r):
        r.set("foo", "bar")
        with r.pipeline(transaction=False) as pipe:
            pipe.connection = pipe.connection_pool.get_connection("_")
            pipe.connection.next_health_check = 0
            with mock.patch.object(
                pipe.connection, "send_command", wraps=pipe.connection.send_command
            ) as m:
                pipe.watch("foo")
                # the health check should be called when watching
                m.assert_called_with("PING", check_health=False)
                self.assert_interval_advanced(pipe.connection)
                assert pipe.get("foo") == b"bar"
                # reset the mock to clear the call list and schedule another
                # health check
                m.reset_mock()
                pipe.connection.next_health_check = 0
                pipe.multi()
                responses = pipe.set("foo", "not-bar").get("foo").execute()
                assert responses == [True, b"not-bar"]
                m.assert_any_call("PING", check_health=False)
    def test_health_check_in_pubsub_before_subscribe(self, r):
        "A health check happens before the first [p]subscribe"
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection("_")
        p.connection.next_health_check = 0
        with mock.patch.object(
            p.connection, "send_command", wraps=p.connection.send_command
        ) as m:
            assert not p.subscribed
            p.subscribe("foo")
            # the connection is not yet in pubsub mode, so the normal
            # ping/pong within connection.send_command should check
            # the health of the connection
            m.assert_any_call("PING", check_health=False)
            self.assert_interval_advanced(p.connection)
            subscribe_message = wait_for_message(p)
            assert subscribe_message["type"] == "subscribe"
    def test_health_check_in_pubsub_after_subscribed(self, r):
        """
        Pubsub can handle a new subscribe when it's time to check the
        connection health
        """
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection("_")
        p.connection.next_health_check = 0
        with mock.patch.object(
            p.connection, "send_command", wraps=p.connection.send_command
        ) as m:
            p.subscribe("foo")
            subscribe_message = wait_for_message(p)
            assert subscribe_message["type"] == "subscribe"
            self.assert_interval_advanced(p.connection)
            # because we weren't subscribed when sending the subscribe
            # message to 'foo', the connection's standard check_health ran
            # prior to subscribing.
            m.assert_any_call("PING", check_health=False)
            p.connection.next_health_check = 0
            m.reset_mock()
            p.subscribe("bar")
            # the second subscribe issues exactly only command (the subscribe)
            # and the health check is not invoked
            m.assert_called_once_with("SUBSCRIBE", "bar", check_health=False)
            # since no message has been read since the health check was
            # reset, it should still be 0
            assert p.connection.next_health_check == 0
            subscribe_message = wait_for_message(p)
            assert subscribe_message["type"] == "subscribe"
            assert wait_for_message(p) is None
            # now that the connection is subscribed, the pubsub health
            # check should have taken over and include the HEALTH_CHECK_MESSAGE
            m.assert_any_call("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
            self.assert_interval_advanced(p.connection)
    def test_health_check_in_pubsub_poll(self, r):
        """
        Polling a pubsub connection that's subscribed will regularly
        check the connection's health.
        """
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection("_")
        with mock.patch.object(
            p.connection, "send_command", wraps=p.connection.send_command
        ) as m:
            p.subscribe("foo")
            subscribe_message = wait_for_message(p)
            assert subscribe_message["type"] == "subscribe"
            self.assert_interval_advanced(p.connection)
            # polling the connection before the health check interval
            # doesn't result in another health check
            m.reset_mock()
            next_health_check = p.connection.next_health_check
            assert wait_for_message(p) is None
            assert p.connection.next_health_check == next_health_check
            m.assert_not_called()
            # reset the health check and poll again
            # we should not receive a pong message, but the next_health_check
            # should be advanced
            p.connection.next_health_check = 0
            assert wait_for_message(p) is None
            m.assert_called_with("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
            self.assert_interval_advanced(p.connection)
| mit | 1c52d0095607fd1eaa249ccb3ccc623f | 36.67044 | 88 | 0.59787 | 4.024728 | false | true | false | false |
gratipay/gratipay.com | gratipay/models/exchange_route.py | 1 | 4865 | from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
    """A route by which money can move for a participant.

    Each row in ``exchange_routes`` ties a participant to an address on a
    payment network: ``network`` is e.g. ``braintree-cc`` or ``paypal``, and
    ``address`` is the network-side identifier (a Braintree payment method
    token, a PayPal email address, ...).
    """

    typname = "exchange_routes"

    def __eq__(self, other):
        """Routes are equal when both are ExchangeRoutes with the same db id."""
        if not isinstance(other, ExchangeRoute):
            return False
        return self.id == other.id

    def __ne__(self, other):
        """Explicit inverse of __eq__ (required under Python 2 semantics)."""
        if not isinstance(other, ExchangeRoute):
            return True
        return self.id != other.id

    def __repr__(self):
        return '<ExchangeRoute: %s on %s>' % (repr(self.address), repr(self.network))

    # Constructors
    # ============

    @classmethod
    def from_id(cls, id, cursor=None):
        """Return the route with primary key ``id``, or None.

        The route's ``participant`` attribute is upgraded from a bare id to a
        full ``Participant`` object.
        """
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
            FROM exchange_routes r
            WHERE id = %(id)s
        """, locals())
        if route:
            from gratipay.models.participant import Participant  # XXX Red hot hack!
            route.set_attributes(participant=Participant.from_id(route.participant))
        return route

    @classmethod
    def from_network(cls, participant, network, cursor=None):
        """Return the participant's current (non-deleted) route on ``network``, or None."""
        participant_id = participant.id
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
            FROM current_exchange_routes r
            WHERE participant = %(participant_id)s
            AND network = %(network)s
        """, locals())
        if route:
            route.set_attributes(participant=participant)
        return route

    @classmethod
    def from_address(cls, participant, network, address, cursor=None):
        """Return the participant's route for a specific address, or None."""
        participant_id = participant.id
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
            FROM exchange_routes r
            WHERE participant = %(participant_id)s
            AND network = %(network)s
            AND address = %(address)s
        """, locals())
        if route:
            route.set_attributes(participant=participant)
        return route

    @classmethod
    def insert(cls, participant, network, address, fee_cap=None, cursor=None):
        """Create a new route and return it.

        For credit cards we also refresh the participant's cached giving,
        since a working card changes what they're able to give.
        """
        participant_id = participant.id
        error = ''
        route = (cursor or cls.db).one("""
            INSERT INTO exchange_routes
                        (participant, network, address, error, fee_cap)
                 VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
              RETURNING exchange_routes.*::exchange_routes
        """, locals())
        if network == 'braintree-cc':
            participant.update_giving_and_teams()
        route.set_attributes(participant=participant)
        return route

    def invalidate(self):
        """Soft-delete this route, deleting it at Braintree if applicable."""
        if self.network == 'braintree-cc':
            braintree.PaymentMethod.delete(self.address)
        with self.db.get_cursor() as cursor:
            # Run the UPDATE on the transaction's cursor -- not self.db, which
            # would execute on a separate connection outside this transaction
            # -- so the flag flip and the event record commit atomically,
            # mirroring revive() below.
            cursor.run("UPDATE exchange_routes SET is_deleted=true WHERE id=%s", (self.id,))
            payload = dict( id=self.participant.id
                          , exchange_route=self.id
                          , action='invalidate route'
                          , address=self.address
                           )
            self.app.add_event(cursor, 'participant', payload)
        self.set_attributes(is_deleted=True)

    def revive(self):
        """Un-delete a previously invalidated route (PayPal only)."""
        assert self.network == 'paypal'  # sanity check
        with self.db.get_cursor() as cursor:
            cursor.run("UPDATE exchange_routes SET is_deleted=false WHERE id=%s", (self.id,))
            payload = dict( id=self.participant.id
                          , exchange_route=self.id
                          , action='revive route'
                          , address=self.address
                           )
            self.app.add_event(cursor, 'participant', payload)
        self.set_attributes(is_deleted=False)

    def update_error(self, new_error):
        """Record the latest error message for this route.

        No-op on deleted routes. For credit cards, toggling between errorless
        and errored changes whether the card is usable, so we refresh the
        participant's cached giving (unless they're flagged as suspicious).
        """
        if self.is_deleted:
            return
        id = self.id
        old_error = self.error
        self.db.run("""
            UPDATE exchange_routes
            SET error = %(new_error)s
            WHERE id = %(id)s
        """, locals())
        self.set_attributes(error=new_error)

        # Update cached amounts if requested and necessary
        if self.network != 'braintree-cc':
            return
        if self.participant.is_suspicious or bool(new_error) == bool(old_error):
            return

        # XXX *White* hot hack!
        # =====================
        # During payday, participant is a record from a select of
        # payday_participants (or whatever), *not* an actual Participant
        # object. We need the real deal so we can use a method on it ...
        from gratipay.models.participant import Participant
        participant = Participant.from_username(self.participant.username)
        participant.update_giving_and_teams()
| mit | 6557ba485f44d162effdfdd13a762059 | 34.772059 | 94 | 0.563823 | 4.339875 | false | false | false | false |
gratipay/gratipay.com | gratipay/utils/history.py | 1 | 7917 | """Helpers to fetch logs of payments made to/from a participant.
Data is fetched from 3 tables: `transfers`, `payments` and `exchanges`. For
details on what these tables represent, see :ref:`db-schema`.
"""
from datetime import datetime
from decimal import Decimal
from aspen import Response
from psycopg2 import IntegrityError
def get_end_of_year_balance(db, participant, year, current_year):
    """Return the participant's Decimal balance at the end of ``year``.

    The current year's balance is live; past years are computed recursively
    (previous year's closing balance plus the year's exchanges, transfers and
    payments) and memoized in the ``balances_at`` table.
    """
    if year == current_year:
        return participant.balance
    start = participant.claimed_time or participant.ctime
    if year < start.year:
        # the participant didn't exist yet, so their balance was zero
        return Decimal('0.00')
    balance = db.one("""
        SELECT balance
        FROM balances_at
        WHERE participant = %s
        AND "at" = %s
    """, (participant.id, datetime(year+1, 1, 1)))
    if balance is not None:
        return balance
    username = participant.username
    start_balance = get_end_of_year_balance(db, participant, year-1, current_year)
    # delta = successful deposits + non-failed withdrawals (net of fees)
    #       - transfers given + transfers received
    #       + payments received from teams - payments given to teams
    delta = db.one("""
        SELECT (
                  SELECT COALESCE(sum(amount), 0) AS a
                    FROM exchanges
                   WHERE participant = %(username)s
                     AND extract(year from timestamp) = %(year)s
                     AND amount > 0
                     AND (status is null OR status = 'succeeded')
               ) + (
                  SELECT COALESCE(sum(amount-fee), 0) AS a
                    FROM exchanges
                   WHERE participant = %(username)s
                     AND extract(year from timestamp) = %(year)s
                     AND amount < 0
                     AND (status is null OR status <> 'failed')
               ) + (
                  SELECT COALESCE(sum(-amount), 0) AS a
                    FROM transfers
                   WHERE tipper = %(username)s
                     AND extract(year from timestamp) = %(year)s
               ) + (
                  SELECT COALESCE(sum(amount), 0) AS a
                    FROM transfers
                   WHERE tippee = %(username)s
                     AND extract(year from timestamp) = %(year)s
               ) + (
                  SELECT COALESCE(sum(amount), 0) AS a
                    FROM payments
                   WHERE participant = %(username)s
                     AND direction = 'to-participant'
                     AND extract(year from timestamp) = %(year)s
               ) + (
                  SELECT COALESCE(sum(-amount), 0) AS a
                    FROM payments
                   WHERE participant = %(username)s
                     AND direction = 'to-team'
                     AND extract(year from timestamp) = %(year)s
               ) AS delta
    """, locals())
    balance = start_balance + delta
    try:
        db.run("""
            INSERT INTO balances_at
                        (participant, at, balance)
                 VALUES (%s, %s, %s)
        """, (participant.id, datetime(year+1, 1, 1), balance))
    except IntegrityError:
        # a concurrent request memoized the same balance first; that's fine
        pass
    return balance
def iter_payday_events(db, participant, year=None):
    """Yields payday events for the given participant.

    Events are yielded newest-first, interleaved with synthetic marker dicts:
    an initial ``totals`` summary, then ``day-open``/``day-close`` pairs
    around each day's events. A running balance is computed *backwards* from
    the end-of-year balance, so each event dict carries the balance as it
    stood just after that event.
    """
    current_year = datetime.utcnow().year
    year = year or current_year
    username = participant.username
    exchanges = db.all("""
        SELECT *
          FROM exchanges
         WHERE participant=%(username)s
           AND extract(year from timestamp) = %(year)s
    """, locals(), back_as=dict)
    payments = db.all("""
        SELECT *
          FROM payments
         WHERE participant=%(username)s
           AND extract(year from timestamp) = %(year)s
    """, locals(), back_as=dict)
    transfers = db.all("""
        SELECT *
          FROM transfers
         WHERE (tipper=%(username)s OR tippee=%(username)s)
           AND extract(year from timestamp) = %(year)s
    """, locals(), back_as=dict)
    if not (exchanges or payments or transfers):
        return
    if payments or transfers:
        # summary row: total given vs. received across payments and transfers
        # ('take' transfers given are excluded -- those are team payouts)
        payments_given = sum([p['amount'] for p in payments if p['direction'] == 'to-team'])
        payments_received = sum([p['amount'] for p in payments \
                                            if p['direction'] == 'to-participant'])
        transfers_given = sum(t['amount'] for t in transfers \
                                          if t['tipper'] == username and t['context'] != 'take')
        transfers_received = sum(t['amount'] for t in transfers if t['tippee'] == username)
        yield dict( kind='totals'
                  , given=payments_given + transfers_given
                  , received=payments_received + transfers_received
                   )
    payday_dates = db.all("""
        SELECT ts_start::date
          FROM paydays
      ORDER BY ts_start ASC
    """)
    balance = get_end_of_year_balance(db, participant, year, current_year)
    prev_date = None
    get_timestamp = lambda e: e['timestamp']
    # newest first, so the balance can be unwound event by event
    events = sorted(exchanges+payments+transfers, key=get_timestamp, reverse=True)
    for event in events:
        event['balance'] = balance
        event_date = event['timestamp'].date()
        if event_date != prev_date:
            if prev_date:
                yield dict(kind='day-close', balance=balance)
            day_open = dict(kind='day-open', date=event_date, balance=balance)
            if payday_dates:
                # pop paydays newer than this event to find the matching one
                while payday_dates and payday_dates[-1] > event_date:
                    payday_dates.pop()
                payday_date = payday_dates[-1] if payday_dates else None
                if event_date == payday_date:
                    day_open['payday_number'] = len(payday_dates) - 1
            yield day_open
            prev_date = event_date
        # unwind the event from the running balance (we're going backwards,
        # so credits are subtracted and debits added back)
        if 'fee' in event:
            if event['amount'] > 0:
                kind = 'charge'
                if event['status'] in (None, 'succeeded'):
                    balance -= event['amount']
            else:
                kind = 'credit'
                if event['status'] != 'failed':
                    balance -= event['amount'] - event['fee']
        elif 'direction' in event:
            kind = 'payment'
            if event['direction'] == 'to-participant':
                balance -= event['amount']
            else:
                assert event['direction'] == 'to-team'
                balance += event['amount']
        else:
            kind = 'transfer'
            if event['tippee'] == username:
                balance -= event['amount']
            else:
                balance += event['amount']
        event['kind'] = kind
        yield event
    yield dict(kind='day-close', balance=balance)
def export_history(participant, year, key, back_as='namedtuple', require_key=False):
    """Export a participant's giving/taking records for *year*.

    ``out`` maps exportable keys to lazy (lambda-wrapped) queries so that
    only the requested dataset actually hits the database.  With ``key``
    given, the matching query runs and its rows are returned; an unknown
    key yields a 400.  Without a key, all datasets are evaluated unless
    ``require_key`` forces a 400 instead.

    NOTE(review): ``Response`` and ``db.all`` come from the surrounding
    application; their semantics are assumed from usage here.
    """
    db = participant.db
    params = dict(username=participant.username, year=year)
    out = {}

    # Money given: direct transfers made by this participant (individual
    # tippees prefixed with '~') unioned with payments sent to teams
    # (team names returned as-is).
    out['given'] = lambda: db.all("""
        SELECT CONCAT('~', tippee) as tippee, sum(amount) AS amount
        FROM transfers
        WHERE tipper = %(username)s
        AND extract(year from timestamp) = %(year)s
        GROUP BY tippee

        UNION

        SELECT team as tippee, sum(amount) AS amount
        FROM payments
        WHERE participant = %(username)s
        AND direction = 'to-team'
        AND extract(year from timestamp) = %(year)s
        GROUP BY tippee
    """, params, back_as=back_as)

    # FIXME: Include values from the `payments` table
    out['taken'] = lambda: db.all("""
        SELECT tipper AS team, sum(amount) AS amount
        FROM transfers
        WHERE tippee = %(username)s
        AND context = 'take'
        AND extract(year from timestamp) = %(year)s
        GROUP BY tipper
    """, params, back_as=back_as)

    if key:
        try:
            return out[key]()
        except KeyError:
            # Unknown export key requested by the client.
            raise Response(400, "bad key `%s`" % key)
    elif require_key:
        raise Response(400, "missing `key` parameter")
    else:
        # No key: evaluate every lazy query and return them all.
        return {k: v() for k, v in out.items()}
| mit | 16837836d5b09b17db1894df1b3fc6b8 | 34.662162 | 99 | 0.524946 | 4.293384 | false | false | false | false |
gratipay/gratipay.com | tests/py/test_team_sidebar.py | 1 | 1998 | from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import Harness
class Tests(Harness):
    """Visibility rules for the team sidebar ('Set Status', nav, 'Distributing')."""

    def setUp(self):
        # Fixture: a site admin, two plain participants, and one approved team.
        self.admin = self.make_participant('admin', is_admin=True)
        self.picard = self.make_participant('picard')
        self.bob = self.make_participant('bob')
        self.make_team(name='approved', is_approved=True, available=100)

    def test_set_status_visible_to_admin(self):
        assert 'Set Status' in self.client.GET('/approved/', auth_as='admin').body

    def test_set_status_not_visible_to_owner(self):
        assert 'Set Status' not in self.client.GET('/approved/', auth_as='picard').body

    def test_set_status_not_visible_to_rando(self):
        assert 'Set Status' not in self.client.GET('/approved/', auth_as='bob').body

    def test_nav_visible_to_admin(self):
        assert 'Profile' in self.client.GET('/approved/', auth_as='admin').body

    def test_nav_visible_to_owner(self):
        assert 'Profile' in self.client.GET('/approved/', auth_as='picard').body

    def test_nav_not_visible_to_rando(self):
        assert 'Profile' not in self.client.GET('/approved/', auth_as='bob').body

    def test_nav_not_visible_to_unapproved_team_owner(self):
        self.make_team(name='not-approved', is_approved=False, owner='picard')
        assert 'Profile' not in self.client.GET('/not-approved/', auth_as='picard').body

    def test_distributing_visible_to_owner(self):
        assert 'Distributing' in self.client.GET('/approved/', auth_as='picard').body

    def test_distributing_not_visible_to_rando(self):
        assert 'Distributing' not in self.client.GET('/approved/', auth_as='bob').body

    def test_distributing_not_visible_to_owner_without_available(self):
        # Team with nothing available to distribute hides the Distributing nav.
        self.make_team(name='approved-nothing-available', is_approved=True, owner='picard')
        data = self.client.GET('/approved-nothing-available/', auth_as='picard')
        assert 'Distributing' not in data.body
| mit | d2407d77aa16f161e99a1db226c0434d | 40.625 | 91 | 0.676176 | 3.450777 | false | true | false | false |
msikma/pokesprite | scripts/check_data.py | 1 | 1118 | #!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
    '''Returns a list of all JSON files in the /data/ directory'''
    # Recursive glob; sorted() gives a stable, deterministic ordering.
    return sorted(glob(f'{base}/data/**/*.json', recursive=True))
def read_json_file(file):
    '''Reads a single JSON and returns a dict of its contents'''
    return json.loads(Path(file).read_text())
def print_json_error(file, err):
    '''Outputs error status'''
    print('{}: {}'.format(file, err))
def main(base):
    '''Runs a check on all the project's JSON files to see if they have valid syntax'''
    files = get_json_files(base)
    errors = False
    for path in files:
        fpath = relpath(path, base)
        try:
            read_json_file(path)
        except json.decoder.JSONDecodeError as err:
            print_json_error(fpath, err)
            errors = True
        # Fixed: a bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; catch ordinary exceptions only.
        except Exception:
            print_json_error(fpath, 'Unknown error')
            errors = True
    if errors:
        # Non-zero exit so CI fails when any file is invalid.
        sys.exit(1)
# Script entry point: validate every JSON data file, exiting non-zero on failure.
if __name__== "__main__":
    main(BASE_DIR)
| mit | ef61fbf3f68061966dddfd25df453d7c | 23.304348 | 85 | 0.661002 | 3.408537 | false | false | false | false |
snare/voltron | voltron/plugins/api/registers.py | 1 | 2328 | import voltron
import logging
from voltron.api import *
log = logging.getLogger('api')
class APIRegistersRequest(APIRequest):
"""
API state request.
{
"type": "request",
"request": "registers",
"data": {
"target_id": 0,
"thread_id": 123456,
"registers": ['rsp']
}
}
`target_id` and `thread_id` are optional. If not present, the currently
selected target and thread will be used.
`registers` is optional. If it is not included all registers will be
returned.
"""
_fields = {'target_id': False, 'thread_id': False, 'registers': False}
target_id = 0
thread_id = None
registers = []
@server_side
def dispatch(self):
try:
regs = voltron.debugger.registers(target_id=self.target_id, thread_id=self.thread_id, registers=self.registers)
res = APIRegistersResponse()
res.registers = regs
res.deref = {}
for reg, val in regs.items():
try:
if val > 0:
try:
res.deref[reg] = voltron.debugger.dereference(pointer=val)
except:
res.deref[reg] = []
else:
res.deref[reg] = []
except TypeError:
res.deref[reg] = []
except TargetBusyException:
res = APITargetBusyErrorResponse()
except NoSuchTargetException:
res = APINoSuchTargetErrorResponse()
except Exception as e:
msg = "Exception getting registers from debugger: {}".format(repr(e))
log.exception(msg)
res = APIGenericErrorResponse(msg)
return res
class APIRegistersResponse(APISuccessResponse):
"""
API status response.
{
"type": "response",
"status": "success",
"data": {
"registers": { "rip": 0x12341234, ... },
"deref": {"rip": [(pointer, 0x12341234), ...]}
}
}
"""
_fields = {'registers': True, 'deref': False}
class APIRegistersPlugin(APIPlugin):
request = 'registers'
request_class = APIRegistersRequest
response_class = APIRegistersResponse
| mit | 5e82a9a3fd6750baf08170f1e43ff3d6 | 26.714286 | 123 | 0.524485 | 4.279412 | false | false | false | false |
sdispater/orator | orator/schema/builder.py | 1 | 3617 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from .blueprint import Blueprint
class SchemaBuilder(object):
def __init__(self, connection):
"""
:param connection: The schema connection
:type connection: orator.connections.Connection
"""
self._connection = connection
self._grammar = connection.get_schema_grammar()
def has_table(self, table):
"""
Determine if the given table exists.
:param table: The table
:type table: str
:rtype: bool
"""
sql = self._grammar.compile_table_exists()
table = self._connection.get_table_prefix() + table
return len(self._connection.select(sql, [table])) > 0
def has_column(self, table, column):
"""
Determine if the given table has a given column.
:param table: The table
:type table: str
:type column: str
:rtype: bool
"""
column = column.lower()
return column in list(map(lambda x: x.lower(), self.get_column_listing(table)))
def get_column_listing(self, table):
"""
Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list
"""
table = self._connection.get_table_prefix() + table
results = self._connection.select(self._grammar.compile_column_exists(table))
return self._connection.get_post_processor().process_column_listing(results)
@contextmanager
def table(self, table):
"""
Modify a table on the schema.
:param table: The table
"""
try:
blueprint = self._create_blueprint(table)
yield blueprint
except Exception as e:
raise
try:
self._build(blueprint)
except Exception:
raise
@contextmanager
def create(self, table):
"""
Create a new table on the schema.
:param table: The table
:type table: str
:rtype: Blueprint
"""
try:
blueprint = self._create_blueprint(table)
blueprint.create()
yield blueprint
except Exception as e:
raise
try:
self._build(blueprint)
except Exception:
raise
def drop(self, table):
"""
Drop a table from the schema.
:param table: The table
:type table: str
"""
blueprint = self._create_blueprint(table)
blueprint.drop()
self._build(blueprint)
def drop_if_exists(self, table):
"""
Drop a table from the schema.
:param table: The table
:type table: str
"""
blueprint = self._create_blueprint(table)
blueprint.drop_if_exists()
self._build(blueprint)
def rename(self, from_, to):
"""
Rename a table on the schema.
"""
blueprint = self._create_blueprint(from_)
blueprint.rename(to)
self._build(blueprint)
def _build(self, blueprint):
"""
Execute the blueprint to build / modify the table.
:param blueprint: The blueprint
:type blueprint: orator.schema.Blueprint
"""
blueprint.build(self._connection, self._grammar)
def _create_blueprint(self, table):
return Blueprint(table)
def get_connection(self):
return self._connection
def set_connection(self, connection):
self._connection = connection
return self
| mit | 3e3db6258acffb35f26a936ee50f55c8 | 22.038217 | 87 | 0.560133 | 4.62532 | false | false | false | false |
sdispater/orator | orator/dbal/platforms/mysql_platform.py | 1 | 9489 | # -*- coding: utf-8 -*-
from .platform import Platform
from .keywords.mysql_keywords import MySQLKeywords
from ..identifier import Identifier
class MySQLPlatform(Platform):
LENGTH_LIMIT_TINYTEXT = 255
LENGTH_LIMIT_TEXT = 65535
LENGTH_LIMIT_MEDIUMTEXT = 16777215
LENGTH_LIMIT_TINYBLOB = 255
LENGTH_LIMIT_BLOB = 65535
LENGTH_LIMIT_MEDIUMBLOB = 16777215
INTERNAL_TYPE_MAPPING = {
"tinyint": "boolean",
"smallint": "smallint",
"mediumint": "integer",
"int": "integer",
"integer": "integer",
"bigint": "bigint",
"int8": "bigint",
"bool": "boolean",
"boolean": "boolean",
"tinytext": "text",
"mediumtext": "text",
"longtext": "text",
"text": "text",
"varchar": "string",
"string": "string",
"char": "string",
"date": "date",
"datetime": "datetime",
"timestamp": "datetime",
"time": "time",
"float": "float",
"double": "float",
"real": "float",
"decimal": "decimal",
"numeric": "decimal",
"year": "date",
"longblob": "blob",
"blob": "blob",
"mediumblob": "blob",
"tinyblob": "blob",
"binary": "binary",
"varbinary": "binary",
"set": "simple_array",
"enum": "enum",
}
def get_list_table_columns_sql(self, table, database=None):
if database:
database = "'%s'" % database
else:
database = "DATABASE()"
return (
"SELECT COLUMN_NAME AS field, COLUMN_TYPE AS type, IS_NULLABLE AS `null`, "
"COLUMN_KEY AS `key`, COLUMN_DEFAULT AS `default`, EXTRA AS extra, COLUMN_COMMENT AS comment, "
"CHARACTER_SET_NAME AS character_set, COLLATION_NAME AS collation "
"FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = '%s'"
% (database, table)
)
def get_list_table_indexes_sql(self, table, current_database=None):
sql = """
SELECT TABLE_NAME AS `Table`, NON_UNIQUE AS Non_Unique, INDEX_NAME AS Key_name,
SEQ_IN_INDEX AS Seq_in_index, COLUMN_NAME AS Column_Name, COLLATION AS Collation,
CARDINALITY AS Cardinality, SUB_PART AS Sub_Part, PACKED AS Packed,
NULLABLE AS `Null`, INDEX_TYPE AS Index_Type, COMMENT AS Comment
FROM information_schema.STATISTICS WHERE TABLE_NAME = '%s'
"""
if current_database:
sql += " AND TABLE_SCHEMA = '%s'" % current_database
return sql % table
def get_list_table_foreign_keys_sql(self, table, database=None):
sql = (
"SELECT DISTINCT k.`CONSTRAINT_NAME`, k.`COLUMN_NAME`, k.`REFERENCED_TABLE_NAME`, "
"k.`REFERENCED_COLUMN_NAME` /*!50116 , c.update_rule, c.delete_rule */ "
"FROM information_schema.key_column_usage k /*!50116 "
"INNER JOIN information_schema.referential_constraints c ON "
" c.constraint_name = k.constraint_name AND "
" c.table_name = '%s' */ WHERE k.table_name = '%s'" % (table, table)
)
if database:
sql += (
" AND k.table_schema = '%s' /*!50116 AND c.constraint_schema = '%s' */"
% (database, database)
)
sql += " AND k.`REFERENCED_COLUMN_NAME` IS NOT NULL"
return sql
def get_alter_table_sql(self, diff):
"""
Get the ALTER TABLE SQL statement
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list
"""
column_sql = []
query_parts = []
if diff.new_name is not False:
query_parts.append(
"RENAME TO %s" % diff.get_new_name().get_quoted_name(self)
)
# Added columns?
# Removed columns?
for column_diff in diff.changed_columns.values():
column = column_diff.column
column_dict = column.to_dict()
# Don't propagate default value changes for unsupported column types.
if (
column_diff.has_changed("default")
and len(column_diff.changed_properties) == 1
and (column_dict["type"] == "text" or column_dict["type"] == "blob")
):
continue
query_parts.append(
"CHANGE %s %s"
% (
column_diff.get_old_column_name().get_quoted_name(self),
self.get_column_declaration_sql(
column.get_quoted_name(self), column_dict
),
)
)
for old_column_name, column in diff.renamed_columns.items():
column_dict = column.to_dict()
old_column_name = Identifier(old_column_name)
query_parts.append(
"CHANGE %s %s"
% (
self.quote(old_column_name.get_quoted_name(self)),
self.get_column_declaration_sql(
self.quote(column.get_quoted_name(self)), column_dict
),
)
)
sql = []
if len(query_parts) > 0:
sql.append(
"ALTER TABLE %s %s"
% (diff.get_name(self).get_quoted_name(self), ", ".join(query_parts))
)
return sql
def convert_booleans(self, item):
if isinstance(item, list):
for i, value in enumerate(item):
if isinstance(value, bool):
item[i] = str(value).lower()
elif isinstance(item, bool):
item = str(item).lower()
return item
def get_boolean_type_declaration_sql(self, column):
return "TINYINT(1)"
def get_integer_type_declaration_sql(self, column):
return "INT " + self._get_common_integer_type_declaration_sql(column)
def get_bigint_type_declaration_sql(self, column):
return "BIGINT " + self._get_common_integer_type_declaration_sql(column)
def get_smallint_type_declaration_sql(self, column):
return "SMALLINT " + self._get_common_integer_type_declaration_sql(column)
def get_guid_type_declaration_sql(self, column):
return "UUID"
def get_datetime_type_declaration_sql(self, column):
if "version" in column and column["version"] == True:
return "TIMESTAMP"
return "DATETIME"
def get_date_type_declaration_sql(self, column):
return "DATE"
def get_time_type_declaration_sql(self, column):
return "TIME"
def get_varchar_type_declaration_sql_snippet(self, length, fixed):
if fixed:
return "CHAR(%s)" % length if length else "CHAR(255)"
else:
return "VARCHAR(%s)" % length if length else "VARCHAR(255)"
def get_binary_type_declaration_sql_snippet(self, length, fixed):
if fixed:
return "BINARY(%s)" % (length or 255)
else:
return "VARBINARY(%s)" % (length or 255)
def get_text_type_declaration_sql(self, column):
length = column.get("length")
if length:
if length <= self.LENGTH_LIMIT_TINYTEXT:
return "TINYTEXT"
if length <= self.LENGTH_LIMIT_TEXT:
return "TEXT"
if length <= self.LENGTH_LIMIT_MEDIUMTEXT:
return "MEDIUMTEXT"
return "LONGTEXT"
def get_blob_type_declaration_sql(self, column):
length = column.get("length")
if length:
if length <= self.LENGTH_LIMIT_TINYBLOB:
return "TINYBLOB"
if length <= self.LENGTH_LIMIT_BLOB:
return "BLOB"
if length <= self.LENGTH_LIMIT_MEDIUMBLOB:
return "MEDIUMBLOB"
return "LONGBLOB"
def get_clob_type_declaration_sql(self, column):
length = column.get("length")
if length:
if length <= self.LENGTH_LIMIT_TINYTEXT:
return "TINYTEXT"
if length <= self.LENGTH_LIMIT_TEXT:
return "TEXT"
if length <= self.LENGTH_LIMIT_MEDIUMTEXT:
return "MEDIUMTEXT"
return "LONGTEXT"
def get_decimal_type_declaration_sql(self, column):
decl = super(MySQLPlatform, self).get_decimal_type_declaration_sql(column)
return decl + self.get_unsigned_declaration(column)
def get_unsigned_declaration(self, column):
if column.get("unsigned"):
return " UNSIGNED"
return ""
def _get_common_integer_type_declaration_sql(self, column):
autoinc = ""
if column.get("autoincrement"):
autoinc = " AUTO_INCREMENT"
return self.get_unsigned_declaration(column) + autoinc
def get_float_type_declaration_sql(self, column):
return "DOUBLE PRECISION" + self.get_unsigned_declaration(column)
def get_enum_type_declaration_sql(self, column):
return "ENUM{}".format(column["extra"]["definition"])
def supports_foreign_key_constraints(self):
return True
def supports_column_collation(self):
return False
def quote(self, name):
return "`%s`" % name.replace("`", "``")
def _get_reserved_keywords_class(self):
return MySQLKeywords
def get_identifier_quote_character(self):
return "`"
| mit | 64261b4267b5d273c039818e4685519c | 30.949495 | 107 | 0.550848 | 3.958698 | false | false | false | false |
sdispater/orator | orator/schema/grammars/postgres_grammar.py | 1 | 7103 | # -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
from ..blueprint import Blueprint
from ...query.expression import QueryExpression
from ...support.fluent import Fluent
class PostgresSchemaGrammar(SchemaGrammar):
_modifiers = ["increment", "nullable", "default"]
_serials = [
"big_integer",
"integer",
"medium_integer",
"small_integer",
"tiny_integer",
]
marker = "%s"
def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
table = self.get_table_prefix() + blueprint.get_table()
column = self.wrap(command.from_)
return "ALTER TABLE %s RENAME COLUMN %s TO %s" % (
table,
column,
self.wrap(command.to),
)
def compile_table_exists(self):
"""
Compile the query to determine if a table exists
:rtype: str
"""
return (
"SELECT * "
"FROM information_schema.tables "
"WHERE table_name = %(marker)s" % {"marker": self.get_marker()}
)
def compile_column_exists(self, table):
"""
Compile the query to determine the list of columns.
"""
return (
"SELECT column_name "
"FROM information_schema.columns "
"WHERE table_name = '%s'" % table
)
def compile_create(self, blueprint, command, _):
"""
Compile a create table command.
"""
columns = ", ".join(self._get_columns(blueprint))
return "CREATE TABLE %s (%s)" % (self.wrap_table(blueprint), columns)
def compile_add(self, blueprint, command, _):
table = self.wrap_table(blueprint)
columns = self.prefix_list("ADD COLUMN", self._get_columns(blueprint))
return "ALTER TABLE %s %s" % (table, ", ".join(columns))
def compile_primary(self, blueprint, command, _):
columns = self.columnize(command.columns)
return "ALTER TABLE %s ADD PRIMARY KEY (%s)" % (
self.wrap_table(blueprint),
columns,
)
def compile_unique(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return "ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)" % (
table,
command.index,
columns,
)
def compile_index(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return "CREATE INDEX %s ON %s (%s)" % (command.index, table, columns)
def compile_drop(self, blueprint, command, _):
return "DROP TABLE %s" % self.wrap_table(blueprint)
def compile_drop_if_exists(self, blueprint, command, _):
return "DROP TABLE IF EXISTS %s" % self.wrap_table(blueprint)
def compile_drop_column(self, blueprint, command, connection):
columns = self.prefix_list("DROP COLUMN", self.wrap_list(command.columns))
table = self.wrap_table(blueprint)
return "ALTER TABLE %s %s" % (table, ", ".join(columns))
def compile_drop_primary(self, blueprint, command, _):
table = blueprint.get_table()
return "ALTER TABLE %s DROP CONSTRAINT %s_pkey" % (
self.wrap_table(blueprint),
table,
)
def compile_drop_unique(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return "ALTER TABLE %s DROP CONSTRAINT %s" % (table, command.index)
def compile_drop_index(self, blueprint, command, _):
return "DROP INDEX %s" % command.index
def compile_drop_foreign(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return "ALTER TABLE %s DROP CONSTRAINT %s" % (table, command.index)
def compile_rename(self, blueprint, command, _):
from_ = self.wrap_table(blueprint)
return "ALTER TABLE %s RENAME TO %s" % (from_, self.wrap_table(command.to))
def _type_char(self, column):
return "CHAR(%s)" % column.length
def _type_string(self, column):
return "VARCHAR(%s)" % column.length
def _type_text(self, column):
return "TEXT"
def _type_medium_text(self, column):
return "TEXT"
def _type_long_text(self, column):
return "TEXT"
def _type_integer(self, column):
return "SERIAL" if column.auto_increment else "INTEGER"
def _type_big_integer(self, column):
return "BIGSERIAL" if column.auto_increment else "BIGINT"
def _type_medium_integer(self, column):
return "SERIAL" if column.auto_increment else "INTEGER"
def _type_tiny_integer(self, column):
return "SMALLSERIAL" if column.auto_increment else "SMALLINT"
def _type_small_integer(self, column):
return "SMALLSERIAL" if column.auto_increment else "SMALLINT"
def _type_float(self, column):
return self._type_double(column)
def _type_double(self, column):
return "DOUBLE PRECISION"
def _type_decimal(self, column):
return "DECIMAL(%s, %s)" % (column.total, column.places)
def _type_boolean(self, column):
return "BOOLEAN"
def _type_enum(self, column):
allowed = list(map(lambda a: "'%s'" % a, column.allowed))
return 'VARCHAR(255) CHECK ("%s" IN (%s))' % (column.name, ", ".join(allowed))
def _type_json(self, column):
return "JSON"
def _type_date(self, column):
return "DATE"
def _type_datetime(self, column):
return "TIMESTAMP(6) WITHOUT TIME ZONE"
def _type_time(self, column):
return "TIME(6) WITHOUT TIME ZONE"
def _type_timestamp(self, column):
if column.use_current:
return "TIMESTAMP(6) WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP(6)"
return "TIMESTAMP(6) WITHOUT TIME ZONE"
def _type_binary(self, column):
return "BYTEA"
def _modify_nullable(self, blueprint, column):
if column.get("nullable"):
return " NULL"
return " NOT NULL"
def _modify_default(self, blueprint, column):
if column.get("default") is not None:
return " DEFAULT %s" % self._get_default_value(column.default)
return ""
def _modify_increment(self, blueprint, column):
if column.type in self._serials and column.auto_increment:
return " PRIMARY KEY"
return ""
def _get_dbal_column_type(self, type_):
"""
Get the dbal column type.
:param type_: The fluent type
:type type_: str
:rtype: str
"""
type_ = type_.lower()
if type_ == "enum":
return "string"
return super(PostgresSchemaGrammar, self)._get_dbal_column_type(type_)
| mit | b752a56bfe6b68a50c5a8d4fc9a9979d | 27.412 | 86 | 0.591581 | 3.970375 | false | false | false | false |
sdispater/orator | orator/dbal/platforms/platform.py | 1 | 21672 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from ..index import Index
from ..table import Table
from ..identifier import Identifier
from ..exceptions import DBALException
from ...utils import basestring
class Platform(object):
    """
    Base class for DBMS-specific SQL generation helpers ("platforms").
    """

    # Cached reserved-keywords instance, lazily built by
    # get_reserved_keywords_list(); starts out unset.
    _keywords = None

    # Flag constants.  NOTE(review): their use is outside this view —
    # confirm against the concrete platforms / schema managers.
    CREATE_INDEXES = 1

    CREATE_FOREIGNKEYS = 2

    # Native DB type name -> orator internal type name (see
    # get_type_mapping()); concrete platforms override this.
    INTERNAL_TYPE_MAPPING = {}
def __init__(self, version=None):
self._version = None
def get_default_value_declaration_sql(self, field):
default = ""
if not field.get("notnull"):
default = " DEFAULT NULL"
if "default" in field and field["default"] is not None:
default = " DEFAULT '%s'" % field["default"]
if "type" in field:
type = field["type"]
if type in ["integer", "bigint", "smallint"]:
default = " DEFAULT %s" % field["default"]
elif type in ["datetime", "datetimetz"] and field["default"] in [
self.get_current_timestamp_sql(),
"NOW",
"now",
]:
default = " DEFAULT %s" % self.get_current_timestamp_sql()
elif type in ["time"] and field["default"] in [
self.get_current_time_sql(),
"NOW",
"now",
]:
default = " DEFAULT %s" % self.get_current_time_sql()
elif type in ["date"] and field["default"] in [
self.get_current_date_sql(),
"NOW",
"now",
]:
default = " DEFAULT %s" % self.get_current_date_sql()
elif type in ["boolean"]:
default = " DEFAULT '%s'" % self.convert_booleans(field["default"])
return default
def convert_booleans(self, item):
if isinstance(item, list):
for i, value in enumerate(item):
if isinstance(value, bool):
item[i] = int(value)
elif isinstance(item, bool):
item = int(item)
return item
def get_check_declaration_sql(self, definition):
"""
Obtains DBMS specific SQL code portion needed to set a CHECK constraint
declaration to be used in statements like CREATE TABLE.
:param definition: The check definition
:type definition: dict
:return: DBMS specific SQL code portion needed to set a CHECK constraint.
:rtype: str
"""
constraints = []
for field, def_ in definition.items():
if isinstance(def_, basestring):
constraints.append("CHECK (%s)" % def_)
else:
if "min" in def_:
constraints.append("CHECK (%s >= %s)" % (field, def_["min"]))
if "max" in def_:
constraints.append("CHECK (%s <= %s)" % (field, def_["max"]))
return ", ".join(constraints)
def get_unique_constraint_declaration_sql(self, name, index):
"""
Obtains DBMS specific SQL code portion needed to set a unique
constraint declaration to be used in statements like CREATE TABLE.
:param name: The name of the unique constraint.
:type name: str
:param index: The index definition
:type index: Index
:return: DBMS specific SQL code portion needed to set a constraint.
:rtype: str
"""
columns = index.get_quoted_columns(self)
name = Identifier(name)
if not columns:
raise DBALException('Incomplete definition. "columns" required.')
return "CONSTRAINT %s UNIQUE (%s)%s" % (
name.get_quoted_name(self),
self.get_index_field_declaration_list_sql(columns),
self.get_partial_index_sql(index),
)
def get_index_declaration_sql(self, name, index):
"""
Obtains DBMS specific SQL code portion needed to set an index
declaration to be used in statements like CREATE TABLE.
:param name: The name of the index.
:type name: str
:param index: The index definition
:type index: Index
:return: DBMS specific SQL code portion needed to set an index.
:rtype: str
"""
columns = index.get_quoted_columns(self)
name = Identifier(name)
if not columns:
raise DBALException('Incomplete definition. "columns" required.')
return "%sINDEX %s (%s)%s" % (
self.get_create_index_sql_flags(index),
name.get_quoted_name(self),
self.get_index_field_declaration_list_sql(columns),
self.get_partial_index_sql(index),
)
def get_foreign_key_declaration_sql(self, foreign_key):
"""
Obtain DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
sql = self.get_foreign_key_base_declaration_sql(foreign_key)
sql += self.get_advanced_foreign_key_options_sql(foreign_key)
return sql
def get_advanced_foreign_key_options_sql(self, foreign_key):
"""
Returns the FOREIGN KEY query section dealing with non-standard options
as MATCH, INITIALLY DEFERRED, ON UPDATE, ...
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
query = ""
if self.supports_foreign_key_on_update() and foreign_key.has_option(
"on_update"
):
query += " ON UPDATE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_update")
)
if foreign_key.has_option("on_delete"):
query += " ON DELETE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_delete")
)
return query
def get_foreign_key_referential_action_sql(self, action):
"""
Returns the given referential action in uppercase if valid, otherwise throws an exception.
:param action: The action
:type action: str
:rtype: str
"""
action = action.upper()
if action not in [
"CASCADE",
"SET NULL",
"NO ACTION",
"RESTRICT",
"SET DEFAULT",
]:
raise DBALException("Invalid foreign key action: %s" % action)
return action
def get_foreign_key_base_declaration_sql(self, foreign_key):
"""
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
sql = ""
if foreign_key.get_name():
sql += "CONSTRAINT %s " % foreign_key.get_quoted_name(self)
sql += "FOREIGN KEY ("
if not foreign_key.get_local_columns():
raise DBALException('Incomplete definition. "local" required.')
if not foreign_key.get_foreign_columns():
raise DBALException('Incomplete definition. "foreign" required.')
if not foreign_key.get_foreign_table_name():
raise DBALException('Incomplete definition. "foreign_table" required.')
sql += "%s) REFERENCES %s (%s)" % (
", ".join(foreign_key.get_quoted_local_columns(self)),
foreign_key.get_quoted_foreign_table_name(self),
", ".join(foreign_key.get_quoted_foreign_columns(self)),
)
return sql
def get_current_date_sql(self):
return "CURRENT_DATE"
def get_current_time_sql(self):
return "CURRENT_TIME"
def get_current_timestamp_sql(self):
return "CURRENT_TIMESTAMP"
def get_sql_type_declaration(self, column):
internal_type = column["type"]
return getattr(self, "get_%s_type_declaration_sql" % internal_type)(column)
def get_column_declaration_list_sql(self, fields):
"""
Gets declaration of a number of fields in bulk.
"""
query_fields = []
for name, field in fields.items():
query_fields.append(self.get_column_declaration_sql(name, field))
return ", ".join(query_fields)
def get_column_declaration_sql(self, name, field):
if "column_definition" in field:
column_def = self.get_custom_type_declaration_sql(field)
else:
default = self.get_default_value_declaration_sql(field)
charset = field.get("charset", "")
if charset:
charset = " " + self.get_column_charset_declaration_sql(charset)
collation = field.get("collation", "")
if charset:
charset = " " + self.get_column_collation_declaration_sql(charset)
notnull = field.get("notnull", "")
if notnull:
notnull = " NOT NULL"
else:
notnull = ""
unique = field.get("unique", "")
if unique:
unique = " " + self.get_unique_field_declaration_sql()
else:
unique = ""
check = field.get("check", "")
type_decl = self.get_sql_type_declaration(field)
column_def = (
type_decl + charset + default + notnull + unique + check + collation
)
return name + " " + column_def
def get_custom_type_declaration_sql(self, column_def):
return column_def["column_definition"]
def get_column_charset_declaration_sql(self, charset):
return ""
def get_column_collation_declaration_sql(self, collation):
if self.supports_column_collation():
return "COLLATE %s" % collation
return ""
def supports_column_collation(self):
return False
def get_unique_field_declaration_sql(self):
return "UNIQUE"
def get_string_type_declaration_sql(self, column):
if "length" not in column:
column["length"] = self.get_varchar_default_length()
fixed = column.get("fixed", False)
if column["length"] > self.get_varchar_max_length():
return self.get_clob_type_declaration_sql(column)
return self.get_varchar_type_declaration_sql_snippet(column["length"], fixed)
def get_binary_type_declaration_sql(self, column):
if "length" not in column:
column["length"] = self.get_binary_default_length()
fixed = column.get("fixed", False)
if column["length"] > self.get_binary_max_length():
return self.get_blob_type_declaration_sql(column)
return self.get_binary_type_declaration_sql_snippet(column["length"], fixed)
def get_varchar_type_declaration_sql_snippet(self, length, fixed):
raise NotImplementedError("VARCHARS not supported by Platform")
def get_binary_type_declaration_sql_snippet(self, length, fixed):
raise NotImplementedError("BINARY/VARBINARY not supported by Platform")
def get_decimal_type_declaration_sql(self, column):
if "precision" not in column or not column["precision"]:
column["precision"] = 10
if "scale" not in column or not column["scale"]:
column["precision"] = 0
return "NUMERIC(%s, %s)" % (column["precision"], column["scale"])
def get_json_type_declaration_sql(self, column):
    # Platforms without a native JSON type store JSON in a CLOB.
    return self.get_clob_type_declaration_sql(column)

def get_clob_type_declaration_sql(self, column):
    raise NotImplementedError()

def get_text_type_declaration_sql(self, column):
    # TEXT is an alias for the platform's CLOB type by default.
    return self.get_clob_type_declaration_sql(column)

def get_blob_type_declaration_sql(self, column):
    raise NotImplementedError()

def get_varchar_default_length(self):
    return 255

def get_varchar_max_length(self):
    return 4000

def get_binary_default_length(self):
    return 255

def get_binary_max_length(self):
    return 4000

def get_column_options(self):
    # Extra platform-specific column options; none by default.
    return []

def get_type_mapping(self, db_type):
    """Map a database type name to the internal type name.

    Raises KeyError when ``db_type`` is unknown to this platform.
    """
    return self.INTERNAL_TYPE_MAPPING[db_type]

def get_reserved_keywords_list(self):
    """Return (and lazily cache) the platform's reserved-keywords list."""
    if self._keywords:
        return self._keywords
    klass = self._get_reserved_keywords_class()
    keywords = klass()
    # Cache the instance so subsequent calls are cheap.
    self._keywords = keywords
    return keywords

def _get_reserved_keywords_class(self):
    # Subclasses return their keyword-list class here.
    raise NotImplementedError
def get_index_field_declaration_list_sql(self, fields):
    """
    Obtains DBMS specific SQL code portion needed to set an index
    declaration to be used in statements like CREATE TABLE.

    :param fields: The columns
    :type fields: list

    :rtype: str
    """
    # Idiom: join the names directly instead of copying them into a
    # temporary list one element at a time.
    return ", ".join(fields)
def get_create_index_sql(self, index, table):
    """
    Returns the SQL to create an index on a table on this platform.

    :param index: The index
    :type index: Index

    :param table: The table
    :type table: Table or str

    :rtype: str
    """
    if isinstance(table, Table):
        table = table.get_quoted_name(self)
    name = index.get_quoted_name(self)
    columns = index.get_quoted_columns(self)
    if not columns:
        raise DBALException('Incomplete definition. "columns" required.')
    # Primary keys are added via ALTER TABLE rather than CREATE INDEX.
    if index.is_primary():
        return self.get_create_primary_key_sql(index, table)
    query = "CREATE %sINDEX %s ON %s" % (
        self.get_create_index_sql_flags(index),
        name,
        table,
    )
    query += " (%s)%s" % (
        self.get_index_field_declaration_list_sql(columns),
        self.get_partial_index_sql(index),
    )
    return query

def get_partial_index_sql(self, index):
    """
    Adds condition for partial index.

    :param index: The index
    :type index: Index

    :rtype: str
    """
    if self.supports_partial_indexes() and index.has_option("where"):
        return " WHERE %s" % index.get_option("where")
    return ""

def get_create_index_sql_flags(self, index):
    """
    Adds additional flags for index generation.

    :param index: The index
    :type index: Index

    :rtype: str
    """
    if index.is_unique():
        return "UNIQUE "
    return ""

def get_create_primary_key_sql(self, index, table):
    """
    Returns the SQL to create an unnamed primary key constraint.

    :param index: The index
    :type index: Index

    :param table: The table
    :type table: Table or str

    :rtype: str
    """
    return "ALTER TABLE %s ADD PRIMARY KEY (%s)" % (
        table,
        self.get_index_field_declaration_list_sql(index.get_quoted_columns(self)),
    )

def get_create_foreign_key_sql(self, foreign_key, table):
    """
    Returns the SQL to create a new foreign key.

    :rtype: str
    """
    if isinstance(table, Table):
        table = table.get_quoted_name(self)
    query = "ALTER TABLE %s ADD %s" % (
        table,
        self.get_foreign_key_declaration_sql(foreign_key),
    )
    return query

def get_drop_table_sql(self, table):
    """
    Returns the SQL snippet to drop an existing table.

    :param table: The table
    :type table: Table or str

    :rtype: str
    """
    if isinstance(table, Table):
        table = table.get_quoted_name(self)
    return "DROP TABLE %s" % table

def get_drop_index_sql(self, index, table=None):
    """
    Returns the SQL to drop an index from a table.

    :param index: The index
    :type index: Index or str

    :param table: The table
    :type table: Table or str or None

    :rtype: str
    """
    if isinstance(index, Index):
        index = index.get_quoted_name(self)
    return "DROP INDEX %s" % index

def get_create_table_sql(self, table, create_flags=CREATE_INDEXES):
    """
    Returns the SQL statement(s) to create a table
    with the specified name, columns and constraints
    on this platform.

    :param table: The table
    :type table: Table

    :type create_flags: int

    :rtype: str
    """
    table_name = table.get_quoted_name(self)
    options = dict((k, v) for k, v in table.get_options().items())
    options["unique_constraints"] = OrderedDict()
    options["indexes"] = OrderedDict()
    options["primary"] = []
    if create_flags & self.CREATE_INDEXES > 0:
        for index in table.get_indexes().values():
            if index.is_primary():
                # The primary key is emitted inline with the columns;
                # secondary indexes are collected by quoted name.
                options["primary"] = index.get_quoted_columns(self)
                options["primary_index"] = index
            else:
                options["indexes"][index.get_quoted_name(self)] = index
    columns = OrderedDict()
    for column in table.get_columns().values():
        column_data = column.to_dict()
        column_data["name"] = column.get_quoted_name(self)
        if column.has_platform_option("version"):
            column_data["version"] = column.get_platform_option("version")
        else:
            column_data["version"] = False
        # column_data['comment'] = self.get_column_comment(column)
        if column_data["type"] == "string" and column_data["length"] is None:
            column_data["length"] = 255
        if column.get_name() in options["primary"]:
            column_data["primary"] = True
        columns[column_data["name"]] = column_data
    if create_flags & self.CREATE_FOREIGNKEYS > 0:
        options["foreign_keys"] = []
        for fk in table.get_foreign_keys().values():
            options["foreign_keys"].append(fk)
    sql = self._get_create_table_sql(table_name, columns, options)
    # Comments?
    return sql
def _get_create_table_sql(self, table_name, columns, options=None):
    """
    Returns the SQL used to create a table.

    :param table_name: The name of the table to create
    :type table_name: str

    :param columns: The table columns
    :type columns: dict

    :param options: The options
    :type options: dict

    :rtype: list
    """
    options = options or {}
    column_list_sql = self.get_column_declaration_list_sql(columns)
    if options.get("unique_constraints"):
        for name, definition in options["unique_constraints"].items():
            column_list_sql += ", %s" % self.get_unique_constraint_declaration_sql(
                name, definition
            )
    if options.get("primary"):
        column_list_sql += ", PRIMARY KEY(%s)" % ", ".join(options["primary"])
    if options.get("indexes"):
        # BUG FIX: "indexes" is a mapping of name -> Index (see
        # get_create_table_sql, which fills an OrderedDict).  Iterating
        # the dict directly yields only the keys and fails to unpack;
        # iterate .items() to get (name, definition) pairs.
        for index, definition in options["indexes"].items():
            column_list_sql += ", %s" % self.get_index_declaration_sql(
                index, definition
            )
    query = "CREATE TABLE %s (%s" % (table_name, column_list_sql)
    check = self.get_check_declaration_sql(columns)
    if check:
        query += ", %s" % check
    query += ")"
    sql = [query]
    # Foreign keys require separate ALTER TABLE statements.
    if options.get("foreign_keys"):
        for definition in options["foreign_keys"]:
            sql.append(self.get_create_foreign_key_sql(definition, table_name))
    return sql
def quote_identifier(self, string):
    """
    Quotes a string so that it can be safely used as a table or column name,
    even if it is a reserved word of the platform. This also detects identifier
    chains separated by dot and quotes them independently.

    :param string: The identifier name to be quoted.
    :type string: str

    :return: The quoted identifier string.
    :rtype: str
    """
    if "." in string:
        # Quote each segment of a "schema.table.column" chain separately.
        parts = list(map(self.quote_single_identifier, string.split(".")))
        return ".".join(parts)
    return self.quote_single_identifier(string)

def quote_single_identifier(self, string):
    """
    Quotes a single identifier (no dot chain separation).

    :param string: The identifier name to be quoted.
    :type string: str

    :return: The quoted identifier string.
    :rtype: str
    """
    c = self.get_identifier_quote_character()
    # Escape embedded quote characters by doubling them (SQL standard).
    return "%s%s%s" % (c, string.replace(c, c + c), c)

def get_identifier_quote_character(self):
    # ANSI SQL identifier quote; platforms like MySQL override this.
    return '"'

# Capability flags: subclasses override these to describe what the
# concrete platform supports.

def supports_indexes(self):
    return True

def supports_partial_indexes(self):
    return False

def supports_alter_table(self):
    return True

def supports_transactions(self):
    return True

def supports_primary_constraints(self):
    return True

def supports_foreign_key_constraints(self):
    return True

def supports_foreign_key_on_update(self):
    # ON UPDATE support follows foreign-key support unless overridden.
    return self.supports_foreign_key_constraints()

def has_native_json_type(self):
    return False
| mit | b65fa1b66b467d15768704ecb2459718 | 29.395512 | 98 | 0.568568 | 4.267822 | false | false | false | false |
sdispater/orator | orator/commands/seeds/seed_command.py | 1 | 1923 | # -*- coding: utf-8 -*-
import importlib
import inflection
import os
from cleo import InputOption
from orator import DatabaseManager
from .base_command import BaseCommand
from ...utils import load_module
class SeedCommand(BaseCommand):
    """
    Seed the database with records.

    db:seed
        {--d|database= : The database connection to use.}
        {--p|path= : The path to seeders files.
                     Defaults to <comment>./seeds</comment>.}
        {--seeder=database_seeder : The name of the root seeder.}
        {--f|force : Force the operation to run.}
    """
    # NOTE: the docstring above doubles as the cleo command definition
    # (name and options); do not edit it casually.

    def handle(self):
        """
        Executes the command.
        """
        if not self.confirm_to_proceed(
            "<question>Are you sure you want to seed the database?:</question> "
        ):
            return
        self.resolver.set_default_connection(self.option("database"))
        self._get_seeder().run()
        self.info("Database seeded!")

    def _get_seeder(self):
        """Load and instantiate the root seeder.

        Imports the seeds package, then the seeder module, and resolves
        the class by camelizing the module name.
        """
        name = self._parse_name(self.option("seeder"))
        seeder_file = self._get_path(name)
        # Loading parent module
        load_module("seeds", self._get_path("__init__"))
        # Loading module
        mod = load_module("seeds.%s" % name, seeder_file)
        klass = getattr(mod, inflection.camelize(name))
        instance = klass()
        instance.set_command(self)
        instance.set_connection_resolver(self.resolver)
        return instance

    def _parse_name(self, name):
        # Accept both "my_seeder" and "my_seeder.py".
        if name.endswith(".py"):
            name = name.replace(".py", "", -1)
        return name

    def _get_path(self, name):
        """
        Get the destination class path.

        :param name: The name
        :type name: str

        :rtype: str
        """
        path = self.option("path")
        if path is None:
            path = self._get_seeders_path()
        return os.path.join(path, "%s.py" % name)
| mit | 323e452d7087dc78e51b69e1b2854aed | 24.302632 | 80 | 0.571503 | 3.916497 | false | false | false | false |
sdispater/orator | orator/query/processors/mysql_processor.py | 1 | 1790 | # -*- coding: utf-8 -*-
from .processor import QueryProcessor
class MySQLQueryProcessor(QueryProcessor):
    def process_insert_get_id(self, query, sql, values, sequence=None):
        """
        Process an "insert get ID" query.

        :param query: A QueryBuilder instance
        :type query: QueryBuilder

        :param sql: The sql query to execute
        :type sql: str

        :param values: The value bindings
        :type values: list

        :param sequence: The ids sequence
        :type sequence: str

        :return: The inserted row id
        :rtype: int
        """
        connection = query.get_connection()
        if not connection.transaction_level():
            # Wrap the insert in a transaction so LAST_INSERT_ID() is
            # read on the same connection state as the insert.
            with connection.transaction():
                id = self._insert_and_get_id(connection, sql, values)
        else:
            id = self._insert_and_get_id(connection, sql, values)
        # Normalize string ids returned by some drivers to int.
        if isinstance(id, int):
            return id
        if str(id).isdigit():
            return int(id)
        return id

    def _insert_and_get_id(self, connection, sql, values):
        # Run the insert and fetch the generated id, preferring the
        # DB-API cursor's lastrowid when the driver exposes it.
        connection.insert(sql, values)
        cursor = connection.get_cursor()
        if hasattr(cursor, "lastrowid"):
            return cursor.lastrowid
        return connection.statement("SELECT LAST_INSERT_ID()")

    def process_column_listing(self, results):
        """
        Process the results of a column listing query

        :param results: The query results
        :type results: dict

        :return: The processed results
        :rtype: list
        """
        return [row["column_name"] for row in results]
| mit | 3929819c92361c7e35cedc109d557c1f | 27.870968 | 84 | 0.556983 | 4.613402 | false | false | false | false |
sdispater/orator | orator/utils/url.py | 1 | 7566 | # -*- coding: utf-8 -*-
# engine/url.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
information about a database connection specification.
The URL object is created automatically when
:func:`~sqlalchemy.engine.create_engine` is called with a string
argument; alternatively, the URL is a public-facing construct which can
be used directly and is also accepted directly by ``create_engine()``.
"""
import re
from . import parse_qsl, unquote_plus, unquote, basestring, PY2
from ..exceptions import ArgumentError
class URL(object):
    """
    Represent the components of a URL used to connect to a database.

    All initialization parameters are available as public attributes.

    :param drivername: the name of the database backend.
    :param username: The user name.
    :param password: database password.
    :param host: The name of the host.
    :param port: The port number.
    :param database: The database name.
    :param query: A dictionary of options to be passed to the
        dialect and/or the DBAPI upon connect.
    """

    def __init__(
        self,
        drivername,
        username=None,
        password=None,
        host=None,
        port=None,
        database=None,
        query=None,
    ):
        self.drivername = drivername
        self.username = username
        self.password = password
        self.host = host
        # Ports often arrive as strings from URL parsing; normalize to int.
        if port is not None:
            self.port = int(port)
        else:
            self.port = None
        self.database = database
        self.query = query or {}

    def __to_string__(self, hide_password=True):
        """Render the URL; the password is masked as "***" by default."""
        s = self.drivername + "://"
        if self.username is not None:
            s += _rfc_1738_quote(self.username)
            if self.password is not None:
                s += ":" + ("***" if hide_password else _rfc_1738_quote(self.password))
            s += "@"
        if self.host is not None:
            # IPv6 literals contain ":" and must be bracketed.
            if ":" in self.host:
                s += "[%s]" % self.host
            else:
                s += self.host
        if self.port is not None:
            s += ":" + str(self.port)
        if self.database is not None:
            s += "/" + self.database
        if self.query:
            # Sort keys for a deterministic rendering.
            keys = list(self.query)
            keys.sort()
            s += "?" + "&".join("%s=%s" % (k, self.query[k]) for k in keys)
        return s

    def __str__(self):
        return self.__to_string__(hide_password=False)

    def __repr__(self):
        return self.__to_string__()

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        # NOTE(review): the port is deliberately not compared here,
        # mirroring the upstream SQLAlchemy code this was ported from —
        # confirm that is intended before relying on URL equality.
        return (
            isinstance(other, URL)
            and self.drivername == other.drivername
            and self.username == other.username
            and self.password == other.password
            and self.host == other.host
            and self.database == other.database
            and self.query == other.query
        )

    def get_backend_name(self):
        """Return the backend portion of "backend+driver" driver names."""
        if "+" not in self.drivername:
            return self.drivername
        else:
            return self.drivername.split("+")[0]

    def get_driver_name(self):
        """Return the driver portion of "backend+driver" driver names."""
        if "+" not in self.drivername:
            return self.get_dialect().driver
        else:
            return self.drivername.split("+")[1]

    def get_dialect(self):
        """Return the SQLAlchemy database dialect class corresponding
        to this URL's driver name.

        NOTE(review): ``registry`` and ``Dialect`` are not defined in
        this module (the code was copied from SQLAlchemy), so calling
        this raises NameError — confirm whether it is ever used.
        """
        if "+" not in self.drivername:
            name = self.drivername
        else:
            name = self.drivername.replace("+", ".")
        cls = registry.load(name)
        # check for legacy dialects that
        # would return a module with 'dialect' as the
        # actual class
        if (
            hasattr(cls, "dialect")
            and isinstance(cls.dialect, type)
            and issubclass(cls.dialect, Dialect)
        ):
            return cls.dialect
        else:
            return cls

    def translate_connect_args(self, names=None, **kw):
        """Translate url attributes into a dictionary of connection arguments.

        Returns attributes of this url (`host`, `database`, `username`,
        `password`, `port`) as a plain dictionary. The attribute names are
        used as the keys by default. Unset or false attributes are omitted
        from the final dictionary.

        :param \**kw: Optional, alternate key names for url attributes.

        :param names: Deprecated. Same purpose as the keyword-based alternate
            names, but correlates the name to the original positionally.
        """
        # BUG FIX: ``names`` used to default to a mutable list and was
        # popped in place, mutating the caller's argument (and, with the
        # old ``names=[]`` default, sharing state across calls).  Copy it.
        names = list(names) if names else []
        translated = {}
        attribute_names = ["host", "database", "username", "password", "port"]
        for sname in attribute_names:
            if names:
                name = names.pop(0)
            elif sname in kw:
                name = kw[sname]
            else:
                name = sname
            if name is not None and getattr(self, sname, False):
                translated[name] = getattr(self, sname)
        return translated
def make_url(name_or_url):
    """Given a string or unicode instance, produce a new URL instance.

    The given string is parsed according to the RFC 1738 spec. If an
    existing URL object is passed, just returns the object.
    """
    if not isinstance(name_or_url, basestring):
        # Already a URL instance: pass it through untouched.
        return name_or_url
    return _parse_rfc1738_args(name_or_url)
def _parse_rfc1738_args(name):
    """Parse an RFC 1738 style URL string into a :class:`URL`.

    :raises ArgumentError: if the string cannot be parsed.
    """
    # Verbose (re.X) pattern: whitespace inside it is insignificant.
    pattern = re.compile(
        r"""
        (?P<name>[\w\+]+)://
        (?:
            (?P<username>[^:/]*)
            (?::(?P<password>.*))?
        @)?
        (?:
            (?:
                \[(?P<ipv6host>[^/]+)\] |
                (?P<ipv4host>[^/:]+)
            )?
            (?::(?P<port>[^/]*))?
        )?
        (?:/(?P<database>.*))?
        """,
        re.X,
    )

    m = pattern.match(name)
    if m is not None:
        components = m.groupdict()
        if components["database"] is not None:
            # Split a "?key=value&..." query string off the database name.
            tokens = components["database"].split("?", 2)
            components["database"] = tokens[0]
            query = (len(tokens) > 1 and dict(parse_qsl(tokens[1]))) or None
            if PY2 and query is not None:
                # Python 2: keyword-argument names must be bytes.
                query = dict((k.encode("ascii"), query[k]) for k in query)
        else:
            query = None
        components["query"] = query

        if components["username"] is not None:
            components["username"] = _rfc_1738_unquote(components["username"])

        if components["password"] is not None:
            components["password"] = _rfc_1738_unquote(components["password"])

        # Collapse the two alternative host captures into a single key.
        ipv4host = components.pop("ipv4host")
        ipv6host = components.pop("ipv6host")
        components["host"] = ipv4host or ipv6host
        name = components.pop("name")
        return URL(name, **components)
    else:
        raise ArgumentError("Could not parse rfc1738 URL from string '%s'" % name)
def _rfc_1738_quote(text):
return re.sub(r"[:@/]", lambda m: "%%%X" % ord(m.group(0)), text)
def _rfc_1738_unquote(text):
    # Inverse of _rfc_1738_quote: decode %XX escapes.
    return unquote(text)
def _parse_keyvalue_args(name):
    """Parse a "driver://key=value&key=value" style string into a URL.

    Returns None when the string does not match the expected shape.
    """
    m = re.match(r"(\w+)://(.*)", name)
    if m is None:
        return None
    name, args = m.group(1, 2)
    opts = dict(parse_qsl(args))
    # BUG FIX: the parsed options are keyword arguments to URL();
    # unpacking the dict with a single "*" passed only the *keys*
    # positionally (e.g. the string "host" as the username).
    return URL(name, **opts)
| mit | c98c71018f54defc27cf54d768be3809 | 29.508065 | 87 | 0.555644 | 4.145753 | false | false | false | false |
sdispater/orator | orator/orm/relations/relation.py | 1 | 5956 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from ...query.expression import QueryExpression
from ..collection import Collection
from ..builder import Builder
class Relation(object):
    """Abstract base class for ORM relationships between two models.

    Subclasses implement the constraint and eager-loading hooks below.
    """

    # Class-level flag: when False, __init__ skips add_constraints().
    # Toggled (globally per class) by the no_constraints() context
    # manager during eager loading.
    _constraints = True

    def __init__(self, query, parent):
        """
        :param query: A Builder instance
        :type query: orm.orator.Builder

        :param parent: The parent model
        :type parent: Model
        """
        self._query = query
        self._parent = parent
        self._related = query.get_model()
        self._extra_query = None
        self.add_constraints()

    def add_constraints(self):
        """
        Set the base constraints on the relation query.

        :rtype: None
        """
        raise NotImplementedError

    def add_eager_constraints(self, models):
        """
        Set the constraints for an eager load of the relation.

        :type models: list
        """
        raise NotImplementedError

    def init_relation(self, models, relation):
        """
        Initialize the relation on a set of models.

        :type models: list
        :type relation: str
        """
        raise NotImplementedError

    def match(self, models, results, relation):
        """
        Match the eagerly loaded results to their parents.

        :type models: list
        :type results: Collection
        :type relation: str
        """
        raise NotImplementedError

    def get_results(self):
        """
        Get the results of the relationship.
        """
        raise NotImplementedError

    def get_eager(self):
        """
        Get the relationship for eager loading.

        :rtype: Collection
        """
        return self.get()

    def touch(self):
        """
        Touch all of the related models for the relationship.
        """
        column = self.get_related().get_updated_at_column()
        self.raw_update({column: self.get_related().fresh_timestamp()})

    def raw_update(self, attributes=None):
        """
        Run a raw update against the base query.

        :type attributes: dict

        :rtype: int
        """
        if attributes is None:
            attributes = {}
        if self._query is not None:
            return self._query.update(attributes)

    def get_relation_count_query(self, query, parent):
        """
        Add the constraints for a relationship count query.

        :type query: Builder
        :type parent: Builder

        :rtype: Builder
        """
        query.select(QueryExpression("COUNT(*)"))
        key = self.wrap(self.get_qualified_parent_key_name())
        return query.where(self.get_has_compare_key(), "=", QueryExpression(key))

    @classmethod
    @contextmanager
    def no_constraints(cls, with_subclasses=False):
        """
        Runs a callback with constraints disabled on the relation.
        """
        cls._constraints = False
        if with_subclasses:
            for klass in cls.__subclasses__():
                klass._constraints = False
        try:
            yield cls
        except Exception:
            raise
        finally:
            # Always restore the flag, even when the caller's block raised.
            cls._constraints = True
            if with_subclasses:
                for klass in cls.__subclasses__():
                    klass._constraints = True

    def get_keys(self, models, key=None):
        """
        Get all the primary keys for an array of models.

        :type models: list
        :type key: str

        :rtype: list
        """
        # set() deduplicates; note the resulting order is unspecified.
        return list(
            set(
                map(
                    lambda value: value.get_attribute(key) if key else value.get_key(),
                    models,
                )
            )
        )

    def get_query(self):
        return self._query

    def get_base_query(self):
        return self._query.get_query()

    def merge_query(self, query):
        # Accept either an ORM Builder or a raw query builder.
        if isinstance(query, Builder):
            query = query.get_query()
        self._query.merge(query)

    def get_parent(self):
        return self._parent

    def get_qualified_parent_key_name(self):
        return self._parent.get_qualified_key_name()

    def get_related(self):
        return self._related

    def created_at(self):
        """
        Get the name of the "created at" column.

        :rtype: str
        """
        return self._parent.get_created_at_column()

    def updated_at(self):
        """
        Get the name of the "updated at" column.

        :rtype: str
        """
        return self._parent.get_updated_at_column()

    def get_related_updated_at(self):
        """
        Get the name of the related model's "updated at" column.

        :rtype: str
        """
        return self._related.get_updated_at_column()

    def wrap(self, value):
        """
        Wrap the given value with the parent's query grammar.

        :rtype: str
        """
        return self._parent.new_query().get_query().get_grammar().wrap(value)

    def set_parent(self, parent):
        self._parent = parent

    def set_extra_query(self, query):
        self._extra_query = query

    def new_query(self, related=None):
        # Build a fresh query for the related model, merging in any
        # extra constraints registered on this relation.
        if related is None:
            related = self._related
        query = related.new_query()
        if self._extra_query:
            query.merge(self._extra_query.get_query())
        return query

    def new_instance(self, model, **kwargs):
        # NOTE(review): _new_instance is not defined on this base class;
        # it appears to be supplied by subclasses — confirm.
        new = self._new_instance(model, **kwargs)
        if self._extra_query:
            new.set_extra_query(self._extra_query)
        return new

    def __dynamic(self, method):
        # Proxy unknown attributes to the underlying query builder.
        # Calls that return the builder itself are re-wrapped to return
        # this relation, preserving fluent chaining on the relation.
        attribute = getattr(self._query, method)

        def call(*args, **kwargs):
            result = attribute(*args, **kwargs)
            if result is self._query:
                return self
            return result

        if not callable(attribute):
            return attribute
        return call

    def __getattr__(self, item):
        return self.__dynamic(item)
| mit | e540ee5c71dc0edf42cbd391196471b4 | 22.919679 | 87 | 0.551209 | 4.578017 | false | false | false | false |
terrencepreilly/darglint | bin/bnf_to_cnf/tests/test_node.py | 1 | 5041 | from unittest import (
TestCase,
)
from bnf_to_cnf.node import (
Node,
NodeType,
)
from bnf_to_cnf.parser import (
Parser,
)
from utils import (
random_string,
)
MAX_REPS = 100
class NodeTest(TestCase):
    """Tests for Node stringification and structural equality."""

    def test_str(self):
        # Local builders keep the fixture tree readable.
        def symbol(value):
            return Node(NodeType.SYMBOL, value=value)

        def seq(*children):
            return Node(NodeType.SEQUENCE, children=list(children))

        def production(lhs, *sequences):
            return Node(NodeType.PRODUCTION, children=[
                lhs,
                Node(NodeType.EXPRESSION, children=list(sequences)),
            ])

        node = Node(NodeType.GRAMMAR, children=[
            production(
                symbol('header'),
                seq(symbol('arg')),
                seq(symbol('returns')),
            ),
            production(
                symbol('arg'),
                seq(Node(NodeType.TERMINAL, value='"Arg"')),
            ),
        ])
        self.assertEqual(
            str(node),
            '<header> ::= <arg> | <returns>\n'
            '<arg> ::= "Arg"',
        )

    def test_terminals_equal(self):
        for _ in range(MAX_REPS):
            first = random_string()
            second = random_string()
            while first == second:
                first = random_string()
            make = lambda value: Node(NodeType.TERMINAL, value=value)
            self.assertTrue(make(first).equals(make(first)))
            self.assertFalse(make(first).equals(make(second)))

    def test_grammars_equal(self):
        def parse(text):
            return Parser().parse(text)

        grammar_a = '<A> ::= "b" | "c"'
        grammar_b = '<Q> ::= "chicken"\n<D> ::= "sargh"'
        self.assertTrue(parse(grammar_a).equals(parse(grammar_a)))
        self.assertTrue(parse(grammar_b).equals(parse(grammar_b)))
        self.assertFalse(parse(grammar_a).equals(parse(grammar_b)))

    def test_empty_nodes_equal(self):
        for node_type in (
            NodeType.SEQUENCE, NodeType.GRAMMAR, NodeType.EXPRESSION
        ):
            lhs = Node(node_type, children=[])
            rhs = Node(node_type, children=[])
            self.assertTrue(lhs.equals(rhs))

    def test_external_filename_preserved_in_both_python_and_bnf(self):
        external = (
            'from darglint.parse.identifiers import (\n'
            '    ArgumentIdentifier,\n'
            '    NoqaIdentifier,\n'
            ')'
        )
        grammar = f'''
            {external}

            <A> ::= "A"
        '''
        node = Parser().parse(grammar)
        # The import must survive both renderings verbatim.
        self.assertTrue(external in str(node))
        self.assertTrue(external in node.to_python())

    def test_probability_passed_to_python_production(self):
        grammar = '''
            <start>
                ::= 70 <A> <A>
                | 30 <A> <B>

            <A> ::= "A"
            <B> ::= "B"
        '''
        tree = Parser().parse(grammar)
        first_sequence = tree.children[0].children[1].children[0]
        self.assertEqual(
            '([], "A", "A", 70)',
            first_sequence.to_python(),
        )
| mit | 13cffd62549f74f8f66a2e630d94c250 | 28.828402 | 70 | 0.323349 | 5.588692 | false | true | false | false |
ui/django-post_office | post_office/tasks.py | 1 | 1534 | """
Only define the tasks and handler if we can import celery.
This allows the module to be imported in environments without Celery, for
example by other task queue systems such as Huey, which use the same pattern
of auto-discovering tasks in "tasks" submodules.
"""
import datetime
from django.utils.timezone import now
from post_office.mail import send_queued_mail_until_done
from post_office.utils import cleanup_expired_mails
from .settings import get_celery_enabled
try:
    if get_celery_enabled():
        from celery import shared_task
    else:
        # Celery is explicitly disabled in settings: behave exactly as
        # if it were not installed.
        raise NotImplementedError()
except (ImportError, NotImplementedError):
    # No Celery available: the handler is a deliberate no-op so that
    # email_queued signal receivers still have something to connect to.
    def queued_mail_handler(sender, **kwargs):
        """
        To be called by :func:`post_office.signals.email_queued.send()` for triggering asynchronous
        mail delivery – if provided by an external queue, such as Celery.
        """
else:
    # Celery is available: define the real asynchronous tasks.
    @shared_task(ignore_result=True)
    def send_queued_mail(*args, **kwargs):
        """
        To be called by the Celery task manager.
        """
        send_queued_mail_until_done()

    def queued_mail_handler(sender, **kwargs):
        """
        Trigger an asynchronous mail delivery.
        """
        send_queued_mail.delay()

    @shared_task(ignore_result=True)
    def cleanup_mail(*args, **kwargs):
        # Delete mails older than `days` (default 90), optionally
        # removing their attachments as well.
        days = kwargs.get('days', 90)
        cutoff_date = now() - datetime.timedelta(days)
        delete_attachments = kwargs.get('delete_attachments', True)
        cleanup_expired_mails(cutoff_date, delete_attachments)
| mit | 96c46f8588f78c53b2e3a725cf01c1c6 | 32.304348 | 99 | 0.678198 | 4.074468 | false | false | false | false |
terrencepreilly/darglint | darglint/parse/google.py | 1 | 5411 | import inspect
from typing import (
List,
)
from functools import (
reduce,
)
from ..custom_assert import Assert
from ..token import (
Token,
TokenType,
KEYWORDS,
)
from .cyk import (
parse as cyk_parse,
)
from ..node import (
CykNode,
)
from .combinator import (
parser_combinator,
)
from .long_description import (
parse as long_description_parse,
)
from .grammars.google_arguments_section import ArgumentsGrammar
from .grammars.google_raises_section import RaisesGrammar
from .grammars.google_returns_section import ReturnsGrammar
from .grammars.google_returns_section_without_type import (
ReturnsWithoutTypeGrammar,
)
from .grammars.google_short_description import ShortDescriptionGrammar
from .grammars.google_yields_section import YieldsGrammar
from .grammars.google_yields_section_without_type import (
YieldsWithoutTypeGrammar,
)
def _get_split_end_with_indents(tokens, i):
    # type: (List[Token], int) -> int
    """Return the index of the end of this split, or 0.

    Args:
        tokens: A list of tokens.
        i: The current index.

    Returns:
        If i is the start of a split, return the index of the
        token after the end of the split (or the last token, if
        it's the end of the docstring.) If we're not at a split,
        return 0.
    """
    newline_count = 0  # Total newlines in this whitespace run.
    newline_run = 0  # Current consecutive-newline streak.
    highest_newline_run = 0
    j = i
    # Scan forward over the run of NEWLINE/INDENT tokens.
    while j < len(tokens):
        if tokens[j].token_type == TokenType.NEWLINE:
            newline_count += 1
            newline_run += 1
            if newline_run > highest_newline_run:
                highest_newline_run = newline_run
        elif tokens[j].token_type == TokenType.INDENT:
            newline_run = 0
        else:
            break
        j += 1
    # Back up so that we don't remove indents on the same line as
    # the encountered text.
    while (j < len(tokens)
            and j > 1
            and tokens[j - 1].token_type == TokenType.INDENT):
        j -= 1
    # TODO: Do we want to check for keywords before assuming a
    # new section? If we have line-separated sections in args,
    # which do not have indents, then we will parse incorrectly.
    if newline_count < 2:
        return 0
    # If there are two newlines in a row, we have a break, no
    # matter what.
    if highest_newline_run > 1:
        return j
    # If there were not 2+ newlines in a row, (i.e. there were
    # indented lines in with these), then it's only a new section
    # if it starts with a keyword.
    if (j < len(tokens)
            and tokens[j].token_type in KEYWORDS
            and tokens[j - 1].token_type == TokenType.NEWLINE):
        return j
    return 0


def top_parse(tokens):
    # type: (List[Token]) -> List[List[Token]]
    """Split the token stream into sections at blank-line boundaries."""
    all_sections = list()
    curr = 0
    # Strip leading newlines.
    while curr < len(tokens) and tokens[curr].token_type == TokenType.NEWLINE:
        curr += 1
    prev = curr
    while curr < len(tokens):
        split_end = _get_split_end_with_indents(tokens, curr)
        if split_end > curr:
            # A section break: emit the accumulated tokens (if any) and
            # jump past the separating whitespace.
            if tokens[prev:curr]:
                all_sections.append(
                    tokens[prev:curr]
                )
            curr = split_end
            prev = curr
        else:
            curr += 1
    last_section = tokens[prev:curr]
    if last_section:
        all_sections.append(last_section)
    return all_sections
def _match(token):
    """Match the given token from the given section to a set of grammars.

    Args:
        token: The token to match.  This should hint at what sections
            could possibly be here.

    Returns:
        A list of grammars to be tried in order.
    """
    tt_lookup = {
        TokenType.RETURNS: [
            ReturnsGrammar,
            ReturnsWithoutTypeGrammar,
            long_description_parse,
        ],
        TokenType.ARGUMENTS: [
            ArgumentsGrammar,
            long_description_parse,
        ],
        TokenType.YIELDS: [
            YieldsGrammar,
            YieldsWithoutTypeGrammar,
            long_description_parse,
        ],
        TokenType.RAISES: [
            RaisesGrammar,
            long_description_parse,
        ],
    }
    # Anything unrecognized is treated as free-form long description.
    return tt_lookup.get(token.token_type, [long_description_parse])


def lookup(section, section_index=-1):
    """Return the candidate grammars for a section, in priority order."""
    Assert(len(section) > 0, 'Expected non-empty section.')
    grammars = _match(section[0])
    # Only the very first section may be a short description.
    if section_index == 0:
        return [ShortDescriptionGrammar] + grammars
    return grammars


def combinator(*args):
    """Combine parsed section nodes into a single docstring CykNode."""
    def inner(*nodes):
        # NOTE(review): functools.reduce returns a single-element input
        # unchanged without calling inner(), so the len(nodes) == 1
        # branch below appears to be dead — confirm intended.
        if len(nodes) == 1:
            return CykNode(
                symbol='docstring',
                lchild=nodes[0],
            )
        elif len(nodes) == 2:
            return CykNode(
                symbol='docstring',
                lchild=nodes[0],
                rchild=nodes[1],
            )
    if args:
        return reduce(inner, args)
    else:
        # The arguments are empty, so we return an
        # empty docstring.
        return CykNode(symbol='docstring')


def parse(tokens):
    """Parse docstring tokens by trying each candidate grammar per section."""
    def mapped_lookup(section, section_index=-1):
        # Wrap grammar classes in the CYK parser; callables (e.g.
        # long_description_parse) pass through unchanged.
        # NOTE(review): the lambda closes over the loop variable
        # `grammar`; this is only safe if each yielded parser is invoked
        # before the generator advances — confirm parser_combinator
        # consumes them one at a time.
        for grammar in lookup(section, section_index):
            if inspect.isclass(grammar):
                yield lambda x: cyk_parse(grammar, x)
            else:
                yield grammar
    return parser_combinator(top_parse, mapped_lookup, combinator, tokens)
| mit | 8fed88de075d0f5ffb87684cf61b6bc9 | 26.328283 | 78 | 0.593236 | 3.972834 | false | false | false | false |
terrencepreilly/darglint | bin/bnf_to_cnf/bnf_to_cnf/parser.py | 1 | 2656 |
from lark import (
Lark,
Tree,
)
from .node import (
Node,
NodeType,
)
class Parser(object):
grammar = r'''
start: grammar
grammar: imports? external_imports? name? start_expression? production+
production: annotations? symbol _OPER expression
_OPER: "::="
expression: sequence (_BAR sequence)*
_BAR: "|"
sequence: probability? annotations? (symbol | TERMINAL) (_WHITESPACE (symbol | TERMINAL))*
TERMINAL: "\"" (LETTER | ESCAPED | NUMBER | "_" | "-" | ":")+ "\""
| "ε"
ESCAPED: "\\" ("." | "," | "*" | "^" | "("
| ")" | "+" | "-" | "/" | "\""
| " " | "]" | "[" | "|")
probability: NUMBER+
start_expression: _START symbol
_START: "start:"
name: _GRAMMAR NAME
NAME: LETTER+
_GRAMMAR: "Grammar:"
external_imports: external_import+
external_import: _FROM FILENAME _IMPORT _LP items _RP
_FROM: "from"
_LP: "("
_RP: ")"
items: ITEM ","?
| ITEM "," items
ITEM: /\w+/
imports: import+
import: _IMPORT FILENAME
FILENAME: /(\w|\\|\.|-|_)+/
_IMPORT: "import"
annotations: annotation+
annotation: _AT IDENT
_AT: "@"
symbol: _LB IDENT _RB
_LB: "<"
_RB: ">"
IDENT: LETTER (LETTER | NUMBER | "_" | "-")*
%import common.LETTER
%import common.NUMBER
_COMMENT: /#[^\n]*/
%ignore _COMMENT
_WHITESPACE: (" " | "\n" | "\t")+
%ignore _WHITESPACE
''' # noqa: E501
def __init__(self):
self.delegate = Lark(self.grammar)
def parse(self, value: str) -> Node:
tree = self.delegate.parse(value)
return Node.from_lark_tree(tree)
def parse_production(self, value: str) -> Node:
"""Parse just an production.
Args:
value: The string to parse.
Throws:
Exception: If there is more than a single production in the
value.
Returns:
A node which is the head of the production (not the grammar.)
"""
if '\n' in value:
raise Exception(
'There should only be a single product, but '
'a newline is present.'
)
grammar = self.parse(value)
if grammar.children[0].node_type == NodeType.PRODUCTION:
production = grammar.children[0]
else:
production = grammar.children[1]
grammar.children = list()
return production
| mit | 2005b80665f48e84ac315e9bd1fda763 | 24.04717 | 98 | 0.484746 | 4.041096 | false | false | false | false |
terrencepreilly/darglint | darglint/lex.py | 1 | 6882 | """Defines a function for lexing a comment, `lex`."""
from typing import (
Iterator,
List,
Optional,
)
from .custom_assert import Assert
from .peaker import Peaker
from .token import Token, TokenType
from .config import (
get_config,
)
# These convenience functions take an optional string
# because the peaker could return None when at the end
# of the stream.
def _is_space(char):
# type: (Optional[str]) -> bool
return char == ' '
def _is_newline(char):
# type: (Optional[str]) -> bool
return char == '\n'
def _is_colon(char):
# type: (Optional[str]) -> bool
return char == ':'
def _is_hash(char):
# type: (Optional[str]) -> bool
return char == '#'
def _is_separator(char):
# type: (Optional[str]) -> bool
"""Check whether if `char` is a separator other than newline or space.
Args:
char: The character to check.
Returns:
true if `char` is a separator other than newline or space.
"""
if char is None:
return False
return char.isspace() and not (_is_space(char) or _is_newline(char))
def _is_lparen(char):
# type: (Optional[str]) -> bool
return char == '('
def _is_rparen(char):
# type: (Optional[str]) -> bool
return char == ')'
def _is_hyphen(char):
# type: (Optional[str]) -> bool
return char == '-'
def _is_word(char):
# type: (str) -> bool
return not any([
_is_space(char),
_is_newline(char),
_is_colon(char),
_is_separator(char),
_is_hash(char),
_is_lparen(char),
_is_rparen(char),
])
def lex(program):
    # type: (str) -> Iterator[Token]
    """Create a stream of tokens from the string.

    Args:
        program: The program to lex, as a string.

    Yields:
        Tokens lexed from the string.

    """
    extra = ''  # Extra characters which are pulled but unused from a check.
    peaker = Peaker((x for x in program or []))  # the stream
    line_number = 0
    # Set the amount of spaces which count as an indent.
    config = get_config()
    while peaker.has_next():
        # Each of the following conditions must move the stream
        # forward and -- excepting separators -- yield a token.
        if _is_space(peaker.peak()):
            # Collapse runs of spaces into INDENT tokens, one per
            # `config.indentation` spaces; leftover spaces are dropped.
            spaces = ''.join(peaker.take_while(_is_space))
            for _ in range(len(spaces) // config.indentation):
                yield Token(' ' * 4, TokenType.INDENT, line_number)
        elif _is_newline(peaker.peak()):
            value = peaker.next()
            yield Token(value, TokenType.NEWLINE, line_number)
            line_number += 1
        elif _is_colon(peaker.peak()):
            value = peaker.next()
            yield Token(value, TokenType.COLON, line_number)
        elif _is_separator(peaker.peak()):
            # Non-space, non-newline whitespace is discarded entirely.
            peaker.take_while(_is_separator)
        elif _is_hash(peaker.peak()):
            value = peaker.next()
            yield Token(value, TokenType.HASH, line_number)
        elif _is_lparen(peaker.peak()):
            value = peaker.next()
            yield Token(value, TokenType.LPAREN, line_number)
        elif _is_rparen(peaker.peak()):
            value = peaker.next()
            yield Token(value, TokenType.RPAREN, line_number)
        elif _is_hyphen(peaker.peak()):
            value = ''.join(peaker.take_while(_is_word))
            # A run made up exclusively of hyphens is a section
            # underline (HEADER); otherwise an ordinary word.
            if value.count('-') == len(value):
                yield Token(value, TokenType.HEADER, line_number)
            else:
                yield Token(value, TokenType.WORD, line_number)
        else:
            value = ''.join(peaker.take_while(_is_word))
            if extra != '':
                value = extra + value
                extra = ''
            Assert(
                len(value) > 0,
                "There should be non-special characters.",
            )
            yield Token(value, TokenType.WORD, line_number)
# Mapping from docstring section keywords (Google-style and
# Sphinx-style) to the more specific token types that `condense`
# applies to plain WORD tokens.
KEYWORDS = {
    'Args': TokenType.ARGUMENTS,
    'Arguments': TokenType.ARGUMENTS,
    'Yields': TokenType.YIELDS,
    'Raises': TokenType.RAISES,
    'Returns': TokenType.RETURNS,
    'noqa': TokenType.NOQA,
    'param': TokenType.ARGUMENTS,
    'parameter': TokenType.ARGUMENTS,
    'Parameters': TokenType.ARGUMENTS,
    'arg': TokenType.ARGUMENTS,
    'argument': TokenType.ARGUMENTS,
    'key': TokenType.VARIABLES,
    'keyword': TokenType.VARIABLES,
    'var': TokenType.VARIABLES,
    'ivar': TokenType.VARIABLES,
    'cvar': TokenType.VARIABLES,
    'type': TokenType.ARGUMENT_TYPE,
    'vartype': TokenType.VARIABLE_TYPE,
    'raises': TokenType.RAISES,
    'yield': TokenType.YIELDS,
    'yields': TokenType.YIELDS,
    'ytype': TokenType.YIELD_TYPE,
    'return': TokenType.RETURNS,
    'returns': TokenType.RETURNS,
    'rtype': TokenType.RETURN_TYPE,
    'Other': TokenType.OTHER,
    'Receives': TokenType.RECEIVES,
    'Warns': TokenType.WARNS,
    'Warnings': TokenType.WARNS,
    'See': TokenType.SEE,
    'Also': TokenType.ALSO,
    'Notes': TokenType.NOTES,
    'Examples': TokenType.EXAMPLES,
    'References': TokenType.REFERENCES,
}
def condense(tokens):
    # type: (Iterator[Token]) -> List[Token]
    """Condense the stream of tokens into a list consumable by CYK.

    This serves two purposes:

    1. It minimizes the lookup table used in the CYK algorithm.
       (The CYK algorithm is a dynamic algorithm, with one dimension
       in the two-dimension lookup table being determined by the number
       of tokens.)
    2. It applies more discriminate token types to the tokens identified
       by lex. Eventually, this will be moved into the lex function.

    Args:
        tokens: The stream of tokens from the lex function.

    Returns:
        A List of tokens which have been condensed into as small a
        representation as possible.

    """
    ret = list()  # type: List[Token]
    try:
        curr = next(tokens)
    except StopIteration:
        # Empty token stream.
        return ret
    # Promote the very first token if it is a section keyword.
    if curr.value in KEYWORDS:
        curr.token_type = KEYWORDS[curr.value]
    encountered_noqa = False
    for token in tokens:
        if token.token_type == TokenType.WORD and token.value in KEYWORDS:
            ret.append(curr)
            if token.value == 'noqa':
                encountered_noqa = True
            curr = Token(
                token.value,
                KEYWORDS[token.value],
                token.line_number,
            )
        elif token.token_type == TokenType.WORD:
            # Merge consecutive plain words into one WORD token;
            # merging is suspended after a noqa on the same line.
            if curr.token_type == TokenType.WORD and not encountered_noqa:
                curr.value += ' {}'.format(token.value)
            else:
                ret.append(curr)
                curr = token
        elif token.token_type == TokenType.NEWLINE:
            ret.append(curr)
            curr = token
            # A noqa only has effect until the end of its line.
            encountered_noqa = False
        else:
            ret.append(curr)
            curr = token
    ret.append(curr)
    return ret
| mit | 151fa2efa99d52889be9942bc13a523f | 27.438017 | 76 | 0.585876 | 3.795918 | false | false | false | false |
terrencepreilly/darglint | darglint/node.py | 1 | 6729 | from collections import (
deque,
)
from typing import (
Any,
Iterator,
Optional,
List,
Tuple,
)
from .token import (
Token,
TokenType,
)
# Token types treated as whitespace when reconstructing a docstring.
WHITESPACE = {TokenType.INDENT, TokenType.NEWLINE}

# A best guess at the maximum height of a docstring tree,
# for use in recursion bounds checking.
MAX_TREE_HEIGHT = 300
class CykNode(object):
"""A node for use in a cyk parse."""
def __init__(self,
symbol,
lchild=None,
rchild=None,
value=None,
annotations=list(),
weight=0):
# type: (str, Optional[CykNode], Optional[CykNode], Optional[Token], List[Any], int) -> None # noqa: E501
self.symbol = symbol
self.lchild = lchild
self.rchild = rchild
self.value = value
self.annotations = annotations
self._line_number_cache = None # type: Optional[Tuple[int, int]]
# If there is an explicit weight, we definitely want to use
# that (there was probably a good reason it was given.)
#
# If no weight was given, but the children have weights, then
# we probably want to give preference to this node over a node
# which has no weights at all.
if weight:
self.weight = weight
else:
self.weight = max([
0,
self.lchild.weight if self.lchild else 0,
self.rchild.weight if self.rchild else 0,
])
def __repr__(self):
if hasattr(self.value, 'token_type'):
return '<{}: {}>'.format(
self.symbol,
str(self.value.token_type)[10:] if self.value else '',
)
return '<{}>'.format(self.value)
def __str__(self, indent=0):
if self.value:
ret = (
' ' * indent
+ str(self.value.token_type)
+ ': '
+ repr(self.value.value)
)
else:
ret = ' ' * indent + self.symbol
if self.annotations:
ret += ': ' + ', '.join([str(x) for x in self.annotations])
if self.lchild:
ret += '\n' + self.lchild.__str__(indent + 2)
if self.rchild:
ret += '\n' + self.rchild.__str__(indent + 2)
return ret
def in_order_traverse(self):
# type: () -> Iterator[CykNode]
if self.lchild:
yield from self.lchild.in_order_traverse()
yield self
if self.rchild:
yield from self.rchild.in_order_traverse()
def breadth_first_walk(self):
queue = deque([self])
while queue:
curr = queue.pop()
yield curr
if curr.lchild:
queue.appendleft(curr.lchild)
if curr.rchild:
queue.appendleft(curr.rchild)
def first_instance(self, symbol):
# type: (str) -> Optional['CykNode']
for node in self.breadth_first_walk():
if node.symbol == symbol:
return node
return None
def walk(self):
# type: () -> Iterator['CykNode']
yield from self.in_order_traverse()
def equals(self, other):
# type: (Optional['CykNode']) -> bool
if other is None:
return False
if self.symbol != other.symbol:
return False
if self.value != other.value:
return False
if self.lchild and not self.lchild.equals(other.lchild):
return False
if self.rchild and not self.rchild.equals(other.rchild):
return False
return True
def reconstruct_string(self, strictness=0):
# type: (int) -> str
"""Reconstruct the docstring.
This method should rebuild the docstring while fixing style
errors. The errors themselves determine how to fix the node
they apply to. (If there isn't a good fix, then it's just the
identity function.)
Args:
strictness: How strictly we should correct. If an error
doesn't match the strictness, we won't correct for
that error.
Returns:
The docstring, reconstructed.
"""
# In order to make a reasonable guess as to the whitespace
# to apply between characters, we use a 3-token sliding
# window.
window_size = 3
window = deque(maxlen=window_size) # type: deque
source = self.in_order_traverse()
# Fill the buffer.
while len(window) < window_size:
try:
node = next(source)
except StopIteration:
break
if node.value:
window.append(node.value)
if not window:
return ''
ret = window[0].value
# Slide the window, filling the return value.
while len(window) > 1:
is_whitespace = (
window[0].token_type in WHITESPACE
or window[1].token_type in WHITESPACE
)
is_colon = window[1].token_type == TokenType.COLON
if is_whitespace or is_colon:
ret += window[1].value
else:
ret += ' ' + window[1].value
found_token = False
for node in source:
if node.value:
window.append(node.value)
found_token = True
break
if not found_token:
break
if len(window) == 3:
if (window[1].token_type in WHITESPACE
or window[2].token_type in WHITESPACE
or window[2].token_type == TokenType.COLON):
ret += window[2].value
else:
ret += ' ' + window[2].value
return ret
def _get_line_numbers_cached(self, recurse=0):
# type: (int) -> Tuple[int, int]
if recurse > MAX_TREE_HEIGHT:
return (-1, -1)
if self.value:
return (self.value.line_number, self.value.line_number)
elif self._line_number_cache:
return self._line_number_cache
leftmost = -1
if self.lchild:
leftmost = self.lchild._get_line_numbers_cached(recurse + 1)[0]
rightmost = leftmost
if self.rchild:
rightmost = self.rchild._get_line_numbers_cached(recurse + 1)[1]
self._line_number_cache = (leftmost, rightmost)
return self._line_number_cache or (-1, -1)
@property
def line_numbers(self):
# type: () -> Tuple[int, int]
return self._get_line_numbers_cached()
| mit | 93b4e9bf3097118443d2f379e4bdd6a3 | 30.009217 | 114 | 0.52192 | 4.184701 | false | false | false | false |
ui/django-post_office | post_office/validators.py | 1 | 1365 | from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
    """
    Validate email address.

    Both "Recipient Name <email@example.com>" and "email@example.com" are valid.
    """
    text = force_str(value)
    # Validate the whole string by default; narrow to the part inside
    # angle brackets when a "Name <address>" form is detected.
    address = text
    lt = text.find('<')
    gt = text.find('>')
    if lt != -1 and gt != -1 and lt + 1 < gt:
        address = text[lt + 1:gt]
    validate_email(address)
def validate_comma_separated_emails(value):
    """
    Validate every email address in a comma separated list of emails.

    Args:
        value: A list or tuple of addresses (each may use the
            "Name <address>" form).

    Raises:
        ValidationError: If ``value`` is not a list/tuple or if any
            entry is not a valid email address.
    """
    if not isinstance(value, (tuple, list)):
        raise ValidationError('Email list must be a list/tuple.')
    for email in value:
        try:
            validate_email_with_name(email)
        except ValidationError:
            # Re-raise with the offending address for a clearer message.
            raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
    """
    Basic Django Template syntax validation. This allows for robuster template
    authoring.

    Raises:
        ValidationError: If ``source`` cannot be compiled as a Django
            template.
    """
    try:
        # Compiling the template is sufficient to surface syntax errors.
        Template(source)
    except (TemplateSyntaxError, TemplateDoesNotExist) as err:
        raise ValidationError(str(err))
| mit | 2f3223741bda3111626fe1430b4d69cc | 28.042553 | 80 | 0.666667 | 4.50495 | false | false | false | false |
has2k1/plotnine | plotnine/stats/smoothers.py | 1 | 15679 | import warnings
from contextlib import suppress
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
from patsy import dmatrices
from ..exceptions import PlotnineError, PlotnineWarning
from ..utils import get_valid_kwargs
# Short alias for the statsmodels LOWESS implementation.
smlowess = sm.nonparametric.lowess
def predictdf(data, xseq, **params):
    """
    Make prediction on the data

    This is a general function responsible for dispatching
    to functions that do predictions for the specific models.

    Parameters
    ----------
    data : dataframe
        Data with which to fit the model.
    xseq : array_like
        Points at which to make predictions.
    **params : dict
        ``params['method']`` is either the name of a supported method
        or a callable with the signature
        ``func(data, xseq, **params)``.
    """
    methods = {
        'lm': lm,
        'ols': lm,
        'wls': lm,
        'rlm': rlm,
        'glm': glm,
        'gls': gls,
        'lowess': lowess,
        'loess': loess,
        'mavg': mavg,
        'gpr': gpr,
    }

    method = params['method']

    if isinstance(method, str):
        try:
            method = methods[method]
        except KeyError:
            msg = "Method should be one of {}"
            raise PlotnineError(msg.format(list(methods.keys())))

    if not callable(method):
        # Bug fix: the message was previously dropped
        # (`raise PlotnineError()` without arguments), and the two
        # string pieces were concatenated without a separating space.
        msg = ("'method' should either be a string or a function "
               "with the signature `func(data, xseq, **params)`")
        raise PlotnineError(msg)

    return method(data, xseq, **params)
def lm(data, xseq, **params):
    """
    Fit OLS / WLS if data has weight

    Fits an ordinary least squares model, or a weighted one when the
    data carries a ``weight`` column, and predicts at ``xseq``.
    Confidence bounds are added when ``params['se']`` is true.
    """
    if params['formula']:
        return lm_formula(data, xseq, **params)

    X = sm.add_constant(data['x'])
    Xseq = sm.add_constant(xseq)
    # Bug fix: the weight column is named 'weight' (as in lm_formula);
    # this previously looked up 'weights', so the WLS branch below was
    # effectively unreachable.
    weights = data.get('weight', None)

    if weights is None:
        init_kwargs, fit_kwargs = separate_method_kwargs(
            params['method_args'], sm.OLS, sm.OLS.fit)
        model = sm.OLS(data['y'], X, **init_kwargs)
    else:
        if np.any(weights < 0):
            raise ValueError(
                "All weights must be greater than zero."
            )
        init_kwargs, fit_kwargs = separate_method_kwargs(
            params['method_args'], sm.WLS, sm.WLS.fit)
        model = sm.WLS(data['y'], X, weights=weights, **init_kwargs)

    results = model.fit(**fit_kwargs)
    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(Xseq)

    if params['se']:
        alpha = 1 - params['level']
        prstd, iv_l, iv_u = wls_prediction_std(
            results, Xseq, alpha=alpha)
        data['se'] = prstd
        data['ymin'] = iv_l
        data['ymax'] = iv_u

    return data
def lm_formula(data, xseq, **params):
    """
    Fit OLS / WLS using a formula

    Uses ``smf.ols``/``smf.wls`` with the user-supplied formula and
    evaluation environment, predicting at ``xseq``.
    """
    formula = params['formula']
    # NOTE: the params key is spelled 'enviroment' (sic) throughout
    # this module.
    eval_env = params['enviroment']
    weights = data.get('weight', None)

    if weights is None:
        init_kwargs, fit_kwargs = separate_method_kwargs(
            params['method_args'], sm.OLS, sm.OLS.fit)
        model = smf.ols(
            formula,
            data,
            eval_env=eval_env,
            **init_kwargs
        )
    else:
        if np.any(weights < 0):
            raise ValueError(
                "All weights must be greater than zero."
            )
        # Consistency fix: introspect sm.WLS (the model actually used
        # below) rather than sm.OLS, matching the lm() function.
        init_kwargs, fit_kwargs = separate_method_kwargs(
            params['method_args'], sm.WLS, sm.WLS.fit)
        model = smf.wls(
            formula,
            data,
            weights=weights,
            eval_env=eval_env,
            **init_kwargs
        )

    results = model.fit(**fit_kwargs)
    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(data)

    if params['se']:
        # Rebuild the design matrix at the prediction points for the
        # standard-error computation.
        _, predictors = dmatrices(formula, data, eval_env=eval_env)
        alpha = 1 - params['level']
        prstd, iv_l, iv_u = wls_prediction_std(
            results, predictors, alpha=alpha)
        data['se'] = prstd
        data['ymin'] = iv_l
        data['ymax'] = iv_u

    return data
def rlm(data, xseq, **params):
    """
    Fit RLM

    Robust linear model; predicts at ``xseq``.  Standard errors are
    not yet supported and only trigger a warning.
    """
    if params['formula']:
        return rlm_formula(data, xseq, **params)

    X = sm.add_constant(data['x'])
    Xseq = sm.add_constant(xseq)

    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.RLM, sm.RLM.fit)
    model = sm.RLM(data['y'], X, **init_kwargs)
    results = model.fit(**fit_kwargs)

    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(Xseq)

    if params['se']:
        warnings.warn("Confidence intervals are not yet implemented"
                      " for RLM smoothing.", PlotnineWarning)

    return data
def rlm_formula(data, xseq, **params):
    """
    Fit RLM using a formula

    Standard errors are not yet supported and only trigger a warning.
    """
    # NOTE: the params key is spelled 'enviroment' (sic) throughout
    # this module.
    eval_env = params['enviroment']
    formula = params['formula']
    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.RLM, sm.RLM.fit)
    model = smf.rlm(
        formula,
        data,
        eval_env=eval_env,
        **init_kwargs
    )
    results = model.fit(**fit_kwargs)
    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(data)

    if params['se']:
        warnings.warn("Confidence intervals are not yet implemented"
                      " for RLM smoothing.", PlotnineWarning)

    return data
def gls(data, xseq, **params):
    """
    Fit GLS

    Generalized least squares; predicts at ``xseq`` and adds
    confidence bounds when ``params['se']`` is true.
    """
    if params['formula']:
        return gls_formula(data, xseq, **params)

    X = sm.add_constant(data['x'])
    Xseq = sm.add_constant(xseq)

    # Consistency fix: introspect sm.GLS (the model actually fitted
    # below) rather than sm.OLS, matching gls_formula().
    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.GLS, sm.GLS.fit)
    model = sm.GLS(data['y'], X, **init_kwargs)
    results = model.fit(**fit_kwargs)

    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(Xseq)

    if params['se']:
        alpha = 1 - params['level']
        prstd, iv_l, iv_u = wls_prediction_std(
            results, Xseq, alpha=alpha)
        data['se'] = prstd
        data['ymin'] = iv_l
        data['ymax'] = iv_u

    return data
def gls_formula(data, xseq, **params):
    """
    Fit GLS using a formula
    """
    # NOTE: the params key is spelled 'enviroment' (sic) throughout
    # this module.
    eval_env = params['enviroment']
    formula = params['formula']
    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.GLS, sm.GLS.fit)
    model = smf.gls(
        formula,
        data,
        eval_env=eval_env,
        **init_kwargs
    )
    results = model.fit(**fit_kwargs)
    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(data)

    if params['se']:
        # Rebuild the design matrix at the prediction points.
        _, predictors = dmatrices(formula, data, eval_env=eval_env)
        alpha = 1 - params['level']
        prstd, iv_l, iv_u = wls_prediction_std(
            results, predictors, alpha=alpha)
        data['se'] = prstd
        data['ymin'] = iv_l
        data['ymax'] = iv_u

    return data
def glm(data, xseq, **params):
    """
    Fit GLM

    Generalized linear model; predicts at ``xseq``.  Confidence
    bounds come from statsmodels' ``get_prediction``.
    """
    if params['formula']:
        return glm_formula(data, xseq, **params)

    X = sm.add_constant(data['x'])
    Xseq = sm.add_constant(xseq)

    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.GLM, sm.GLM.fit)
    model = sm.GLM(data['y'], X, **init_kwargs)
    results = model.fit(**fit_kwargs)

    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(Xseq)

    if params['se']:
        prediction = results.get_prediction(Xseq)
        ci = prediction.conf_int(1 - params['level'])
        data['ymin'] = ci[:, 0]
        data['ymax'] = ci[:, 1]

    return data
def glm_formula(data, xseq, **params):
    """
    Fit with GLM formula
    """
    # NOTE: the params key is spelled 'enviroment' (sic) throughout
    # this module.
    eval_env = params['enviroment']
    init_kwargs, fit_kwargs = separate_method_kwargs(
        params['method_args'], sm.GLM, sm.GLM.fit)
    model = smf.glm(
        params['formula'],
        data,
        eval_env=eval_env,
        **init_kwargs
    )
    results = model.fit(**fit_kwargs)

    data = pd.DataFrame({'x': xseq})
    data['y'] = results.predict(data)

    if params['se']:
        # Predict again at the bare x-values only (the `data` frame
        # above already has a 'y' column).
        df = pd.DataFrame({'x': xseq})
        prediction = results.get_prediction(df)
        ci = prediction.conf_int(1 - params['level'])
        data['ymin'] = ci[:, 0]
        data['ymax'] = ci[:, 1]

    return data
def lowess(data, xseq, **params):
    """
    Lowess fitting

    Standard errors are not yet supported and only trigger a warning.
    """
    # `is_sorted` is fixed below, and `return_sorted` is not
    # supported; silently drop them from the user's arguments.
    # Note: the warning is only reached when the `del` succeeds --
    # a missing key raises KeyError first, which `suppress` swallows.
    for k in ('is_sorted', 'return_sorted'):
        with suppress(KeyError):
            del params['method_args'][k]
            warnings.warn(
                f"Smoothing method argument: {k}, has been ignored."
            )
    result = smlowess(data['y'], data['x'],
                      frac=params['span'],
                      is_sorted=True,
                      **params['method_args'])
    # smlowess returns a (n, 2) array of sorted (x, y) pairs.
    data = pd.DataFrame({
        'x': result[:, 0],
        'y': result[:, 1]})

    if params['se']:
        warnings.warn(
            "Confidence intervals are not yet implemented"
            " for lowess smoothings.",
            PlotnineWarning
        )

    return data
def loess(data, xseq, **params):
    """
    Loess smoothing

    Requires the optional 'scikit-misc' package.
    """
    try:
        from skmisc.loess import loess as loess_klass
    except ImportError:
        raise PlotnineError(
            "For loess smoothing, install 'scikit-misc'")

    try:
        weights = data['weight']
    except KeyError:
        weights = None

    kwargs = params['method_args']

    # Prediction points outside the fitted data range require the
    # 'direct' surface computation.
    extrapolate = (min(xseq) < min(data['x']) or
                   max(xseq) > max(data['x']))
    if 'surface' not in kwargs and extrapolate:
        # Creates a loess model that allows extrapolation
        # when making predictions
        kwargs['surface'] = 'direct'
        warnings.warn(
            "Making prediction outside the data range, "
            "setting loess control parameter `surface='direct'`.",
            PlotnineWarning)

    if 'span' not in kwargs:
        kwargs['span'] = params['span']

    lo = loess_klass(data['x'], data['y'], weights, **kwargs)
    lo.fit()

    data = pd.DataFrame({'x': xseq})

    if params['se']:
        alpha = 1 - params['level']
        prediction = lo.predict(xseq, stderror=True)
        ci = prediction.confidence(alpha=alpha)
        data['se'] = prediction.stderr
        data['ymin'] = ci.lower
        data['ymax'] = ci.upper
    else:
        prediction = lo.predict(xseq, stderror=False)

    data['y'] = prediction.values

    return data
def mavg(data, xseq, **params):
    """
    Fit moving average

    Uses pandas rolling-window statistics; ``xseq`` is ignored and the
    averages are reported at the original x positions past the first
    window.
    """
    window = params['method_args']['window']

    # The first average comes after the full window size
    # has been swept over
    # NOTE(review): slicing with `[window:]` also drops the first
    # *valid* mean (at index window-1) -- possibly off by one; confirm
    # against the intended behavior.
    rolling = data['y'].rolling(**params['method_args'])
    y = rolling.mean()[window:]
    n = len(data)
    stderr = rolling.std()[window:]
    x = data['x'][window:]
    data = pd.DataFrame({'x': x, 'y': y})
    data.reset_index(inplace=True, drop=True)

    if params['se']:
        df = n - window  # Original - Used
        data['ymin'], data['ymax'] = tdist_ci(
            y, df, stderr, params['level'])
        data['se'] = stderr

    return data
def gpr(data, xseq, **params):
    """
    Fit gaussian process

    Uses ``sklearn.gaussian_process.GaussianProcessRegressor``;
    ``params['method_args']`` are passed to its constructor.
    """
    try:
        from sklearn import gaussian_process
    except ImportError:
        raise PlotnineError(
            "To use gaussian process smoothing, "
            "You need to install scikit-learn.")

    kwargs = params['method_args']
    if not kwargs:
        warnings.warn(
            "See sklearn.gaussian_process.GaussianProcessRegressor "
            "for parameters to pass in as 'method_args'", PlotnineWarning)

    regressor = gaussian_process.GaussianProcessRegressor(**kwargs)
    X = np.atleast_2d(data['x']).T
    n = len(data)
    Xseq = np.atleast_2d(xseq).T
    regressor.fit(X, data['y'])

    data = pd.DataFrame({'x': xseq})
    if params['se']:
        y, stderr = regressor.predict(Xseq, return_std=True)
        data['y'] = y
        data['se'] = stderr
        data['ymin'], data['ymax'] = tdist_ci(
            y, n-1, stderr, params['level'])
    else:
        # Bug fix: this previously also passed `return_std=True`,
        # which returns a (mean, std) tuple rather than the mean
        # array expected by the 'y' column.
        data['y'] = regressor.predict(Xseq)

    return data
def tdist_ci(x, df, stderr, level):
    """
    Confidence intervals using the t-distribution.

    Falls back to the normal distribution when ``df`` is None.
    """
    q = (1 + level) / 2
    if df is None:
        quantile = stats.norm.ppf(q)
    else:
        quantile = stats.t.ppf(q, df)
    delta = quantile * stderr
    return x - delta, x + delta
# Override wls_prediction_std from statsmodels to calculate the confidence
# interval instead of only the prediction interval
def wls_prediction_std(res, exog=None, weights=None, alpha=0.05,
                       interval='confidence'):
    """
    Calculate standard deviation and confidence interval

    Applies to WLS and OLS, not to general GLS,
    that is independently but not identically distributed observations

    Parameters
    ----------
    res : regression result instance
        results of WLS or OLS regression required attributes see notes
    exog : array_like (optional)
        exogenous variables for points to predict
    weights : scalar or array_like (optional)
        weights as defined for WLS (inverse of variance of observation)
    alpha : float (default: alpha = 0.05)
        confidence level for two-sided hypothesis
    interval : str
        Type of interval to compute. One of "confidence" or "prediction"

    Returns
    -------
    predstd : array_like, 1d
        standard error of prediction
        same length as rows of exog
    interval_l, interval_u : array_like
        lower und upper confidence bounds

    Notes
    -----
    The result instance needs to have at least the following
    res.model.predict() : predicted values or
    res.fittedvalues : values used in estimation
    res.cov_params() : covariance matrix of parameter estimates
    If exog is 1d, then it is interpreted as one observation,
    i.e. a row vector.

    NOTE(review): an unrecognised `interval` value leaves `predstd`
    undefined and raises NameError below.

    testing status: not compared with other packages

    References
    ----------
    Greene p.111 for OLS, extended to WLS by analogy
    """
    # work around current bug:
    # fit doesn't attach results to model, predict broken
    # res.model.results
    covb = res.cov_params()
    if exog is None:
        # Predict at the estimation points.
        exog = res.model.exog
        predicted = res.fittedvalues
        if weights is None:
            weights = res.model.weights
    else:
        exog = np.atleast_2d(exog)
        if covb.shape[1] != exog.shape[1]:
            raise ValueError('wrong shape of exog')
        predicted = res.model.predict(res.params, exog)
        if weights is None:
            weights = 1.
        else:
            weights = np.asarray(weights)
            if weights.size > 1 and len(weights) != exog.shape[0]:
                raise ValueError('weights and exog do not have matching shape')

    # full covariance:
    # predvar = res3.mse_resid + np.diag(np.dot(X2,np.dot(covb,X2.T)))
    # predication variance only
    predvar = res.mse_resid/weights
    # Per-row quadratic form x' covb x (variance of the mean response).
    ip = (exog * np.dot(covb, exog.T).T).sum(1)
    if interval == 'confidence':
        predstd = np.sqrt(ip)
    elif interval == 'prediction':
        predstd = np.sqrt(ip + predvar)
    tppf = stats.t.isf(alpha/2., res.df_resid)
    interval_u = predicted + tppf * predstd
    interval_l = predicted - tppf * predstd
    return predstd, interval_l, interval_u
def separate_method_kwargs(method_args, init_method, fit_method):
    """
    Categorise kwargs passed to the stat

    Some args are of the init method others for the fit method
    The separation is done by introspecting the init & fit methods

    Raises:
        PlotnineError: If any argument belongs to neither method.
    """
    # inspect the methods
    init_kwargs = get_valid_kwargs(init_method, method_args)
    fit_kwargs = get_valid_kwargs(fit_method, method_args)

    # Raise for unknown kwargs
    known_kwargs = set(init_kwargs) | set(fit_kwargs)
    unknown_kwargs = set(method_args) - known_kwargs
    if unknown_kwargs:
        raise PlotnineError(
            "The following method arguments could not be recognised: "
            f"{list(unknown_kwargs)}"
        )
    return init_kwargs, fit_kwargs
| mit | fe6e2665f3c4277103df806bc7dbbbfa | 27.098566 | 79 | 0.576376 | 3.659897 | false | false | false | false |
psd-tools/psd-tools | tests/psd_tools/utils.py | 2 | 1629 | from __future__ import absolute_import, unicode_literals
import logging
import fnmatch
import os
import tempfile
from psd_tools.utils import trimmed_repr
# Verbose logging for all tests in this package.
logging.basicConfig(level=logging.DEBUG)

# Fixture files whose embedded strings use maccyrillic encoding.
CYRILLIC_FILES = {
    'layer_mask_data.psb',
    'layer_mask_data.psd',
    'layer_params.psb',
    'layer_params.psd',
    'layer_comps.psb',
    'layer_comps.psd',
}

# Fixture files with an unknown string encoding.
OTHER_FILES = {
    'advanced-blending.psd',
    'effect-stroke-gradient.psd',
    'layer_effects.psd',
    'patterns.psd',
    'fill_adjustments.psd',
    'blend-and-clipping.psd',
    'clipping-mask2.psd',
}

# Root of the test suite: two directories above this module.
TEST_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def find_files(pattern='*.ps*', root=TEST_ROOT):
    """Yield paths under `root` whose filename matches `pattern`."""
    # Idiom fix: the walk variable previously rebound the `root`
    # parameter (shadowing); behavior is unchanged.
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, filename)
def full_name(filename):
    """Return the absolute path of a fixture under ``psd_files``."""
    return os.path.join(TEST_ROOT, 'psd_files', filename)
def all_files():
    """List all fixture files, excluding third-party PSDs."""
    return [f for f in find_files() if f.find('third-party-psds') < 0]
def check_write_read(element, *args, **kwargs):
    """Round-trip `element` through a temporary file and compare."""
    with tempfile.TemporaryFile() as fp:
        element.write(fp, *args, **kwargs)
        fp.flush()
        fp.seek(0)
        decoded = element.read(fp, *args, **kwargs)
        assert element == decoded, '%s vs %s' % (element, decoded)
def check_read_write(cls, data, *args, **kwargs):
    """Decode `data` with `cls` and check it re-encodes byte-for-byte."""
    decoded = cls.frombytes(data, *args, **kwargs)
    encoded = decoded.tobytes(*args, **kwargs)
    assert data == encoded, '%s vs %s' % (
        trimmed_repr(data), trimmed_repr(encoded)
    )
| mit | 624b88f1f3654de326522ac529e402a7 | 25.274194 | 71 | 0.648864 | 3.169261 | false | false | false | false |
psd-tools/psd-tools | src/psd_tools/psd/patterns.py | 2 | 7229 | """
Patterns structure.
"""
from __future__ import absolute_import, unicode_literals
import attr
import io
import logging
from psd_tools.compression import compress, decompress
from psd_tools.constants import ColorMode, Compression
from psd_tools.psd.base import BaseElement, ListElement
from psd_tools.validators import in_
from psd_tools.utils import (
read_fmt,
write_fmt,
read_length_block,
write_length_block,
is_readable,
write_bytes,
read_unicode_string,
write_unicode_string,
read_pascal_string,
write_pascal_string,
)
logger = logging.getLogger(__name__)
class Patterns(ListElement):
    """
    List of Pattern structure. See
    :py:class:`~psd_tools.psd.patterns.Pattern`.
    """

    @classmethod
    def read(cls, fp, **kwargs):
        # Each pattern sits in its own length-prefixed block padded to
        # a multiple of 4 bytes; keep reading while at least a 4-byte
        # length field remains.
        items = []
        while is_readable(fp, 4):
            data = read_length_block(fp, padding=4)
            with io.BytesIO(data) as f:
                items.append(Pattern.read(f))
        return cls(items)

    def write(self, fp, **kwargs):
        # Returns the total number of bytes written.
        written = 0
        for item in self:
            written += write_length_block(
                fp, lambda f: item.write(f), padding=4
            )
        return written
@attr.s(repr=False, slots=True)
class Pattern(BaseElement):
    """
    Pattern structure.

    .. py:attribute:: version
    .. py:attribute:: image_mode

        See :py:class:`ColorMode`

    .. py:attribute:: point

        Size in tuple.

    .. py:attribute:: name

        `str` name of the pattern.

    .. py:attribute:: pattern_id

        ID of this pattern.

    .. py:attribute:: color_table

        Color table if the mode is INDEXED.

    .. py:attribute:: data

        See :py:class:`VirtualMemoryArrayList`
    """
    version = attr.ib(default=1, type=int)
    image_mode = attr.ib(
        # NOTE(review): the default here is the ColorMode class itself
        # rather than a member; confirm this default is ever exercised.
        default=ColorMode, converter=ColorMode, validator=in_(ColorMode)
    )
    point = attr.ib(default=None)
    name = attr.ib(default='', type=str)
    pattern_id = attr.ib(default='', type=str)
    color_table = attr.ib(default=None)
    data = attr.ib(default=None)

    @classmethod
    def read(cls, fp, **kwargs):
        version = read_fmt('I', fp)[0]
        assert version == 1, 'Invalid version %d' % (version)
        image_mode = ColorMode(read_fmt('I', fp)[0])
        point = read_fmt('2h', fp)
        name = read_unicode_string(fp)
        pattern_id = read_pascal_string(fp, encoding='ascii', padding=1)
        color_table = None
        if image_mode == ColorMode.INDEXED:
            # Indexed patterns carry a 256-entry RGB palette followed
            # by 4 bytes of padding.
            color_table = [read_fmt("3B", fp) for i in range(256)]
            read_fmt('4x', fp)
        data = VirtualMemoryArrayList.read(fp)
        return cls(
            version, image_mode, point, name, pattern_id, color_table, data
        )

    def write(self, fp, **kwargs):
        # Mirrors read(); returns the number of bytes written.
        written = write_fmt(fp, '2I', self.version, self.image_mode.value)
        written += write_fmt(fp, '2h', *self.point)
        written += write_unicode_string(fp, self.name)
        written += write_pascal_string(
            fp, self.pattern_id, encoding='ascii', padding=1
        )
        if self.color_table:
            for row in self.color_table:
                written += write_fmt(fp, '3B', *row)
            written += write_fmt(fp, '4x')
        written += self.data.write(fp)
        return written
@attr.s(repr=False, slots=True)
class VirtualMemoryArrayList(BaseElement):
    """
    VirtualMemoryArrayList structure. Container of channels.

    .. py:attribute:: version
    .. py:attribute:: rectangle

        Tuple of `int`

    .. py:attribute:: channels

        List of :py:class:`VirtualMemoryArray`
    """
    version = attr.ib(default=3, type=int)
    rectangle = attr.ib(default=None)
    channels = attr.ib(default=None)

    @classmethod
    def read(cls, fp, **kwargs):
        version = read_fmt('I', fp)[0]
        assert version == 3, 'Invalid version %d' % (version)
        data = read_length_block(fp)
        with io.BytesIO(data) as f:
            rectangle = read_fmt('4I', f)
            num_channels = read_fmt('I', f)[0]
            channels = []
            # The stored count excludes two planes, so two extra
            # VirtualMemoryArray entries are always read (and
            # subtracted again in _write_body).
            for _ in range(num_channels + 2):
                channels.append(VirtualMemoryArray.read(f))
        return cls(version, rectangle, channels)

    def write(self, fp, **kwargs):
        # Body is written inside a length-prefixed block.
        written = write_fmt(fp, 'I', self.version)
        return written + write_length_block(fp, lambda f: self._write_body(f))

    def _write_body(self, fp):
        written = write_fmt(fp, '4I', *self.rectangle)
        written += write_fmt(fp, 'I', len(self.channels) - 2)
        for channel in self.channels:
            written += channel.write(fp)
        return written
@attr.s(repr=False, slots=True)
class VirtualMemoryArray(BaseElement):
    """
    VirtualMemoryArrayList structure, corresponding to each channel.

    .. py:attribute:: is_written
    .. py:attribute:: depth
    .. py:attribute:: rectangle
    .. py:attribute:: pixel_depth
    .. py:attribute:: compression
    .. py:attribute:: data
    """
    is_written = attr.ib(default=0)
    depth = attr.ib(default=None)
    rectangle = attr.ib(default=None)
    pixel_depth = attr.ib(default=None)
    compression = attr.ib(
        default=Compression.RAW,
        converter=Compression,
        validator=in_(Compression)
    )
    data = attr.ib(default=b'')

    @classmethod
    def read(cls, fp, **kwargs):
        is_written = read_fmt('I', fp)[0]
        if is_written == 0:
            return cls(is_written=is_written)
        length = read_fmt('I', fp)[0]
        if length == 0:
            return cls(is_written=is_written)
        depth = read_fmt('I', fp)[0]
        rectangle = read_fmt('4I', fp)
        pixel_depth, compression = read_fmt('HB', fp)
        # Payload is `length` minus the 23 header bytes just consumed
        # (4 depth + 16 rectangle + 2 pixel depth + 1 compression).
        data = fp.read(length - 23)
        return cls(
            is_written, depth, rectangle, pixel_depth, compression, data
        )

    def write(self, fp, **kwargs):
        written = write_fmt(fp, 'I', self.is_written)
        if self.is_written == 0:
            return written
        if self.depth is None:
            # Marked as written but empty: store a zero length.
            written += write_fmt(fp, 'I', 0)
            return written
        return written + write_length_block(fp, lambda f: self._write_body(f))

    def _write_body(self, fp):
        written = write_fmt(fp, 'I', self.depth)
        written += write_fmt(fp, '4I', *self.rectangle)
        written += write_fmt(
            fp, 'HB', self.pixel_depth, self.compression.value
        )
        written += write_bytes(fp, self.data)
        return written

    def get_data(self):
        """Get decompressed bytes."""
        if not self.is_written:
            return None
        # rectangle[3] is the width and rectangle[2] the height,
        # matching the (0, 0, height, width) tuple set_data() writes.
        width, height = self.rectangle[3], self.rectangle[2]
        return decompress(
            self.data, self.compression, width, height, self.depth, version=1
        )

    def set_data(self, size, data, depth, compression=0):
        """Set bytes.  ``size`` is a (width, height) pair."""
        self.data = compress(
            data, compression, size[0], size[1], depth, version=1
        )
        self.depth = int(depth)
        self.pixel_depth = int(depth)
        self.rectangle = (0, 0, int(size[1]), int(size[0]))
        self.compression = Compression(compression)
        self.is_written = True
| mit | 4ea22450e9c4f13a351678f012468252 | 28.267206 | 78 | 0.585005 | 3.663964 | false | false | false | false |
psd-tools/psd-tools | src/psd_tools/composite/vector.py | 1 | 15508 | import logging
from typing import Tuple
import numpy as np
from psd_tools.api.numpy_io import EXPECTED_CHANNELS, get_pattern
from psd_tools.constants import Tag
from psd_tools.terminology import Enum, Key, Klass, Type
from scipy import interpolate
logger = logging.getLogger(__name__)
def _get_color(desc) -> Tuple[float, ...]:
    """Extract a normalized color tuple (components in [0, 1]) from a fill
    descriptor.

    The descriptor's ``Key.Color`` entry is a class-tagged color record
    (RGB, grayscale, CMYK, or Lab). RGB and Lab components are stored in
    the 0-255 range and are divided by 255; grayscale and CMYK components
    are stored as 0-100 values and are inverted into [0, 1].
    """
    def _scaled(color_desc, keys):
        # 0-255 components -> [0, 1].
        return tuple(float(color_desc[key]) / 255. for key in keys)
    def _inverted(color_desc, keys):
        # 0-100 components, inverted -> [0, 1].
        return tuple((100. - float(color_desc[key])) / 100. for key in keys)
    converters = {
        Klass.RGBColor: lambda x: _scaled(x, (Key.Red, Key.Green, Key.Blue)),
        Klass.Grayscale: lambda x: _inverted(x, (Key.Gray,)),
        Klass.CMYKColor: lambda x: _inverted(
            x, (Key.Cyan, Key.Magenta, Key.Yellow, Key.Black)),
        Klass.LabColor: lambda x: _scaled(x, (Key.Luminance, Key.A, Key.B)),
    }
    color_desc = desc.get(Key.Color)
    assert color_desc, f"Could not find a color descriptor {desc}"
    return converters[color_desc.classID](color_desc)
def draw_vector_mask(layer):
    """Rasterize the layer's vector mask as a filled (brush-only) path."""
    fill = {'color': 255}
    return _draw_path(layer, brush=fill)
def draw_stroke(layer):
    """Rasterize the layer's vector stroke as an outlined (pen-only) path.

    Only the stroke width is honored; line cap, line join and miter limit
    from the stroke descriptor are currently ignored (aggdraw >= 1.3.12
    would be required to support the additional pen parameters).
    """
    desc = layer.stroke._data
    line_width = float(desc.get('strokeStyleLineWidth', 1.))
    pen = {'color': 255, 'width': line_width}
    return _draw_path(layer, pen=pen)
def _draw_path(layer, brush=None, pen=None):
    """Rasterize the layer's vector paths into a float32 mask of shape
    (height, width, 1) with values clipped to [0, 1].

    Subpaths are grouped into shapes (operation == -1 continues the
    previous shape) and combined with each shape's boolean operation:
    0 = exclude, 1 = union, 2 = subtract, 3 = intersect.
    """
    height, width = layer._psd.height, layer._psd.width
    color = 0
    # With no paths at all, the initial fill rule alone decides the mask.
    if layer.vector_mask.initial_fill_rule and \
            len(layer.vector_mask.paths) == 0:
        color = 1
    mask = np.full((height, width, 1), color, dtype=np.float32)
    # Group merged path components.
    paths = []
    for subpath in layer.vector_mask.paths:
        if subpath.operation == -1:
            # Continuation of the previous shape.
            paths[-1].append(subpath)
        else:
            paths.append([subpath])
    # Apply shape operation.
    first = True
    for subpath_list in paths:
        plane = _draw_subpath(subpath_list, width, height, brush, pen)
        assert mask.shape == (height, width, 1)
        assert plane.shape == mask.shape
        op = subpath_list[0].operation
        if op == 0:  # Exclude = Union - Intersect.
            mask = mask + plane - 2 * mask * plane
        elif op == 1:  # Union (Combine).
            mask = mask + plane - mask * plane
        elif op == 2:  # Subtract.
            # For fills, a leading subtract operates on the inverted canvas.
            if first and brush:
                mask = 1 - mask
            mask = np.maximum(0, mask - plane)
        elif op == 3:  # Intersect.
            if first and brush:
                mask = 1 - mask
            mask = mask * plane
        first = False
    return np.minimum(1, np.maximum(0, mask))
def _draw_subpath(subpath_list, width, height, brush, pen):
    """
    Rasterize Bezier curves.

    Returns a float32 array of shape (height, width, 1) with values in
    [0, 1]. ``brush`` and ``pen`` are keyword dicts forwarded to
    ``aggdraw.Brush`` / ``aggdraw.Pen``; either may be None.

    TODO: Replace aggdraw implementation with skimage.draw.
    """
    import aggdraw
    from PIL import Image
    mask = Image.new('L', (width, height), 0)
    draw = aggdraw.Draw(mask)
    pen = aggdraw.Pen(**pen) if pen else None
    brush = aggdraw.Brush(**brush) if brush else None
    for subpath in subpath_list:
        if len(subpath) <= 1:
            # A path needs at least two knots to trace anything.
            logger.warning('not enough knots: %d' % len(subpath))
            continue
        # Build an SVG-style path string and let aggdraw render it.
        path = ' '.join(map(str, _generate_symbol(subpath, width, height)))
        symbol = aggdraw.Symbol(path)
        draw.symbol((0, 0), symbol, pen, brush)
    draw.flush()
    del draw
    return np.expand_dims(np.array(mask).astype(np.float32) / 255., 2)
def _generate_symbol(path, width, height, command='C'):
"""Sequence generator for SVG path."""
if len(path) == 0:
return
# Initial point.
yield 'M'
yield path[0].anchor[1] * width
yield path[0].anchor[0] * height
yield command
# Closed path or open path
points = (
zip(path, path[1:] +
path[0:1]) if path.is_closed() else zip(path, path[1:])
)
# Rest of the points.
for p1, p2 in points:
yield p1.leaving[1] * width
yield p1.leaving[0] * height
yield p2.preceding[1] * width
yield p2.preceding[0] * height
yield p2.anchor[1] * width
yield p2.anchor[0] * height
if path.is_closed():
yield 'Z'
def create_fill_desc(layer, desc, viewport):
    """Create a fill image from an explicit fill descriptor.

    Returns a ``(color, shape)`` pair; both entries are ``None`` for
    unsupported descriptor classes.
    """
    dispatch = {
        b'solidColorLayer': lambda: draw_solid_color_fill(viewport, desc),
        b'patternLayer': lambda: draw_pattern_fill(viewport, layer._psd, desc),
        b'gradientLayer': lambda: draw_gradient_fill(viewport, desc),
    }
    handler = dispatch.get(desc.classID)
    return handler() if handler else (None, None)
def create_fill(layer, viewport):
    """Create a fill image from the layer's tagged blocks.

    Checks solid-color, pattern and gradient fill settings in that order;
    falls back to the vector stroke content when fill is enabled there.
    Returns a ``(color, shape)`` pair, or ``(None, None)`` when the layer
    carries no fill information.
    """
    if Tag.SOLID_COLOR_SHEET_SETTING in layer.tagged_blocks:
        desc = layer.tagged_blocks.get_data(Tag.SOLID_COLOR_SHEET_SETTING)
        return draw_solid_color_fill(viewport, desc)
    if Tag.PATTERN_FILL_SETTING in layer.tagged_blocks:
        desc = layer.tagged_blocks.get_data(Tag.PATTERN_FILL_SETTING)
        return draw_pattern_fill(viewport, layer._psd, desc)
    if Tag.GRADIENT_FILL_SETTING in layer.tagged_blocks:
        desc = layer.tagged_blocks.get_data(Tag.GRADIENT_FILL_SETTING)
        return draw_gradient_fill(viewport, desc)
    if Tag.VECTOR_STROKE_CONTENT_DATA in layer.tagged_blocks:
        stroke = layer.tagged_blocks.get_data(Tag.VECTOR_STROKE_DATA)
        # NOTE(review): ``stroke.get('fillEnabled')`` would be None if the
        # key were absent, raising AttributeError on ``.value`` — confirm
        # stroke descriptors always carry 'fillEnabled'.
        if not stroke or stroke.get('fillEnabled').value is True:
            desc = layer.tagged_blocks.get_data(Tag.VECTOR_STROKE_CONTENT_DATA)
            # The content descriptor carries exactly one of these keys.
            if Key.Color in desc:
                return draw_solid_color_fill(viewport, desc)
            elif Key.Pattern in desc:
                return draw_pattern_fill(viewport, layer._psd, desc)
            elif Key.Gradient in desc:
                return draw_gradient_fill(viewport, desc)
    return None, None
def draw_solid_color_fill(viewport, desc):
    """Create a solid color fill covering the viewport.

    Returns ``(color, shape)`` where ``shape`` is always ``None`` and
    ``color`` is a float32 array of shape (height, width, channels).
    """
    components = _get_color(desc)
    width = viewport[2] - viewport[0]
    height = viewport[3] - viewport[1]
    color = np.full(
        (height, width, len(components)), components, dtype=np.float32
    )
    return color, None
def draw_pattern_fill(viewport, psd, desc):
    """
    Create a pattern fill.

    Returns a ``(color, shape)`` pair; ``shape`` is the trailing alpha
    plane when the tiled pattern has more channels than the image mode
    expects, otherwise ``None``. Returns ``(None, None)`` when the
    referenced pattern is missing.

    Example descriptor::

        Descriptor(b'patternFill'){
            'enab': True,
            'present': True,
            'showInDialog': True,
            'Md  ': (b'BlnM', b'CBrn'),
            'Opct': 100.0 Percent,
            'Ptrn': Descriptor(b'Ptrn'){
                'Nm  ': 'foo\x00',
                'Idnt': '5e1713ab-e968-4c4c-8855-c8fa2cde8610\x00'
            },
            'Angl': 0.0 Angle,
            'Scl ': 87.0 Percent,
            'Algn': True,
            'phase': Descriptor(b'Pnt '){'Hrzn': 0.0, 'Vrtc': 0.0}
        }

    .. todo:: Test this.
    """
    pattern_id = desc[Enum.Pattern][Key.ID].value.rstrip('\x00')
    pattern = psd._get_pattern(pattern_id)
    if not pattern:
        logger.error('Pattern not found: %s' % (pattern_id))
        return None, None
    panel = get_pattern(pattern)
    assert panel.shape[0] > 0
    # Scale is stored as a percentage; resize the tile before tiling.
    scale = float(desc.get(Key.Scale, 100.)) / 100.
    if scale != 1.:
        from skimage.transform import resize
        new_shape = (
            max(1, int(panel.shape[0] * scale)),
            max(1, int(panel.shape[1] * scale))
        )
        panel = resize(panel, new_shape)
    height, width = viewport[3] - viewport[1], viewport[2] - viewport[0]
    # Tile enough repetitions to cover the viewport, then crop.
    reps = (
        int(np.ceil(float(height) / panel.shape[0])),
        int(np.ceil(float(width) / panel.shape[1])),
        1,
    )
    channels = EXPECTED_CHANNELS.get(pattern.image_mode)
    pixels = np.tile(panel, reps)[:height, :width, :]
    if pixels.shape[2] > channels:
        # Split off the extra (alpha) plane as the fill's shape mask.
        return pixels[:, :, :channels], pixels[:, :, -1:]
    return pixels, None
def draw_gradient_fill(viewport, desc):
    """
    Create a gradient fill image.

    Builds a normalized index map ``Z`` over the viewport for the
    configured gradient style, then maps it through the gradient's color
    and opacity interpolators. Returns a ``(color, shape)`` pair; either
    entry may be ``None`` when the corresponding interpolator is missing.
    """
    height, width = viewport[3] - viewport[1], viewport[2] - viewport[0]
    angle = float(desc.get(Key.Angle, 0))
    scale = float(desc.get(Key.Scale, 100.)) / 100.
    # Blend width and height by how far the angle reaches into the
    # quadrant so the gradient spans the viewport (an approximation).
    ratio = (angle % 90)
    scale *= (90. - ratio) / 90. * width + (ratio / 90.) * height
    X, Y = np.meshgrid(
        np.linspace(-width / scale, width / scale, width, dtype=np.float32),
        np.linspace(-height / scale, height / scale, height, dtype=np.float32),
    )
    gradient_kind = desc.get(Key.Type).enum
    if gradient_kind == Enum.Linear:
        Z = _make_linear_gradient(X, Y, angle)
    elif gradient_kind == Enum.Radial:
        Z = _make_radial_gradient(X, Y)
    elif gradient_kind == Enum.Angle:
        Z = _make_angle_gradient(X, Y, angle)
    elif gradient_kind == Enum.Reflected:
        Z = _make_reflected_gradient(X, Y, angle)
    elif gradient_kind == Enum.Diamond:
        Z = _make_diamond_gradient(X, Y, angle)
    else:
        # Unsupported: b'shapeburst', only avail in stroke effect
        logger.warning('Unknown gradient style: %s.' % (gradient_kind))
        Z = np.full((height, width), 0.5, dtype=np.float32)
    Z = np.maximum(0., np.minimum(1., Z))
    if bool(desc.get(Key.Reverse, False)):
        Z = 1. - Z
    G, Ga = _make_gradient_color(desc.get(Key.Gradient))
    color = G(Z) if G is not None else None
    shape = np.expand_dims(Ga(Z), 2) if Ga is not None else None
    return color, shape
def _make_linear_gradient(X, Y, angle):
"""Generates index map for linear gradients."""
theta = np.radians(angle % 360)
Z = .5 * (np.cos(theta) * X - np.sin(theta) * Y + 1)
return Z
def _make_radial_gradient(X, Y):
"""Generates index map for radial gradients."""
Z = np.sqrt(np.power(X, 2) + np.power(Y, 2))
return Z
def _make_angle_gradient(X, Y, angle):
"""Generates index map for angle gradients."""
Z = (((180 * np.arctan2(Y, X) / np.pi) + angle) % 360) / 360
return Z
def _make_reflected_gradient(X, Y, angle):
"""Generates index map for reflected gradients."""
theta = np.radians(angle % 360)
Z = np.abs((np.cos(theta) * X - np.sin(theta) * Y))
return Z
def _make_diamond_gradient(X, Y, angle):
"""Generates index map for diamond gradients."""
theta = np.radians(angle % 360)
Z = np.abs(np.cos(theta) * X - np.sin(theta) *
Y) + np.abs(np.sin(theta) * X + np.cos(theta) * Y)
return Z
def _make_gradient_color(grad):
    """Build interpolator functions ``(G, Ga)`` for a gradient descriptor.

    ``G`` maps the gradient index to color components and ``Ga`` maps it
    to opacity; either may be ``None`` when unavailable. Callers unpack
    the result as a pair, so unknown gradient forms also yield
    ``(None, None)``.
    """
    gradient_form = grad.get(Type.GradientForm).enum
    if gradient_form == Enum.ColorNoise:
        return _make_noise_gradient_color(grad)
    elif gradient_form == Enum.CustomStops:
        return _make_linear_gradient_color(grad)
    logger.error('Unknown gradient form: %s' % gradient_form)
    # BUG FIX: previously returned a bare ``None``, which crashed callers
    # that unpack the result as ``G, Ga`` (see draw_gradient_fill).
    return None, None
def _make_linear_gradient_color(grad):
    """Build ``(G, Ga)`` interpolators from a custom-stops gradient.

    Stop locations are stored in 0-4096 units and normalized to [0, 1].
    ``Ga`` is ``None`` when the descriptor carries no transparency stops.
    Raises AssertionError when no color stop is present.
    """
    def _build_interpolator(stops, get_value):
        # Collect (location, value) pairs, keeping only the last value
        # when consecutive stops share a location.
        X, Y = [], []
        for stop in stops:
            location = float(stop.get(Key.Location)) / 4096.
            value = get_value(stop)
            if len(X) and X[-1] == location:
                # BUG FIX: '%d' truncated the fractional location to 0.
                logger.debug('Duplicate stop at %g' % location)
                X.pop(), Y.pop()
            X.append(location), Y.append(value)
        assert len(X) > 0
        if len(X) == 1:
            # A single stop yields a constant gradient.
            X = [0., 1.]
            Y = [Y[0], Y[0]]
        # Clamp out-of-range indices to the first/last stop values.
        return interpolate.interp1d(
            X, Y, axis=0, bounds_error=False, fill_value=(Y[0], Y[-1])
        )
    G = _build_interpolator(
        grad.get(Key.Colors, []),
        lambda stop: np.array(_get_color(stop), dtype=np.float32),
    )
    if Key.Transparency not in grad:
        return G, None
    Ga = _build_interpolator(
        grad.get(Key.Transparency),
        lambda stop: float(stop.get(Key.Opacity)) / 100.,
    )
    return G, Ga
def _make_noise_gradient_color(grad):
    """
    Make a noise gradient color.

    Returns ``(G, Ga)`` interpolators over the gradient index in [0, 1];
    ``Ga`` is ``None`` unless the descriptor requests transparency.

    TODO: Improve noise gradient quality.

    Example:

        Descriptor(b'Grdn'){
            'Nm  ': 'Custom\x00',
            'GrdF': (b'GrdF', b'ClNs'),
            'ShTr': False,
            'VctC': False,
            'ClrS': (b'ClrS', b'RGBC'),
            'RndS': 3650322,
            'Smth': 2048,
            'Mnm ': [0, 0, 0, 0],
            'Mxm ': [0, 100, 100, 100]
        }
    """
    # BUG FIX: ``scipy.ndimage.filters`` is a deprecated namespace that was
    # removed in SciPy 1.12; the functions live in ``scipy.ndimage``.
    from scipy.ndimage import maximum_filter1d, uniform_filter1d
    logger.debug('Noise gradient is not accurate.')
    roughness = grad.get(Key.Smoothness).value / 4096.  # Larger is sharper.
    maximum = np.array([x.value for x in grad.get(Key.Maximum)],
                       dtype=np.float32)
    minimum = np.array([x.value for x in grad.get(Key.Minimum)],
                       dtype=np.float32)
    seed = grad.get(Key.RandomSeed).value
    rng = np.random.RandomState(seed)
    # Random binary signal smoothed into a noise curve per channel.
    Y = rng.binomial(1, .5, (256, len(maximum))).astype(np.float32)
    size = max(1, int(roughness))
    Y = maximum_filter1d(Y, size, axis=0)
    Y = uniform_filter1d(Y, size * 64, axis=0)
    Y = Y / np.max(Y, axis=0)
    # Rescale each channel into its [minimum, maximum] percentage range.
    Y = ((maximum - minimum) * Y + minimum) / 100.
    X = np.linspace(0, 1, 256, dtype=np.float32)
    def _interp(Ys):
        # Shared construction; clamp out-of-range indices to the endpoints.
        return interpolate.interp1d(
            X, Ys, axis=0, bounds_error=False, fill_value=(Ys[0], Ys[-1])
        )
    if grad.get(Key.ShowTransparency):
        # Last column is the opacity channel.
        G = _interp(Y[:, :-1])
        Ga = _interp(Y[:, -1])
    else:
        G = _interp(Y[:, :3])
        Ga = None
    return G, Ga
| mit | a6505c3e784426745258cdc07c364a6d | 31.039256 | 79 | 0.564261 | 3.274974 | false | false | false | false |
rsheftel/pandas_market_calendars | pandas_market_calendars/calendar_utils.py | 1 | 11132 | """
Utilities to use with market_calendars
"""
import itertools
import warnings
import pandas as pd
import numpy as np
def merge_schedules(schedules, how='outer'):
    """
    Given a list of schedules will return a merged schedule. The merge method (how) will either return the superset
    of any datetime when any schedule is open (outer) or only the datetime where all markets are open (inner)

    CAVEATS:
        * This does not work for schedules with breaks, the break information will be lost.
        * Only "market_open" and "market_close" are considered, other market times are not yet supported.

    :param schedules: list of schedules
    :param how: outer or inner
    :return: schedule DataFrame
    """
    all_cols = [x.columns for x in schedules]
    all_cols = list(itertools.chain(*all_cols))
    if ('break_start' in all_cols) or ('break_end' in all_cols):
        warnings.warn('Merge schedules will drop the break_start and break_end from result.')
    result = schedules[0]
    for schedule in schedules[1:]:
        # Index-aligned merge; overlapping columns get _x/_y suffixes.
        result = result.merge(schedule, how=how, right_index=True, left_index=True)
        if how == 'outer':
            # Superset: earliest open, latest close.
            result['market_open'] = result.apply(lambda x: min(x.market_open_x, x.market_open_y), axis=1)
            result['market_close'] = result.apply(lambda x: max(x.market_close_x, x.market_close_y), axis=1)
        elif how == 'inner':
            # Intersection: latest open, earliest close.
            result['market_open'] = result.apply(lambda x: max(x.market_open_x, x.market_open_y), axis=1)
            result['market_close'] = result.apply(lambda x: min(x.market_close_x, x.market_close_y), axis=1)
        else:
            raise ValueError('how argument must be "inner" or "outer"')
        # Keep only merged columns so the next merge gets clean suffixes.
        result = result[['market_open', 'market_close']]
    return result
def convert_freq(index, frequency):
    """Down-sample a DatetimeIndex to the given (lower) frequency.

    :param index: DatetimeIndex to convert
    :param frequency: pandas frequency string (e.g. "1D")
    :return: DatetimeIndex at the new frequency
    """
    empty_frame = pd.DataFrame(index=index)
    return empty_frame.asfreq(frequency).index
class _date_range:
    """
    This is a callable class that should be used by calling the already initiated instance: `date_range`.
    Given a schedule, it will return a DatetimeIndex with all of the valid datetimes at the frequency given.
    The schedule columns should all have the same time zone.

    The calculations will be made for each trading session. If the passed schedule-DataFrame doesn't have
    breaks, there is one trading session per day going from market_open to market_close, otherwise there are two,
    the first one going from market_open to break_start and the second one from break_end to market_close.

    *Any trading session where start == end is considered a 'no-trading session' and will always be dropped*

    CAVEATS:
        * Only "market_open", "market_close" (and, optionally, "break_start" and "break_end")
          are considered, other market times are not yet supported by this class.
        * If the difference between start and end of a trading session is smaller than an interval of the
          frequency, and closed= "right" and force_close = False, the whole session will disappear.
          This will also raise a warning.

    Signature:
    .__call__(self, schedule, frequency, closed='right', force_close=True, **kwargs)

    :param schedule: schedule of a calendar, which may or may not include break_start and break_end columns
    :param frequency: frequency string that is used by pd.Timedelta to calculate the timestamps
        this must be "1D" or higher frequency
    :param closed: the way the intervals are labeled
        'right': use the end of the interval
        'left': use the start of the interval
        None: (or 'both') use the end of the interval but include the start of the first interval (the open)
    :param force_close: how the last value of a trading session is handled
        True: guarantee that the close of the trading session is the last value
        False: guarantee that there is no value greater than the close of the trading session
        None: leave the last value as it is calculated based on the closed parameter
    :param kwargs: unused. Solely for compatibility.
    """
    def __init__(self, schedule = None, frequency= None, closed='right', force_close=True):
        if not closed in ("left", "right", "both", None):
            raise ValueError("closed must be 'left', 'right', 'both' or None.")
        elif not force_close in (True, False, None):
            raise ValueError("force_close must be True, False or None.")
        self.closed = closed
        self.force_close = force_close
        self.has_breaks = False
        if frequency is None: self.frequency = None
        else:
            self.frequency = pd.Timedelta(frequency)
            if self.frequency > pd.Timedelta("1D"):
                raise ValueError('Frequency must be 1D or higher frequency.')
            # Validate the schedule before any calculations are attempted.
            elif schedule.market_close.lt(schedule.market_open).any():
                raise ValueError("Schedule contains rows where market_close < market_open,"
                                 " please correct the schedule")
            if "break_start" in schedule:
                if not all([
                    schedule.market_open.le(schedule.break_start).all(),
                    schedule.break_start.le(schedule.break_end).all(),
                    schedule.break_end.le(schedule.market_close).all()]):
                    raise ValueError("Not all rows match the condition: "
                                     "market_open <= break_start <= break_end <= market_close, "
                                     "please correct the schedule")
                self.has_breaks = True
    def _check_overlap(self, schedule):
        """checks if calculated end times would overlap with the next start times.
        Only an issue when force_close is None and closed != left.

        :param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
        :raises ValueError:"""
        if self.force_close is None and self.closed != "left":
            num_bars = self._calc_num_bars(schedule)
            # Last timestamp of each session when nothing is trimmed.
            end_times = schedule.start + num_bars * self.frequency
            if end_times.gt(schedule.start.shift(-1)).any():
                raise ValueError(f"The chosen frequency will lead to overlaps in the calculated index. "
                                 f"Either choose a higher frequency or avoid setting force_close to None "
                                 f"when setting closed to 'right', 'both' or None.")
    def _check_disappearing_session(self, schedule):
        """checks if requested frequency and schedule would lead to lost trading sessions.
        Only necessary when force_close = False and closed = "right".

        :param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
        :raises UserWarning:"""
        if self.force_close is False and self.closed == "right":
            if (schedule.end- schedule.start).lt(self.frequency).any():
                warnings.warn("An interval of the chosen frequency is larger than some of the trading sessions, "
                              "while closed== 'right' and force_close is False. This will make those trading sessions "
                              "disappear. Use a higher frequency or change the values of closed/force_close, to "
                              "keep this from happening.")
    def _calc_num_bars(self, schedule):
        """calculate the number of timestamps needed for each trading session.

        :param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
        :return: pd.Series of float64"""
        return np.ceil((schedule.end - schedule.start) / self.frequency)
    def _calc_time_series(self, schedule):
        """Method used by date_range to calculate the trading index.

        :param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
        :return: pd.Series of datetime64[ns, UTC]"""
        num_bars = self._calc_num_bars(schedule)
        # ---> calculate the desired timeseries:
        # Repeat each session start once per bar, then offset every
        # repetition by its position (cumcount) times the frequency.
        if self.closed == "left":
            opens = schedule.start.repeat(num_bars)   # keep as is
            time_series = (opens.groupby(opens.index).cumcount()) * self.frequency + opens
        elif self.closed == "right":
            opens = schedule.start.repeat(num_bars)   # dont add row but shift up
            time_series = (opens.groupby(opens.index).cumcount()+ 1) * self.frequency + opens
        else:
            num_bars += 1
            opens = schedule.start.repeat(num_bars)   # add row but dont shift up
            time_series = (opens.groupby(opens.index).cumcount()) * self.frequency + opens
        if not self.force_close is None:
            # Trim timestamps past the close; optionally re-add the close.
            time_series = time_series[time_series.le(schedule.end.repeat(num_bars))]
            if self.force_close:
                time_series = pd.concat([time_series, schedule.end]).sort_values()
        return time_series
    def __call__(self, schedule, frequency, closed='right', force_close=True, **kwargs):
        """
        See class docstring for more information.

        :param schedule: schedule of a calendar, which may or may not include break_start and break_end columns
        :param frequency: frequency string that is used by pd.Timedelta to calculate the timestamps
            this must be "1D" or higher frequency
        :param closed: the way the intervals are labeled
            'right': use the end of the interval
            'left': use the start of the interval
            None: (or 'both') use the end of the interval but include the start of the first interval
        :param force_close: how the last value of a trading session is handled
            True: guarantee that the close of the trading session is the last value
            False: guarantee that there is no value greater than the close of the trading session
            None: leave the last value as it is calculated based on the closed parameter
        :param kwargs: unused. Solely for compatibility.
        :return: pd.DatetimeIndex of datetime64[ns, UTC]
        """
        # Re-initialize so the shared singleton picks up the new arguments.
        self.__init__(schedule, frequency, closed, force_close)
        if self.has_breaks:
            # rearrange the schedule, to make every row one session
            before = schedule[["market_open", "break_start"]].set_index(schedule["market_open"])
            after = schedule[["break_end", "market_close"]].set_index(schedule["break_end"])
            before.columns = after.columns = ["start", "end"]
            schedule = pd.concat([before, after]).sort_index()
        else:
            schedule = schedule.rename(columns= {"market_open": "start", "market_close": "end"})
        schedule = schedule[schedule.start.ne(schedule.end)]  # drop the 'no-trading sessions'
        self._check_overlap(schedule)
        self._check_disappearing_session(schedule)
        time_series = self._calc_time_series(schedule)
        time_series.name = None
        return pd.DatetimeIndex(time_series.drop_duplicates())
date_range = _date_range()
| mit | 3ad10739d05412b15562dee6fa28fc5e | 48.475556 | 115 | 0.641574 | 4.247234 | false | false | false | false |
reanahub/reana-client | reana_client/printer.py | 1 | 1833 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""reana-client output print configuration."""
import click
from reana_client.config import (
PRINTER_COLOUR_ERROR,
PRINTER_COLOUR_INFO,
PRINTER_COLOUR_SUCCESS,
PRINTER_COLOUR_WARNING,
)
def display_message(msg, msg_type=None, indented=False):
    """Display messages in console.

    Info messages print a bold "==> " (or " -> INFO: " when indented)
    prefix to stdout; error/warning/success messages print a colored
    prefix, with errors going to stderr; any other type prints plainly.

    :param msg: Message to display
    :param msg_type: Type of message (info/note/warning/error)
    :param indented: Message indented or not
    :type msg: str
    :type msg_type: str
    :type indented: bool
    """
    msg_color_map = {
        "success": PRINTER_COLOUR_SUCCESS,
        "warning": PRINTER_COLOUR_WARNING,
        "error": PRINTER_COLOUR_ERROR,
        "info": PRINTER_COLOUR_INFO,
    }
    # Unknown types fall back to no color.
    msg_color = msg_color_map.get(msg_type, "")
    if msg_type == "info":
        if indented:
            click.secho(
                " -> {}: ".format(msg_type.upper()),
                bold=True,
                nl=False,
                fg=msg_color,
            )
            click.secho("{}".format(msg), nl=True)
        else:
            # Non-indented info: bold arrow prefix, bold message.
            click.secho("==> ", bold=True, nl=False)
            click.secho("{}".format(msg), bold=True, nl=True)
    elif msg_type in ["error", "warning", "success"]:
        prefix_tpl = " -> {}: " if indented else "==> {}: "
        # Only errors are routed to stderr.
        click.secho(
            prefix_tpl.format(msg_type.upper()),
            bold=True,
            nl=False,
            err=msg_type == "error",
            fg=msg_color,
        )
        click.secho("{}".format(msg), bold=False, err=msg_type == "error", nl=True)
    else:
        click.secho("{}".format(msg), nl=True)
| mit | cc456bcb9a99d4cd6e532148aeaa7b07 | 28.564516 | 83 | 0.559738 | 3.445489 | false | false | false | false |
nerevu/riko | riko/modules/typecast.py | 1 | 4059 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.typecast
~~~~~~~~~~~~~~~~~~~~~
Provides functions for casting fields into specific types.
Examples:
basic usage::
>>> from riko.modules.typecast import pipe
>>>
>>> conf = {'type': 'date'}
>>> next(pipe({'content': '5/4/82'}, conf=conf))['typecast']['year']
1982
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko.utils import cast
OPTS = {'field': 'content'}  # operate on the 'content' attribute by default
DEFAULTS = {'type': 'text'}  # cast to text unless configured otherwise
logger = gogo.Gogo(__name__, monolog=True).logger
def parser(content, objconf, skip=False, **kwargs):
    """Cast the pipe content into the configured type.

    Args:
        content (scalar): The content to cast
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: typecast)
        stream (dict): The original item

    Returns:
        dict: The item

    Examples:
        >>> from meza.fntools import Objectify
        >>>
        >>> item = {'content': '1.0'}
        >>> objconf = Objectify({'type': 'int'})
        >>> kwargs = {'stream': item, 'assign': 'content'}
        >>> parser(item['content'], objconf, **kwargs)
        1
    """
    if skip:
        # Pass the untouched stream through when parsing is disabled.
        return kwargs['stream']
    return cast(content, objconf.type)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A processor module that asynchronously casts a field into a
    specific type.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain the key 'type'.
            type (str): The object type to cast to (default: text)
        assign (str): Attribute to assign parsed content (default: typecast)
        field (str): Item attribute to operate on (default: 'content')

    Returns:
        Deferred: twisted.internet.defer.Deferred item with type casted content

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['typecast'])
        ...     d = async_pipe({'content': '1.0'}, conf={'type': 'int'})
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        1
    """
    # The @processor decorator supplies the async plumbing; the actual
    # work happens in `parser`.
    return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
    """A processor that casts a field into a specific type.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain the key 'type'.
            type (str): The object type to cast to (default: text)
        assign (str): Attribute to assign parsed content (default: typecast)
        field (str): Item attribute to operate on (default: 'content')

    Yields:
        dict: an item with type casted content

    Examples:
        >>> from datetime import datetime as dt
        >>> next(pipe({'content': '1.0'}, conf={'type': 'int'}))['typecast']
        1
        >>> item = {'content': '5/4/82'}
        >>> conf = {'type': 'date'}
        >>> date = next(pipe(item, conf=conf, emit=True))['date']
        >>> date.isoformat() == '1982-05-04T00:00:00+00:00'
        True
        >>> item = {'content': dt(1982, 5, 4).timetuple()}
        >>> date = next(pipe(item, conf=conf, emit=True))['date']
        >>> date.isoformat() == '1982-05-04T00:00:00+00:00'
        True
        >>> item = {'content': 'False'}
        >>> conf = {'type': 'bool'}
        >>> next(pipe(item, conf=conf, emit=True))
        False
    """
    # The @processor decorator handles iteration; the work is in `parser`.
    return parser(*args, **kwargs)
| mit | 2086de5dd289a3db79444ccd73a78212 | 29.75 | 79 | 0.573787 | 3.884211 | false | false | false | false |
nerevu/riko | riko/modules/timeout.py | 1 | 6570 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.timeout
~~~~~~~~~~~~~~~~~~~~
Provides functions for returning items from a stream until a certain amount of
time has passed.
Contrast this with the truncate module, which also limits the number of items,
but returns items based on a count.
Examples:
basic usage::
>>> from time import sleep
>>> from riko.modules.timeout import pipe
>>>
>>> def gen_items():
... for x in range(50):
... sleep(1)
... yield {'x': x}
>>>
>>> len(list(pipe(gen_items(), conf={'seconds': '3'})))
3
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import signal
from datetime import timedelta
from . import operator
import pygogo as gogo
OPTS = {'ptype': 'int'}  # conf values are parsed as integers (time units)
DEFAULTS = {}
logger = gogo.Gogo(__name__, monolog=True).logger
class TimeoutIterator(object):
    """Iterator wrapper that stops yielding once ``timeout`` seconds have
    elapsed, measured from the first ``__next__`` call via SIGALRM.

    NOTE: relies on Unix signals, so this only works on the main thread
    of POSIX platforms; ``timeout`` must be an integer (0 disables it).
    """
    def __init__(self, elements, timeout=0):
        self.iter = iter(elements)
        self.timeout = timeout
        self.timedout = False
        self.started = False
    def _handler(self, signum, frame):
        # SIGALRM fired: flag expiry; the next __next__ call stops.
        self.timedout = True
    def __iter__(self):
        return self
    def __next__(self):
        if self.timedout:
            raise StopIteration
        if not self.started:
            # Arm the alarm lazily, on first consumption.
            signal.signal(signal.SIGALRM, self._handler)
            signal.alarm(self.timeout)
            self.started = True
        try:
            return next(self.iter)
        except StopIteration:
            # Source finished before the deadline: disarm the alarm.
            signal.alarm(0)
            self.timedout = True
            raise
def parser(stream, objconf, tuples, **kwargs):
    """Wrap ``stream`` so that iteration stops after the configured time.

    Args:
        stream (Iter[dict]): The source. Note: this shares the `tuples`
            iterator, so consuming it will consume `tuples` as well.
        objconf (obj): the item independent configuration (an Objectify
            instance) whose entries are `timedelta` keyword arguments
            ('days', 'seconds', 'minutes', etc.).
        tuples (Iter[(dict, obj)]): Iterable of tuples of (item, objconf).
            Note: this shares the `stream` iterator, so consuming it will
            consume `stream` as well.
        kwargs (dict): Keyword arguments.

    Returns:
        Iter(dict): The output stream
    """
    # Collapse all configured time units into whole seconds for alarm().
    seconds = int(timedelta(**objconf).total_seconds())
    return TimeoutIterator(stream, seconds)
@operator(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """An operator that asynchronously returns items from a stream until a
    certain amount of time has passed.

    Args:
        items (Iter[dict]): The source.
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain any of the following
            keys: 'days', 'seconds', 'microseconds', 'milliseconds', 'minutes',
            'hours', 'weeks'.

            days (int): Number of days before signaling a timeout (default: 0)
            seconds (int): Number of seconds before signaling a timeout
                (default: 0)
            microseconds (int): Number of microseconds before signaling a
                timeout (default: 0)
            milliseconds (int): Number of milliseconds before signaling a
                timeout (default: 0)
            minutes (int): Number of minutes before signaling a timeout
                (default: 0)
            hours (int): Number of hours before signaling a timeout
                (default: 0)
            weeks (int): Number of weeks before signaling a timeout
                (default: 0)

    Returns:
        Deferred: twisted.internet.defer.Deferred stream

    Examples:
        >>> from time import sleep
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def gen_items():
        ...     for x in range(50):
        ...         sleep(1)
        ...         yield {'x': x}
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(len(list(x)))
        ...     d = async_pipe(gen_items(), conf={'seconds': '3'})
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        3
    """
    # The @operator decorator supplies the async plumbing; the actual
    # work happens in `parser`.
    return parser(*args, **kwargs)
@operator(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
    """An operator that returns items from a stream until a certain amount of
    time has passed.

    Args:
        items (Iter[dict]): The source.
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain any of the following
            keys: 'days', 'seconds', 'microseconds', 'milliseconds', 'minutes',
            'hours', 'weeks'.

            days (int): Number of days before signaling a timeout (default: 0)
            seconds (int): Number of seconds before signaling a timeout
                (default: 0)
            microseconds (int): Number of microseconds before signaling a
                timeout (default: 0)
            milliseconds (int): Number of milliseconds before signaling a
                timeout (default: 0)
            minutes (int): Number of minutes before signaling a timeout
                (default: 0)
            hours (int): Number of hours before signaling a timeout
                (default: 0)
            weeks (int): Number of weeks before signaling a timeout
                (default: 0)

    Yields:
        dict: an item

    Examples:
        >>> from time import sleep
        >>>
        >>> def gen_items():
        ...     for x in range(50):
        ...         sleep(1)
        ...         yield {'x': x}
        >>>
        >>> len(list(pipe(gen_items(), conf={'seconds': '3'})))
        3
    """
    # Synchronous twin of `async_pipe`: same parser, no Deferred wrapping.
    return parser(*args, **kwargs)
| mit | c8b81e293fe2ef11caa48b1ec43364ad | 30.137441 | 79 | 0.554033 | 4.385848 | false | false | false | false |
nerevu/riko | riko/bado/util.py | 1 | 2503 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.bado.util
~~~~~~~~~~~~~~
Provides functions for creating asynchronous riko pipes
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.bado.util import xml2etree
"""
from os import environ
from sys import executable
from functools import partial
from riko.parsers import _make_content, entity2text
try:
from twisted.internet.defer import maybeDeferred, Deferred
except ImportError:
maybeDeferred = lambda *args: None
else:
from twisted.internet import defer
from twisted.internet.utils import getProcessOutput
from twisted.internet.reactor import callLater
from . import microdom
from .microdom import EntityReference
async_none = defer.succeed(None)
async_return = partial(defer.succeed)
async_partial = lambda f, **kwargs: partial(maybeDeferred, f, **kwargs)
def async_sleep(seconds):
    """Return a Deferred that fires with ``None`` after ``seconds`` seconds."""
    deferred = Deferred()
    callLater(seconds, deferred.callback, None)
    return deferred
def defer_to_process(command):
    """Run ``command`` as a Python one-liner in a child process.

    Returns a Deferred firing with the process's stdout.
    """
    argv = ['-c', command]
    return getProcessOutput(executable, argv, environ)
def xml2etree(f, xml=True):
    """Parse XML (or lenient HTML) into a microdom tree.

    ``f`` may be a file-like object (anything with ``read``) or a string;
    ``xml=False`` selects microdom's lenient HTML parsing.
    """
    if hasattr(f, 'read'):
        parse = microdom.parseXML if xml else partial(microdom.parse, lenient=True)
    else:
        parse = (
            microdom.parseXMLString if xml
            else partial(microdom.parseString, lenient=True))

    return parse(f)
def etree2dict(element, tag='content'):
    """Convert a microdom element tree into a dict imitating how Yahoo Pipes
    does it.

    Attributes become dict keys; text/entity values are folded in under
    ``tag`` via `_make_content`; children are converted recursively.  A leaf
    node with no attributes collapses to its bare 'content' value.

    TODO: checkout twisted.words.xish
    """
    # start from the element's attributes (if any)
    i = dict(element.attributes) if hasattr(element, 'attributes') else {}
    value = element.nodeValue if hasattr(element, 'nodeValue') else None
    if isinstance(element, EntityReference):
        # resolve e.g. '&amp;' into its literal text
        value = entity2text(value)
    i.update(_make_content(i, value, tag))
    for child in element.childNodes:
        tag = child.tagName if hasattr(child, 'tagName') else 'content'
        value = etree2dict(child, tag)
        # try to join the content first since microdom likes to split up
        # elements that contain a mix of text and entity reference
        try:
            i.update(_make_content(i, value, tag, append=False))
        except TypeError:
            # joining failed (non-concatenable content): fall back to
            # _make_content's default append behavior
            i.update(_make_content(i, value, tag))
    if ('content' in i) and not set(i).difference(['content']):
        # element is leaf node and doesn't have attributes
        i = i['content']
    return i
| mit | 91fb39d6abea65cec02105c714975924 | 26.505495 | 76 | 0.672393 | 3.941732 | false | false | false | false |
nerevu/riko | riko/utils.py | 1 | 16899 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.utils
~~~~~~~~~~~~~~
Provides utility classes and functions
"""
import re
import sys
import itertools as it
import fcntl
from math import isnan
from functools import partial, wraps
from operator import itemgetter
from os import O_NONBLOCK, path as p
from io import BytesIO, StringIO, TextIOBase
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, Request
from werkzeug.local import LocalProxy
import requests
import pygogo as gogo
import mezmorize
try:
import __builtin__ as _builtins
except ImportError:
import builtins as _builtins
from meza import compat
from meza.io import reencode
from meza.fntools import SleepyDict
from mezmorize.utils import get_cache_type
from riko import ENCODING, __version__
from riko.cast import cast
logger = gogo.Gogo(__name__, verbose=False, monolog=True).logger
DEF_NS = 'https://github.com/nerevu/riko'
def get_abspath(url):
    """Normalize ``url`` into a decoded absolute URL.

    Bare host names get an ``http://`` scheme; relative ``file://`` paths are
    resolved against the package's parent directory (``file:///`` is already
    absolute and left alone).
    """
    if url and '://' not in url:
        url = 'http://%s' % url

    is_rel_file = (
        url and url.startswith('file://') and not url.startswith('file:///'))

    if is_rel_file:
        parent = p.dirname(p.dirname(__file__))
        abspath = p.abspath(p.join(parent, url[7:]))
        url = 'file://%s' % abspath

    return compat.decode(url)
# https://trac.edgewall.org/ticket/2066#comment:1
# http://stackoverflow.com/a/22675049/408556
def make_blocking(f):
    """Clear the O_NONBLOCK flag on ``f``'s file descriptor (no-op if it is
    already blocking).

    See https://trac.edgewall.org/ticket/2066#comment:1 and
    http://stackoverflow.com/a/22675049/408556
    """
    fd = f.fileno()
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFL)

    if current_flags & O_NONBLOCK:
        fcntl.fcntl(fd, fcntl.F_SETFL, current_flags & ~O_NONBLOCK)
# Nose's output capture can leave stderr in non-blocking mode, which makes
# logging writes fail; force it back to blocking when running under nose.
if 'nose' in sys.modules:
    logger.debug('Running in nose environment...')
    make_blocking(sys.stderr)
def default_user_agent(name="riko"):
    """Build the default User-Agent string, e.g. ``riko/<version>``.

    :rtype: str
    """
    return f'{name}/{__version__}'
class Chainable(object):
    """Wrap a value so method calls can be chained fluently.

    Attribute lookup resolves ``name`` against the wrapped data, then the
    builtins, then itertools (via `multi_try`); calling the result applies
    the resolved method to the wrapped data.
    """
    def __init__(self, data, method=None):
        self.data = data         # the wrapped value
        self.method = method     # method resolved by the last __getattr__
        self.list = list(data)
    def __getattr__(self, name):
        # search order: data's own attrs, builtins, itertools
        funcs = (partial(getattr, x) for x in [self.data, _builtins, it])
        zipped = zip(funcs, it.repeat(AttributeError))
        method = multi_try(name, zipped, default=None)
        return Chainable(self.data, method)
    def __call__(self, *args, **kwargs):
        try:
            return Chainable(self.method(self.data, *args, **kwargs))
        except TypeError:
            # some callables (e.g. map-like) want the data as the 2nd arg
            return Chainable(self.method(args[0], self.data, **kwargs))
def invert_dict(d):
    """Swap a dict's keys and values (values must be hashable)."""
    return dict(zip(d.values(), d.keys()))
def multi_try(source, zipped, default=None):
    """Try each ``(func, error)`` pair on ``source``, returning the first
    successful result.

    Args:
        source: The value passed to each function.
        zipped (iter): Pairs of ``(callable, exception class)``; the exception
            is the one that callable is allowed to raise and be skipped for.
        default: Returned when every call fails (default: None).

    Returns:
        The first ``func(source)`` that doesn't raise its paired error, else
        ``default``.
    """
    # Simplified from a for/else with an unused accumulator: just return on
    # the first success and fall through to the default.
    for func, error in zipped:
        try:
            return func(source)
        except error:
            pass

    return default
def get_response_content_type(response):
    """Return the lowercased Content-Type header of a response.

    Supports both urllib-style responses (``getheader``) and requests-style
    responses (a ``headers`` mapping).
    """
    try:
        header = response.getheader('Content-Type', '')
    except AttributeError:
        header = response.headers.get('Content-Type', '')

    return header.lower()
def get_response_encoding(response, def_encoding=ENCODING):
    """Best-effort detection of a response's character encoding.

    Tries, in order: the message's declared encoding, its charset, the
    content charset, and finally a ``charset=`` fragment in the Content-Type
    header; falls back to ``def_encoding``.
    """
    info = response.info()
    try:
        # older email.Message API
        encoding = info.getencoding()
    except AttributeError:
        encoding = info.get_charset()
    # '7bit' is a transfer encoding, not a character set — ignore it
    encoding = None if encoding == '7bit' else encoding
    if not encoding and hasattr(info, 'get_content_charset'):
        encoding = info.get_content_charset()
    if not encoding:
        # last resort: parse 'charset=...' out of the Content-Type header
        content_type = get_response_content_type(response)
        if 'charset' in content_type:
            ctype = content_type.split('=')[1]
            encoding = ctype.strip().strip('"').strip("'")
    extracted = encoding or def_encoding
    assert extracted
    return extracted
# https://docs.python.org/3.3/reference/expressions.html#examples
def auto_close(stream, f):
try:
for record in stream:
yield record
finally:
f.close()
def opener(url, memoize=False, delay=0, encoding=ENCODING, params=None, **kwargs):
    """Open ``url`` and return a ``(response, content_type)`` pair.

    HTTP URLs with query params go through `requests` (streamed); everything
    else goes through `urlopen`.  When ``memoize`` is truthy the full body is
    read so it can be cached; otherwise a file-like response is returned.
    ``kwargs`` may contain ``timeout`` and ``decode`` (decode bytes to text).
    """
    params = params or {}
    timeout = kwargs.get('timeout')
    decode = kwargs.get('decode')
    if url.startswith('http') and params:
        r = requests.get(url, params=params, stream=True)
        r.raw.decode_content = decode
        # memoized responses must be fully materialized text
        response = r.text if memoize else r.raw
    else:
        req = Request(url, headers={'User-Agent': default_user_agent()})
        context = SleepyDict(delay=delay) if delay else None
        try:
            r = urlopen(req, context=context, timeout=timeout)
        except TypeError:
            # older urlopen without the `context` kwarg — retry without it
            r = urlopen(req, timeout=timeout)
        except HTTPError as e:
            raise URLError(f'{url} returned {e.code}: {e.reason}')
        except URLError as e:
            raise URLError(f'{url}: {e.reason}')
        text = r.read() if memoize else None
        if decode:
            encoding = get_response_encoding(r, encoding)
            if text:
                response = compat.decode(text, encoding)
            else:
                # stream-decode; keep a handle on the raw response
                response = reencode(r.fp, encoding, decode=True)
                response.r = r
        else:
            response = text or r
    content_type = get_response_content_type(r)
    return (response, content_type)
def get_opener(memoize=False, **kwargs):
    """Build an `opener` callable with ``memoize`` and ``kwargs`` pre-bound.

    When ``memoize`` is truthy, the callable is additionally wrapped with a
    mezmorize cache (defaulting to a non-spread cache type).
    """
    bound = wraps(opener)(partial(opener, memoize=memoize, **kwargs))

    if not memoize:
        return bound

    kwargs.setdefault('cache_type', get_cache_type(spread=False))
    return mezmorize.memoize(**kwargs)(bound)
class fetch(TextIOBase):
    """File-like wrapper around `opener` for reading a URL's content.

    Exposes read/readline/seek/close delegated to the underlying response
    (or to an in-memory buffer when memoized), plus the detected
    content type and a convenience `ext` property.

    http://stackoverflow.com/a/22836333/408556
    """
    def __init__(self, url=None, memoize=False, **kwargs):
        # TODO: need to use separate timeouts for memoize and urlopen
        if memoize:
            # LocalProxy defers opener construction until first use
            self.opener = LocalProxy(lambda: get_opener(memoize=True, **kwargs))
        else:
            self.opener = get_opener(**kwargs)
        responses = self.opener(get_abspath(url))
        try:
            response, self.content_type = responses
        except ValueError:
            # HACK: This happens for memoized responses. Not sure why though!
            response, self.content_type = responses, 'application/json'
        if memoize:
            # memoized content is a str/bytes blob: wrap it in a buffer
            wrapper = StringIO if kwargs.get('decode') else BytesIO
            f = wrapper(response)
        else:
            f = response
        # delegate the file protocol to the underlying object
        self.close = f.close
        self.read = f.read
        self.readline = f.readline
        try:
            self.seek = f.seek
        except AttributeError:
            # not all responses are seekable
            pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    @property
    def ext(self):
        # file extension inferred from the content type, e.g. 'xml', 'json'
        if not self.content_type:
            ext = None
        elif 'xml' in self.content_type:
            ext = 'xml'
        elif 'json' in self.content_type:
            ext = 'json'
        else:
            ext = self.content_type.split('/')[1].split(';')[0]
        return ext
def def_itemgetter(attr, default=0, _type=None):
    """Like operator.itemgetter, but a missing key or a NaN value yields
    ``default``, and values are optionally cast to ``_type`` first.
    """
    def keyfunc(item):
        raw = item.get(attr, default)
        value = cast(raw, _type) if _type else raw

        try:
            nan = isnan(value)
        except TypeError:
            nan = False

        return default if nan else value

    return keyfunc
# TODO: move this to meza.process.group
def group_by(iterable, attr, default=None):
keyfunc = def_itemgetter(attr, default)
data = list(iterable)
order = unique_everseen(data, keyfunc)
sorted_iterable = sorted(data, key=keyfunc)
grouped = it.groupby(sorted_iterable, keyfunc)
groups = {str(k): list(v) for k, v in grouped}
# return groups in original order
return ((key, groups[key]) for key in order)
def unique_everseen(iterable, key=None):
    """Yield the stringified key of each element the first time it appears,
    remembering all keys ever seen.

    unique_everseen('ABBCcAD', str.lower) --> a b c d
    """
    seen = set()

    for element in iterable:
        marker = str(key(element))

        if marker in seen:
            continue

        seen.add(marker)
        yield marker
def betwix(iterable, start=None, stop=None, inc=False):
    """ Extract selected elements from an iterable. But unlike `islice`,
    extract based on the element's value instead of its position.

    Args:
        iterable (iter): The initial sequence
        start (str): The fragment to begin with (inclusive)
        stop (str): The fragment to finish at (exclusive)
        inc (bool): Make stop operate inclusively (useful if reading a file and
            the start and stop fragments are on the same line)

    Returns:
        Iter: New dict with specified keys removed

    Examples:
        >>> from io import StringIO
        >>>
        >>> list(betwix('ABCDEFG', stop='C')) == ['A', 'B']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E')) == ['C', 'D']
        True
        >>> list(betwix('ABCDEFG', 'C')) == ['C', 'D', 'E', 'F', 'G']
        True
        >>> f = StringIO('alpha\\n<beta>\\ngamma\\n')
        >>> list(betwix(f, '<', '>', True)) == ['<beta>\\n']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E', True)) == ['C', 'D', 'E']
        True
    """
    # Named defs instead of assigned lambdas (PEP 8 E731); behavior unchanged.
    def inc_takewhile(predicate, _iter):
        # like it.takewhile, but also yields the first failing element
        for x in _iter:
            yield x

            if not predicate(x):
                break

    def get_pred(sentinel):
        # True while `sentinel` has not appeared in the element
        return lambda x: sentinel not in x

    pred = get_pred(stop)
    first = it.dropwhile(get_pred(start), iterable) if start else iterable

    if stop and inc:
        last = inc_takewhile(pred, first)
    elif stop:
        last = it.takewhile(pred, first)
    else:
        last = first

    return last
def dispatch(split, *funcs):
    """Deliver each item of ``split`` to a different function, positionally.

        /--> item1 --> double(item1) -----> \\
       /                                     \\
    split ----> item2 --> triple(item2) -----> _OUTPUT
       \\                                     /
        \\--> item3 --> quadruple(item3) --> /

    Example::

        split = ('bar', 'baz', 'qux')
        double = lambda word: word * 2
        triple = lambda word: word * 3
        quadruple = lambda word: word * 4
        dispatch(split, double, triple, quadruple)
        # -> ['barbar', 'bazbazbaz', 'quxquxquxqux']
    """
    results = []

    for part, fn in zip(split, funcs):
        results.append(fn(part))

    return results
def broadcast(item, *funcs):
    """Deliver the same ``item`` to every function, in order.

        /--> item --> double(item) -----> \\
       /                                   \\
    item -----> item --> triple(item) -----> _OUTPUT
       \\                                   /
        \\--> item --> quadruple(item) --> /

    Example::

        double = lambda word: word * 2
        triple = lambda word: word * 3
        quadruple = lambda word: word * 4
        broadcast('bar', double, triple, quadruple)
        # -> ['barbar', 'barbarbar', 'barbarbarbar']
    """
    results = []

    for fn in funcs:
        results.append(fn(item))

    return results
def _gen_words(match, splits):
groups = list(it.dropwhile(lambda x: not x, match.groups()))
for s in splits:
try:
num = int(s)
except ValueError:
word = s
else:
word = next(it.islice(groups, num, num + 1))
yield word
def multi_substitute(word, rules):
    """Apply multiple regex rules to `word` in a combined single-pass regex.

    Based on http://code.activestate.com/recipes/
    576710-multi-regex-single-pass-replace-of-multiple-regexe/

    Each rule is a dict (see `get_new_rule`): 'match', 'replace', 'count',
    'flags', 'series', 'offset'.  All patterns are OR-ed into one regex with
    named groups ('match_0', 'match_1', ...) so the firing rule can be
    recovered from the matched group's name.
    """
    flags = rules[0]['flags']
    # Create a combined regex from the rules
    tuples = ((p, r['match']) for p, r in enumerate(rules))
    regexes = ('(?P<match_%i>%s)' % (p, r) for p, r in tuples)
    pattern = '|'.join(regexes)
    regex = re.compile(pattern, flags)
    resplit = re.compile('\\$(\\d+)')
    # For each match, look-up corresponding replace value in dictionary
    rules_in_series = filter(itemgetter('series'), rules)
    rules_in_parallel = (r for r in rules if not r['series'])
    try:
        # all parallel rules share one pass, so one representative suffices
        has_parallel = [next(rules_in_parallel)]
    except StopIteration:
        has_parallel = []
    # one pass of the combined regex per series rule, plus one shared pass
    # covering any parallel rules
    for _ in it.chain(rules_in_series, has_parallel):
        prev_name = None
        prev_is_series = None
        i = 0  # running offset: how much replacements have shifted `word`
        for match in regex.finditer(word):
            items = match.groupdict().items()
            # the named group that matched identifies which rule fired
            item = next(filter(itemgetter(1), items))
            if not item:
                continue
            name = item[0]
            rule = rules[int(name[6:])]  # strip the 'match_' prefix
            series = rule.get('series')
            kwargs = {'count': rule['count'], 'series': series}
            is_previous = name == prev_name
            singlematch = kwargs['count'] == 1
            is_series = prev_is_series or kwargs['series']
            isnt_previous = bool(prev_name) and not is_previous
            # skip repeat hits of a single-match rule, and skip other rules
            # while a series rule is in progress
            if (is_previous and singlematch) or (isnt_previous and is_series):
                continue
            prev_name = name
            prev_is_series = series
            if resplit.findall(rule['replace']):
                # replacement contains '$n' backrefs: expand via match groups
                splits = resplit.split(rule['replace'])
                words = _gen_words(match, splits)
            else:
                # plain replacement text: splice over the matched span,
                # adjusted by the accumulated offset
                splits = rule['replace']
                start = match.start() + i
                end = match.end() + i
                words = [word[:start], splits, word[end:]]
                i += rule['offset']
            word = ''.join(words)
    return word
def substitute(word, rule):
    """Apply one compiled-regex rule to ``word``.

    ``rule`` needs 'match' (compiled pattern), 'replace', and 'count';
    if nothing matched and the rule has a non-None 'default', that default
    is returned instead.  Falsy ``word`` is returned untouched.
    """
    if not word:
        return word

    replaced, n_replacements = rule['match'].subn(
        rule['replace'], word, rule['count'])

    if not n_replacements and rule.get('default') is not None:
        return rule['default']

    return replaced
def get_new_rule(rule, recompile=False):
    """Normalize a user-supplied substitution rule into the internal format.

    Flags default to IGNORECASE (unless 'casematch') plus MULTILINE|DOTALL
    (unless 'singlelinematch').  With ``recompile``, the pattern is compiled
    and '$n' backrefs in the replacement are rewritten to r'\\n' form.
    """
    flags = 0 if rule.get('casematch') else re.IGNORECASE

    if not rule.get('singlelinematch'):
        flags |= re.MULTILINE
        flags |= re.DOTALL

    count = 1 if rule.get('singlematch') else 0

    if recompile and '$' in rule['replace']:
        # convert '$1'-style backrefs into the r'\1' form re.sub expects
        replace = re.sub(r'\$(\d+)', r'\\\1', rule['replace'], 0)
    else:
        replace = rule['replace']

    match = re.compile(rule['match'], flags) if recompile else rule['match']

    return {
        'match': match,
        'replace': replace,
        'default': rule.get('default'),
        'field': rule.get('field'),
        'count': count,
        'flags': flags,
        'series': rule.get('seriesmatch', True),
        'offset': int(rule.get('offset') or 0),
    }
def multiplex(sources):
    """Combine multiple generators into one lazily-flattened stream."""
    for source in sources:
        yield from source
def gen_entries(parsed):
    """Yield feedparser entries augmented with Yahoo-Pipes-style keys.

    Raises the parser's bozo_exception (if any) before yielding.  Each entry
    gains 'pubDate', 'y:published', 'dc:creator', 'author.uri',
    'author.name', 'y:title', and 'y:id'.
    """
    if parsed.get('bozo_exception'):
        raise Exception(parsed['bozo_exception'])

    for entry in parsed['entries']:
        # prevent feedparser deprecation warnings: prefer published_parsed
        if 'published_parsed' in entry:
            updated = entry['published_parsed']
        else:
            updated = entry.get('updated_parsed')

        author_detail = entry.get('author_detail', {})
        entry['pubDate'] = updated
        entry['y:published'] = updated
        entry['dc:creator'] = entry.get('author')
        entry['author.uri'] = author_detail.get('href')
        entry['author.name'] = author_detail.get('name')
        entry['y:title'] = entry.get('title')
        entry['y:id'] = entry.get('id')
        yield entry
def gen_items(content, key=None):
    """Recursively flatten ``content`` into a stream of items.

    List-like values (anything with an ``append`` method) are flattened one
    element at a time; falsy leaves are dropped.  When ``key`` is given each
    leaf is wrapped as ``{key: leaf}``, otherwise it is yielded as-is.
    """
    if hasattr(content, 'append'):
        for nested in content:
            # `yield from` replaces the manual re-yield loop
            yield from gen_items(nested, key)
    elif content:
        yield {key: content} if key else content
| mit | 6d6f1a742eeb7f7d80900931a4cf2112 | 28.036082 | 82 | 0.569797 | 3.90187 | false | false | false | false |
nerevu/riko | riko/bado/sux.py | 1 | 21031 | # -*- test-case-name: twisted.web.test.test_xml -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
*S*mall, *U*ncomplicated *X*ML.
This is a very simple implementation of XML/HTML as a network
protocol. It is not at all clever. Its main features are that it
does not:
- support namespaces
- mung mnemonic entity references
- validate
- perform *any* external actions (such as fetching URLs or writing files)
under *any* circumstances
- has lots and lots of horrible hacks for supporting broken HTML (as an
option, they're not on by default).
"""
import pygogo as gogo
from chardet import detect
from meza.compat import decode
try:
from twisted.internet.protocol import Protocol
except ImportError:
Protocol = object
else:
from twisted.python.reflect import prefixedMethodNames as find_method_names
logger = gogo.Gogo(__name__, monolog=True).logger
# Elements of the three-tuples in the state table.
BEGIN_HANDLER = 0
DO_HANDLER = 1
END_HANDLER = 2
# Extra characters (beyond alphanumerics) allowed in tag/attribute names
IDENTCHARS = '.-_:'
# Additional characters tolerated in lenient (broken-HTML) mode
LENIENT_IDENTCHARS = IDENTCHARS + ';+#/%~'
# Do-nothing default handler used when a state lacks a begin_/do_/end_ method
nop = lambda *args, **kwargs: None
def zipfndict(*fndicts):
    """Merge state-handler dicts into (key, handler-tuple) pairs.

    For every key in each dict, yields the key plus a tuple containing each
    dict's handler for that key (``nop`` where absent).  Keys appearing in
    several dicts are yielded once per dict.
    """
    for fndict in fndicts:
        for key in fndict:
            yield key, tuple(d.get(key, nop) for d in fndicts)
def get_method_obj_dict(obj, prefix):
    """Map each ``prefix``-stripped method name of ``obj``'s class to the
    corresponding bound method.
    """
    method_names = find_method_names(obj.__class__, prefix)
    return {n: getattr(obj, prefix + n) for n in method_names}
class ParseError(Exception):
    """Raised on malformed markup; records the file, line, and column."""

    def __init__(self, filename, line, col, message):
        self.filename = filename
        self.line = line
        self.col = col
        self.message = message

    def __str__(self):
        location = (self.filename, self.line, self.col, self.message)
        return "%s:%s:%s: %s" % location
class XMLParser(Protocol):
    """Incremental, state-machine XML/HTML parser (SAX-ish protocol).

    Feed bytes via dataReceived(); each parser state ``foo`` is implemented
    by optional ``begin_foo``/``do_foo``/``end_foo`` methods discovered by
    reflection and cached in a per-class state table.  Subclasses override
    the ``got*`` callbacks (gotTagStart, gotText, ...) to consume events.
    With ``lenient=True`` many broken-HTML constructs are tolerated instead
    of raising ParseError.
    """
    state = None
    encoding = None
    bom = None
    attrname = ''
    attrval = ''
    # _leadingBodyData will sometimes be set before switching to the
    # 'bodydata' state, when we "accidentally" read a byte of bodydata
    # in a different state.
    _leadingBodyData = None
    def __init__(self, filename='unnamed', **kwargs):
        self.filename = filename
        self.lenient = kwargs.get('lenient')
        self.strict = not self.lenient
    # protocol methods
    def connectionMade(self):
        self.lineno = 1
        self.colno = 0
    def saveMark(self):
        '''Get the line number and column of the last character parsed'''
        # This gets replaced during dataReceived, restored afterwards
        return (self.lineno, self.colno)
    def _raise_parse_error(self, message):
        raise ParseError(*((self.filename,) + self.saveMark() + (message,)))
    def _build_state_table(self):
        '''Return a dictionary of begin, do, end state function tuples'''
        # _build_state_table leaves something to be desired but it does what it
        # does.. probably slowly, so I'm doing some evil caching so it doesn't
        # get called more than once per class.
        stateTable = getattr(self.__class__, '__stateTable', None)
        if stateTable is None:
            prefixes = ('begin_', 'do_', 'end_')
            fndicts = (get_method_obj_dict(self, p) for p in prefixes)
            stateTable = dict(zipfndict(*fndicts))
            self.__class__.__stateTable = stateTable
        return stateTable
    def check_encoding(self, data):
        if self.encoding.startswith('UTF-16'):
            # skip the 2-byte BOM
            data = data[2:]
        if 'UTF-16' in self.encoding or 'UCS-2' in self.encoding:
            assert not len(data) & 1, 'UTF-16 must come in pairs for now'
    def maybeBodyData(self):
        if self.endtag:
            return 'bodydata'
        # Get ready for fun! We're going to allow
        # <script>if (foo < bar)</script> to work!
        # We do this by making everything between <script> and
        # </script> a Text
        # BUT <script src="foo"> will be special-cased to do regular,
        # lenient behavior, because those may not have </script>
        # -radix
        if (self.tagName == 'script' and 'src' not in self.tagAttributes):
            # we do this ourselves rather than having begin_waitforendscript
            # because that can get called multiple times and we don't want
            # bodydata to get reset other than the first time.
            self.begin_bodydata(None)
            return 'waitforendscript'
        return 'bodydata'
    def dataReceived(self, data):
        """Drive the state machine over a chunk of input bytes."""
        stateTable = self._build_state_table()
        self.encoding = self.encoding or detect(data)['encoding']
        self.check_encoding(data)
        self.state = self.state or 'begin'
        content = decode(data, self.encoding)
        # bring state, lineno, colno into local scope
        lineno, colno = self.lineno, self.colno
        curState = self.state
        # replace saveMark with a nested scope function
        saveMark = lambda: (lineno, colno)
        self.saveMark, _saveMark = saveMark, self.saveMark
        # fetch functions from the stateTable
        beginFn, doFn, endFn = stateTable[curState]
        try:
            for char in content:
                # do newline stuff
                if char == '\n':
                    lineno += 1
                    colno = 0
                else:
                    colno += 1
                newState = doFn(char)
                if newState and newState != curState:
                    # this is the endFn from the previous state
                    endFn()
                    curState = newState
                    beginFn, doFn, endFn = stateTable[curState]
                    beginFn(char)
        finally:
            self.saveMark = _saveMark
            self.lineno, self.colno = lineno, colno
        # state doesn't make sense if there's an exception..
        self.state = curState
    def connectionLost(self, reason):
        """
        End the last state we were in.
        """
        stateTable = self._build_state_table()
        stateTable[self.state][END_HANDLER]()
    # state methods
    def do_begin(self, byte):
        if byte.isspace():
            return
        if byte != '<' and self.lenient:
            # lenient mode tolerates leading body text before the first tag
            self._leadingBodyData = byte
            return 'bodydata'
        elif byte != '<':
            msg = "First char of document [%r] wasn't <" % (byte,)
            self._raise_parse_error(msg)
        return 'tagstart'
    def begin_comment(self, byte):
        self.commentbuf = ''
    def do_comment(self, byte):
        self.commentbuf += byte
        if self.commentbuf.endswith('-->'):
            self.gotComment(self.commentbuf[:-3])
            return 'bodydata'
    def begin_tagstart(self, byte):
        self.tagName = ''  # name of the tag
        self.tagAttributes = {}  # attributes of the tag
        self.termtag = 0  # is the tag self-terminating
        self.endtag = 0
    def _get_val(self, byte):
        """Pick the next state for a tag-name byte (None = stay)."""
        val = None
        alnum_or_ident = byte.isalnum() or byte in IDENTCHARS
        is_good = alnum_or_ident or byte in '/!?[' or byte.isspace()
        if byte == '-' and self.tagName == '!-':
            val = 'comment'
        elif byte.isspace() and self.tagName:
            # properly strict thing to do here is probably to only
            # accept whitespace
            val = 'waitforgt' if self.endtag else 'attrs'
        elif byte in '>/[':
            def_gt = self.strict and 'bodydata' or self.maybeBodyData()
            switch = {
                '>': 'bodydata' if self.endtag else def_gt,
                '/': 'afterslash'if self.tagName else None,
                '[': 'expectcdata' if self.tagName == '!' else None}
            val = switch[byte]
        if not (self.lenient or val or is_good):
            self._raise_parse_error('Invalid tag character: %r' % byte)
        return val
    def _update_tags(self, byte):
        """Accumulate the tag name / fire tag callbacks for one byte."""
        alnum_or_ident = byte.isalnum() or byte in IDENTCHARS
        if (byte in '!?') or alnum_or_ident:
            self.tagName += byte
        elif byte == '>' and self.endtag:
            self.gotTagEnd(self.tagName)
        elif byte == '>':
            self.gotTagStart(self.tagName, {})
        elif byte == '/' and not self.tagName:
            self.endtag = 1
        elif byte in '!?' and not self.tagName:
            self.tagName += byte
            self.termtag = 1
    def do_tagstart(self, byte):
        if byte.isspace() and not self.tagName:
            self._raise_parse_error("Whitespace before tag-name")
        elif byte in '!?' and self.tagName and self.strict:
            self._raise_parse_error("Invalid character in tag-name")
        elif byte == '[' and not self.tagName == '!':
            self._raise_parse_error("Invalid '[' in tag-name")
        val = self._get_val(byte)
        self._update_tags(byte)
        return val
    def begin_unentity(self, byte):
        self.bodydata += byte
    def do_unentity(self, byte):
        self.bodydata += byte
        return 'bodydata'
    def end_unentity(self):
        self.gotText(self.bodydata)
    def begin_expectcdata(self, byte):
        self.cdatabuf = byte
    def do_expectcdata(self, byte):
        self.cdatabuf += byte
        cdb = self.cdatabuf
        cd = '[CDATA['
        if len(cd) > len(cdb):
            if cd.startswith(cdb):
                return
            elif self.lenient:
                # WHAT THE CRAP!? MSWord9 generates HTML that includes these
                # bizarre <![if !foo]> <![endif]> chunks, so I've gotta ignore
                # 'em as best I can. this should really be a separate parse
                # state but I don't even have any idea what these _are_.
                return 'waitforgt'
            else:
                self._raise_parse_error("Mal-formed CDATA header")
        if cd == cdb:
            self.cdatabuf = ''
            return 'cdata'
        self._raise_parse_error("Mal-formed CDATA header")
    def do_cdata(self, byte):
        self.cdatabuf += byte
        if self.cdatabuf.endswith("]]>"):
            self.cdatabuf = self.cdatabuf[:-3]
            return 'bodydata'
    def end_cdata(self):
        self.gotCData(self.cdatabuf)
        self.cdatabuf = ''
    def do_attrs(self, byte):
        if byte.isalnum() or byte in IDENTCHARS:
            # XXX FIXME really handle !DOCTYPE at some point
            if self.tagName == '!DOCTYPE':
                return 'doctype'
            if self.tagName[0] in '!?':
                return 'waitforgt'
            return 'attrname'
        elif byte.isspace():
            return
        elif byte == '>':
            self.gotTagStart(self.tagName, self.tagAttributes)
            return self.strict and 'bodydata' or self.maybeBodyData()
        elif byte == '/':
            return 'afterslash'
        elif self.lenient:
            # discard and move on? Only case I've seen of this so far was:
            # <foo bar="baz"">
            return
        self._raise_parse_error("Unexpected character: %r" % byte)
    def begin_doctype(self, byte):
        self.doctype = byte
    def do_doctype(self, byte):
        if byte == '>':
            return 'bodydata'
        self.doctype += byte
    def end_doctype(self):
        self.gotDoctype(self.doctype)
        self.doctype = None
    def do_waitforgt(self, byte):
        if byte == '>':
            if self.endtag or self.lenient:
                return 'bodydata'
            return self.maybeBodyData()
    def begin_attrname(self, byte):
        self.attrname = byte
        self._attrname_termtag = 0
    def _get_attrname(self, byte):
        """Pick the next state after an attribute-name byte."""
        if byte == '=':
            val = 'beforeattrval'
        elif byte.isspace():
            val = 'beforeeq'
        elif self.lenient and byte in '"\'':
            val = 'attrval'
        elif self.lenient and byte == '>':
            val = 'bodydata' if self._attrname_termtag else None
        else:
            # something is really broken. let's leave this attribute where it
            # is and move on to the next thing
            val = None
        return val
    def do_attrname(self, byte):
        if byte.isalnum() or byte in IDENTCHARS:
            self.attrname += byte
        elif self.strict and not (byte.isspace() or byte == '='):
            msg = "Invalid attribute name: %r %r" % (self.attrname, byte)
            self._raise_parse_error(msg)
        elif byte in LENIENT_IDENTCHARS or byte.isalnum():
            self.attrname += byte
        elif byte == '/':
            self._attrname_termtag = 1
        elif byte == '>':
            # bare attribute (no '=value'): treat as boolean 'True'
            self.attrval = 'True'
            self.tagAttributes[self.attrname] = self.attrval
            self.gotTagStart(self.tagName, self.tagAttributes)
            self.gotTagEnd(self.tagName) if self._attrname_termtag else None
        return self._get_attrname(byte)
    def do_beforeattrval(self, byte):
        chars = LENIENT_IDENTCHARS
        val = None
        if byte in '"\'':
            val = 'attrval'
        elif byte.isspace():
            pass
        elif self.lenient and (byte in chars or byte.isalnum()):
            val = 'messyattr'
        elif self.lenient and byte == '>':
            self.attrval = 'True'
            self.tagAttributes[self.attrname] = self.attrval
            self.gotTagStart(self.tagName, self.tagAttributes)
            val = self.maybeBodyData()
        elif self.lenient and byte == '\\':
            # I saw this in actual HTML once:
            # <font size=\"3\"><sup>SM</sup></font>
            pass
        else:
            msg = 'Invalid initial attribute value: %r; ' % byte
            msg += 'Attribute values must be quoted.'
            self._raise_parse_error(msg)
        return val
    def begin_beforeeq(self, byte):
        self._beforeeq_termtag = 0
    def do_beforeeq(self, byte):
        if byte == '=':
            return 'beforeattrval'
        elif byte.isspace():
            return
        elif self.lenient:
            if byte.isalnum() or byte in IDENTCHARS:
                # previous attribute had no value: record it as 'True'
                self.attrval = 'True'
                self.tagAttributes[self.attrname] = self.attrval
                return 'attrname'
            elif byte == '>':
                self.attrval = 'True'
                self.tagAttributes[self.attrname] = self.attrval
                self.gotTagStart(self.tagName, self.tagAttributes)
                if self._beforeeq_termtag:
                    self.gotTagEnd(self.tagName)
                    return 'bodydata'
                return self.maybeBodyData()
            elif byte == '/':
                self._beforeeq_termtag = 1
                return
        self._raise_parse_error("Invalid attribute")
    def begin_attrval(self, byte):
        self.quotetype = byte
        self.attrval = ''
    def do_attrval(self, byte):
        if byte == self.quotetype:
            return 'attrs'
        self.attrval += byte
    def end_attrval(self):
        self.tagAttributes[self.attrname] = self.attrval
        self.attrname = self.attrval = ''
    def begin_messyattr(self, byte):
        self.attrval = byte
    def do_messyattr(self, byte):
        if byte.isspace():
            return 'attrs'
        elif byte == '>':
            endTag = 0
            if self.attrval.endswith('/'):
                # trailing slash means self-terminating tag
                endTag = 1
                self.attrval = self.attrval[:-1]
            self.tagAttributes[self.attrname] = self.attrval
            self.gotTagStart(self.tagName, self.tagAttributes)
            if endTag:
                self.gotTagEnd(self.tagName)
                return 'bodydata'
            return self.maybeBodyData()
        else:
            self.attrval += byte
    def end_messyattr(self):
        if self.attrval:
            self.tagAttributes[self.attrname] = self.attrval
    def begin_afterslash(self, byte):
        self._after_slash_closed = 0
    def do_afterslash(self, byte):
        # this state is only after a self-terminating slash, e.g. <foo/>
        if self._after_slash_closed:
            self._raise_parse_error("Mal-formed")  # XXX When does this happen??
        if byte != '>' and self.lenient:
            return
        elif byte != '>':
            self._raise_parse_error("No data allowed after '/'")
        self._after_slash_closed = 1
        self.gotTagStart(self.tagName, self.tagAttributes)
        self.gotTagEnd(self.tagName)
        # don't need maybeBodyData here because there better not be
        # any javascript code after a <script/>... we'll see :(
        return 'bodydata'
    def begin_bodydata(self, byte):
        if self._leadingBodyData:
            # body text captured before we entered this state (see do_begin)
            self.bodydata = self._leadingBodyData
            del self._leadingBodyData
        else:
            self.bodydata = ''
    def do_bodydata(self, byte):
        if byte == '<':
            return 'tagstart'
        if byte == '&':
            return 'entityref'
        self.bodydata += byte
    def end_bodydata(self):
        self.gotText(self.bodydata)
        self.bodydata = ''
    def do_waitforendscript(self, byte):
        if byte == '<':
            return 'waitscriptendtag'
        self.bodydata += byte
    def begin_waitscriptendtag(self, byte):
        self.temptagdata = ''
        self.tagName = ''
        self.endtag = 0
    def do_waitscriptendtag(self, byte):
        # 1 enforce / as first byte read
        # 2 enforce following bytes to be subset of "script" until
        #   tagName == "script"
        # 2a when that happens, gotText(self.bodydata) and
        #    gotTagEnd(self.tagName)
        # 3 spaces can happen anywhere, they're ignored
        #   e.g. < / script >
        # 4 anything else causes all data I've read to be moved to the
        #   bodydata, and switch back to waitforendscript state
        # If it turns out this _isn't_ a </script>, we need to
        # remember all the data we've been through so we can append it
        # to bodydata
        self.temptagdata += byte
        # 1
        if byte == '/':
            self.endtag = True
        elif not self.endtag:
            self.bodydata += "<" + self.temptagdata
            return 'waitforendscript'
        # 2
        elif byte.isalnum() or byte in IDENTCHARS:
            self.tagName += byte
            if not 'script'.startswith(self.tagName):
                self.bodydata += "<" + self.temptagdata
                return 'waitforendscript'
            elif self.tagName == 'script':
                self.gotText(self.bodydata)
                self.gotTagEnd(self.tagName)
                return 'waitforgt'
        # 3
        elif byte.isspace():
            return 'waitscriptendtag'
        # 4
        else:
            self.bodydata += "<" + self.temptagdata
            return 'waitforendscript'
    def begin_entityref(self, byte):
        self.erefbuf = ''
        self.erefextra = ''  # extra bit for lenient mode
    def do_entityref(self, byte):
        if byte.isspace() or byte == "<":
            if self.lenient:
                # '&foo' probably was '&amp;foo'
                if self.erefbuf and self.erefbuf != "amp":
                    self.erefextra = self.erefbuf
                self.erefbuf = "amp"
                if byte == "<":
                    return "tagstart"
                else:
                    self.erefextra += byte
                    return 'spacebodydata'
            self._raise_parse_error("Bad entity reference")
        elif byte != ';':
            self.erefbuf += byte
        else:
            return 'bodydata'
    def end_entityref(self):
        self.gotEntityReference(self.erefbuf)
    # hacky support for space after & in entityref in lenient
    # state should only happen in that case
    def begin_spacebodydata(self, byte):
        self.bodydata = self.erefextra
        self.erefextra = None
    do_spacebodydata = do_bodydata
    end_spacebodydata = end_bodydata
    # Sorta SAX-ish API
    def gotTagStart(self, name, attributes):
        '''Encountered an opening tag.
        Default behaviour is to print.'''
        print('begin', name, attributes)
    def gotText(self, data):
        '''Encountered text
        Default behaviour is to print.'''
        print('text:', repr(data))
    def gotEntityReference(self, entityRef):
        '''Encountered mnemonic entity reference
        Default behaviour is to print.'''
        print('entityRef: &%s;' % entityRef)
    def gotComment(self, comment):
        '''Encountered comment.
        Default behaviour is to ignore.'''
        pass
    def gotCData(self, cdata):
        '''Encountered CDATA
        Default behaviour is to call the gotText method'''
        self.gotText(cdata)
    def gotDoctype(self, doctype):
        """Encountered DOCTYPE
        This is really grotty: it basically just gives you everything between
        '<!DOCTYPE' and '>' as an argument.
        """
        print('!DOCTYPE', repr(doctype))
    def gotTagEnd(self, name):
        '''Encountered closing tag
        Default behaviour is to print.'''
        print('end', name)
| mit | 9caec64927a2a07dd616607a9b317160 | 30.483533 | 80 | 0.559412 | 4.037435 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.