repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
linuxium/ubuntu-yakkety | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of a traced call stack.

    Functions with no traced parent in the kernel (interrupts, syscalls,
    kernel threads...) hang off a single virtual node kept in
    ``CallTree.ROOT``.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        # Nodes created without an explicit parent attach to the virtual root.
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []

    def calls(self, func, calltime):
        """Record that this function called *func* at *calltime*.

        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Find the nearest ancestor (including self) named *func*.

        If no ancestor matches, a new child of ROOT is created and
        returned instead.

        @return: A reference to the parent (or newly created) node.
        """
        node = self
        while node != CallTree.ROOT:
            if node._func == func:
                return node
            node = node._parent
        # Walked off the top of the tree: attach func below ROOT.
        return CallTree.ROOT.calls(func, None)

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is None:
            out = "%s----%s\n" % (branch, self._func)
        else:
            out = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            # The last sibling closes its branch: blank out the trailing
            # connector character for everything drawn below this node.
            branch = branch[:-1] + " "
        last = len(self._children) - 1
        for idx, child in enumerate(self._children):
            out += child.__toString(branch + " |", idx == last)
        return out
class BrokenLineException(Exception):
    """Signals a trace line left incomplete by the pipe breakage;
    processing stops and the truncated line is discarded.
    """
class CommentLineException(Exception):
    """Signals a comment line (as found at the beginning of the trace
    file); such lines are simply skipped.
    """
def parseLine(line):
    """Split one function-tracer line into (calltime, callee, caller).

    Raises CommentLineException for comment lines and
    BrokenLineException for lines that do not match the trace format.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.groups()
def main():
    """Build the call tree from a raw trace read on stdin and print it."""
    # Virtual root that adopts every call whose parent was not traced.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # The last line was cut by the pipe breakage: stop here.
            break
        except CommentLineException:
            # Trace-file preamble/comment lines are skipped.
            continue
        # Climb back to the caller's node, then hang the callee below it.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    # Python 2 print statement (file targets #!/usr/bin/python, i.e. py2).
    print CallTree.ROOT


if __name__ == "__main__":
    main()
| gpl-2.0 |
lenovor/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)

import itertools

import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn import mixture

# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
# First component is correlated through C; second is isotropic at (-6, 3).
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# NOTE(review): mixture.GMM is the pre-0.18 scikit-learn API (superseded by
# GaussianMixture), and np.infty is a deprecated alias of np.inf in recent
# NumPy -- confirm target library versions before reuse.
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
# Grid-search over covariance type and component count, keeping the
# model with the lowest BIC.
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    # One group of bars per covariance type, offset so groups interleave.
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Place a star over the bar with the overall lowest BIC.
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/examples/speech_commands/train.py | 12 | 16629 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours even only using a CPU.
For more information, please see
https://www.tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
  """Build, train and evaluate the keyword-spotting model.

  Uses the TF1.x graph/session API. Reads all settings from the
  module-level FLAGS populated in the __main__ block.
  """
  # We want to see all the logging messages for this tutorial.
  tf.logging.set_verbosity(tf.logging.INFO)

  # Start a new TensorFlow session.
  sess = tf.InteractiveSession()

  # Begin by making sure we have the training data we need. If you already have
  # training data of your own, use `--data_url= ` on the command line to avoid
  # downloading.
  model_settings = models.prepare_model_settings(
      len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
      FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
      FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
  audio_processor = input_data.AudioProcessor(
      FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
      FLAGS.unknown_percentage,
      FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
      FLAGS.testing_percentage, model_settings)
  fingerprint_size = model_settings['fingerprint_size']
  label_count = model_settings['label_count']
  time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
  # Figure out the learning rates for each training phase. Since it's often
  # effective to have high learning rates at the start of training, followed by
  # lower levels towards the end, the number of steps and learning rates can be
  # specified as comma-separated lists to define the rate at each stage. For
  # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
  # will run 13,000 training loops in total, with a rate of 0.001 for the first
  # 10,000, and 0.0001 for the final 3,000.
  training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
  learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
  if len(training_steps_list) != len(learning_rates_list):
    raise Exception(
        '--how_many_training_steps and --learning_rate must be equal length '
        'lists, but are %d and %d long instead' % (len(training_steps_list),
                                                   len(learning_rates_list)))

  fingerprint_input = tf.placeholder(
      tf.float32, [None, fingerprint_size], name='fingerprint_input')

  logits, dropout_prob = models.create_model(
      fingerprint_input,
      model_settings,
      FLAGS.model_architecture,
      is_training=True)

  # Define loss and optimizer
  ground_truth_input = tf.placeholder(
      tf.int64, [None], name='groundtruth_input')

  # Optionally we can add runtime checks to spot when NaNs or other symptoms of
  # numerical errors start occurring during training.
  control_dependencies = []
  if FLAGS.check_nans:
    checks = tf.add_check_numerics_ops()
    control_dependencies = [checks]

  # Create the back propagation and training evaluation machinery in the graph.
  with tf.name_scope('cross_entropy'):
    cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
        labels=ground_truth_input, logits=logits)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
    learning_rate_input = tf.placeholder(
        tf.float32, [], name='learning_rate_input')
    train_step = tf.train.GradientDescentOptimizer(
        learning_rate_input).minimize(cross_entropy_mean)
  predicted_indices = tf.argmax(logits, 1)
  correct_prediction = tf.equal(predicted_indices, ground_truth_input)
  confusion_matrix = tf.confusion_matrix(
      ground_truth_input, predicted_indices, num_classes=label_count)
  evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)

  # Manually incremented so restarts from a checkpoint resume at the
  # right step.
  global_step = tf.train.get_or_create_global_step()
  increment_global_step = tf.assign(global_step, global_step + 1)

  saver = tf.train.Saver(tf.global_variables())

  # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
  merged_summaries = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                       sess.graph)
  validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')

  tf.global_variables_initializer().run()

  start_step = 1

  if FLAGS.start_checkpoint:
    models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
    start_step = global_step.eval(session=sess)

  tf.logging.info('Training from step: %d ', start_step)

  # Save graph.pbtxt.
  tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
                       FLAGS.model_architecture + '.pbtxt')

  # Save list of words.
  with gfile.GFile(
      os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
      'w') as f:
    f.write('\n'.join(audio_processor.words_list))

  # Training loop.
  training_steps_max = np.sum(training_steps_list)
  for training_step in xrange(start_step, training_steps_max + 1):
    # Figure out what the current learning rate is.
    training_steps_sum = 0
    for i in range(len(training_steps_list)):
      training_steps_sum += training_steps_list[i]
      if training_step <= training_steps_sum:
        learning_rate_value = learning_rates_list[i]
        break
    # Pull the audio samples we'll use for training.
    train_fingerprints, train_ground_truth = audio_processor.get_data(
        FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
        FLAGS.background_volume, time_shift_samples, 'training', sess)
    # Run the graph with this batch of training data.
    train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
        [
            merged_summaries, evaluation_step, cross_entropy_mean, train_step,
            increment_global_step
        ],
        feed_dict={
            fingerprint_input: train_fingerprints,
            ground_truth_input: train_ground_truth,
            learning_rate_input: learning_rate_value,
            dropout_prob: 0.5
        })
    train_writer.add_summary(train_summary, training_step)
    tf.logging.info('Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
                    (training_step, learning_rate_value, train_accuracy * 100,
                     cross_entropy_value))
    is_last_step = (training_step == training_steps_max)
    if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
      set_size = audio_processor.set_size('validation')
      total_accuracy = 0
      total_conf_matrix = None
      for i in xrange(0, set_size, FLAGS.batch_size):
        validation_fingerprints, validation_ground_truth = (
            audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
                                     0.0, 0, 'validation', sess))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy, conf_matrix = sess.run(
            [merged_summaries, evaluation_step, confusion_matrix],
            feed_dict={
                fingerprint_input: validation_fingerprints,
                ground_truth_input: validation_ground_truth,
                dropout_prob: 1.0
            })
        validation_writer.add_summary(validation_summary, training_step)
        # Last batch may be smaller; weight the running accuracy accordingly.
        batch_size = min(FLAGS.batch_size, set_size - i)
        total_accuracy += (validation_accuracy * batch_size) / set_size
        if total_conf_matrix is None:
          total_conf_matrix = conf_matrix
        else:
          total_conf_matrix += conf_matrix
      tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
      tf.logging.info('Step %d: Validation accuracy = %.1f%% (N=%d)' %
                      (training_step, total_accuracy * 100, set_size))

    # Save the model checkpoint periodically.
    if (training_step % FLAGS.save_step_interval == 0 or
        training_step == training_steps_max):
      checkpoint_path = os.path.join(FLAGS.train_dir,
                                     FLAGS.model_architecture + '.ckpt')
      tf.logging.info('Saving to "%s-%d"', checkpoint_path, training_step)
      saver.save(sess, checkpoint_path, global_step=training_step)

  # Final evaluation on the held-out testing set.
  set_size = audio_processor.set_size('testing')
  tf.logging.info('set_size=%d', set_size)
  total_accuracy = 0
  total_conf_matrix = None
  for i in xrange(0, set_size, FLAGS.batch_size):
    test_fingerprints, test_ground_truth = audio_processor.get_data(
        FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
    test_accuracy, conf_matrix = sess.run(
        [evaluation_step, confusion_matrix],
        feed_dict={
            fingerprint_input: test_fingerprints,
            ground_truth_input: test_ground_truth,
            dropout_prob: 1.0
        })
    batch_size = min(FLAGS.batch_size, set_size - i)
    total_accuracy += (test_accuracy * batch_size) / set_size
    if total_conf_matrix is None:
      total_conf_matrix = conf_matrix
    else:
      total_conf_matrix += conf_matrix
  tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
  tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (total_accuracy * 100,
                                                           set_size))
if __name__ == '__main__':
  # All flags land in the module-level FLAGS read by main().
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_url',
      type=str,
      # pylint: disable=line-too-long
      default='http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
      # pylint: enable=line-too-long
      help='Location of speech training data archive on the web.')
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/speech_dataset/',
      help="""\
      Where to download the speech training data to.
      """)
  parser.add_argument(
      '--background_volume',
      type=float,
      default=0.1,
      help="""\
      How loud the background noise should be, between 0 and 1.
      """)
  parser.add_argument(
      '--background_frequency',
      type=float,
      default=0.8,
      help="""\
      How many of the training samples have background noise mixed in.
      """)
  parser.add_argument(
      '--silence_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be silence.
      """)
  parser.add_argument(
      '--unknown_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be unknown words.
      """)
  parser.add_argument(
      '--time_shift_ms',
      type=float,
      default=100.0,
      help="""\
      Range to randomly shift the training audio by in time.
      """)
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a test set.')
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a validation set.')
  parser.add_argument(
      '--sample_rate',
      type=int,
      default=16000,
      help='Expected sample rate of the wavs',)
  parser.add_argument(
      '--clip_duration_ms',
      type=int,
      default=1000,
      help='Expected duration in milliseconds of the wavs',)
  parser.add_argument(
      '--window_size_ms',
      type=float,
      default=30.0,
      help='How long each spectrogram timeslice is',)
  parser.add_argument(
      '--window_stride_ms',
      type=float,
      default=10.0,
      help='How long each spectrogram timeslice is',)
  parser.add_argument(
      '--dct_coefficient_count',
      type=int,
      default=40,
      help='How many bins to use for the MFCC fingerprint',)
  parser.add_argument(
      '--how_many_training_steps',
      type=str,
      default='15000,3000',
      help='How many training loops to run',)
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=400,
      help='How often to evaluate the training results.')
  parser.add_argument(
      '--learning_rate',
      type=str,
      default='0.001,0.0001',
      help='How large a learning rate to use when training.')
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='How many items to train with at once',)
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.')
  parser.add_argument(
      '--wanted_words',
      type=str,
      default='yes,no,up,down,left,right,on,off,stop,go',
      help='Words to use (others will be added to an unknown label)',)
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/speech_commands_train',
      help='Directory to write event logs and checkpoint.')
  parser.add_argument(
      '--save_step_interval',
      type=int,
      default=100,
      help='Save model checkpoint every save_steps.')
  parser.add_argument(
      '--start_checkpoint',
      type=str,
      default='',
      help='If specified, restore this pretrained model before any training.')
  parser.add_argument(
      '--model_architecture',
      type=str,
      default='conv',
      help='What model architecture to use')
  # NOTE(review): argparse type=bool is a known pitfall -- any non-empty
  # string (including "False") parses as True. Confirm whether an
  # action='store_true' flag was intended here.
  parser.add_argument(
      '--check_nans',
      type=bool,
      default=False,
      help='Whether to check for invalid numbers during processing')

  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
particleKIT/hostlist | hostlist/hostlist.py | 1 | 9496 | #!/usr/bin/env python3
import logging
import types
from collections import defaultdict
import os
import sys
import ipaddress
import itertools
import glob
import yaml
from typing import Dict, Tuple
try:
from yaml import CSafeLoader as SafeLoader # type: ignore
except ImportError:
from yaml import SafeLoader # type: ignore
from . import host
from .config import CONFIGINSTANCE as Config
class Hostlist(list):
    """A list of host objects with helpers for printing and diffing."""

    def __init__(self):
        super().__init__()
        # Per-source-file header metadata, keyed by basename.
        self.fileheaders = {}

    def __str__(self):
        return '\n'.join(str(entry) for entry in self)

    def diff(self, otherhostlist) -> types.SimpleNamespace:
        """Compare the public-IP entries of two hostlists.

        Returns a namespace with ``add`` (hosts only present/changed
        here), ``remove`` (hosts only present/changed in the other
        list) and ``empty`` (True when both are empty).
        """
        result = types.SimpleNamespace(add=[], remove=[])
        mine = {entry.fqdn: entry.ip for entry in self if entry.publicip}
        theirs = {entry.fqdn: entry.ip for entry in otherhostlist if entry.publicip}
        by_fqdn_mine = {entry.fqdn: entry for entry in self}
        by_fqdn_theirs = {entry.fqdn: entry for entry in otherhostlist}
        result.add = [by_fqdn_mine[fqdn] for fqdn, ip in mine.items()
                      if theirs.get(fqdn) != ip]
        result.remove = [by_fqdn_theirs[fqdn] for fqdn, ip in theirs.items()
                         if mine.get(fqdn) != ip]
        result.empty = not (result.add or result.remove)
        return result
class DNSVSHostlist(Hostlist):
    """Hostlist filled from DNSVS data."""

    def __init__(self, input: Dict[str, Tuple[str, bool]]) -> None:
        super().__init__()
        # input maps hostname -> (ip, is_nonunique)
        for hostname, (ip, is_nonunique) in input.items():
            self.append(host.Host(hostname, ip, is_nonunique))
class YMLHostlist(Hostlist):
    """Hostlist filled from the ``*.yml`` files in Config["hostlistdir"]."""

    def __init__(self):
        super().__init__()
        # group name -> list of hosts belonging to that group
        self.groups = defaultdict(list)
        input_ymls = sorted(glob.glob(Config["hostlistdir"] + '/*.yml'))
        logging.debug("Using %s" % ', '.join(input_ymls))
        for inputfile in input_ymls:
            self._add_ymlhostfile(inputfile)

    def _add_ymlhostfile(self, fname):
        "parse all hosts in fname and add them to this hostlist"
        shortname = os.path.splitext(os.path.basename(fname))[0]
        if shortname.count('-') > 1:
            # BUG FIX: the %s placeholder previously had no argument, so the
            # message was logged with a literal "%s" (also: "to" -> "too").
            logging.error('Filename %s contains too many dashes. Skipped.',
                          fname)
            return
        if '-' in shortname:
            # get abc, def from hostlists/abc-def.yml
            hosttype, institute = shortname.split('-')
        else:
            hosttype = shortname
            institute = None
        try:
            infile = open(fname, 'r')
        except OSError:
            # Previously a bare `except:`; OSError covers unreadable files
            # without swallowing KeyboardInterrupt/SystemExit.
            logging.error('file %s not readable' % fname)
            return
        try:
            # BUG FIX: yaml.load_all returns a lazy generator, so parse
            # errors used to surface later, outside this except clause.
            # Materializing the documents makes the error handling work.
            yamlsections = list(yaml.load_all(infile, Loader=SafeLoader))
        except yaml.YAMLError as e:
            logging.error('file %s not correct yml' % fname)
            logging.error(str(e))
            return
        for yamlout in yamlsections:
            self._parse_section(yamlout, fname, hosttype, institute)
        # NOTE(review): this runs after *each* file but iterates every host,
        # so docker hosts from earlier files appear to get their IP prefix
        # applied again -- verify ports are not double-prefixed when several
        # files define docker hosts.
        self._fix_docker_ports()

    def _parse_section(self, yamlout, fname, hosttype, institute):
        """Create host objects from one YAML document of a hostlist file."""
        for field in ('header', 'hosts'):
            if field not in yamlout:
                logging.error('missing field %s in %s' % (field, fname))
        header = yamlout['header']
        if 'iprange' in header:
            # Normalize the two-element iprange to ip_address objects.
            ipstart, ipend = header['iprange']
            header['iprange'] = ipaddress.ip_address(ipstart), ipaddress.ip_address(ipend)
        self.fileheaders[os.path.basename(fname)] = header
        for hostdata in yamlout["hosts"]:
            newhost = host.YMLHost(hostdata, hosttype, institute, header)
            self.append(newhost)
            for group in newhost.groups:
                self.groups[group].append(newhost)

    def _fix_docker_ports(self):
        """Prefix each docker port spec with the host's container IP."""
        for h in self:
            if 'docker' in h.vars and 'ports' in h.vars['docker']:
                # prefix docker ports with container IP
                h.vars['docker']['ports'] = [
                    str(h.ip) + ':' + port for port in h.vars['docker']['ports']
                ]

    def print(self, filter):
        """Print hosts matching *filter*, verbosity based on log level."""
        filtered = [h for h in self if h.filter(filter)]
        for h in filtered:
            if logging.getLogger().level == logging.DEBUG:
                print(h.output(printgroups=True, printallvars=True))
            elif logging.getLogger().level == logging.INFO:
                print(h.output(delim='\t', printgroups=True))
            else:
                print(h.hostname)

    def check_consistency(self, cnames):
        """Run all consistency checks; sys.exit(1) on unignored failure."""
        # NOTE(review): the isinstance(self, YMLHostlist) guards below are
        # always true here, which suggests these check methods may have been
        # written for the base class -- confirm intended class placement.
        checks = {
            'nonunique': self.check_nonunique(),
            'cnames': self.check_cnames(cnames),
            'duplicates': self.check_duplicates(),
            'missing_mac_ip': self.check_missing_mac_ip(),
        }
        for h in self:
            for hcheck, hstatus in h.run_checks().items():
                if not hstatus:
                    checks.update({hcheck: hstatus})
        if isinstance(self, YMLHostlist):
            checks.update({'iprange_overlap': self.check_iprange_overlap()})
        logging.info("consistency check finished")
        for check, status in checks.items():
            if not status and not ('ignore_checks' in Config and
                                   check in Config["ignore_checks"]):
                sys.exit(1)

    def check_nonunique(self):
        """ensure nonunique flag agrees with nonunique_ips config"""
        success = True
        # Hosts that use a config-listed nonunique IP while still being
        # flagged unique; more than one such host per IP is an error.
        nonunique_ips = defaultdict(list)
        for h in self:
            ip_fit = str(h.ip) in Config["nonunique_ips"]
            if ip_fit and h.vars['unique']:
                nonunique_ips[str(h.ip)].append(h)
            if not ip_fit and not h.vars['unique']:
                logging.error("Host %s has nonunique ip flag, "
                              "but its ip is not listed in the config." % h)
                success = False
        for ip in nonunique_ips:
            if len(nonunique_ips[ip]) > 1:
                logging.error("More than one host uses a given nonunique ip"
                              " without being flagged:\n" +
                              ('\n'.join((str(x) for x in nonunique_ips[ip]))))
                success = False
        return success

    def check_cnames(self, cnames):
        """ensure there are no duplicates between hostlist and cnames"""
        success = True
        for cname in cnames:
            has_dest = False
            for h in self:
                if h.fqdn == cname.fqdn:
                    logging.error("%s conflicts with %s." % (cname, h))
                    success = False
                if cname.dest == h.fqdn:
                    has_dest = True
            if not has_dest:
                logging.error("%s points to a non-existing host." % cname)
                success = False
        return success

    def check_duplicates(self):
        """check consistency of hostlist

        detect duplicates (ip, mac, hostname)"""
        success = True
        inverselist = {}
        tocheck_props = ['ip', 'mac', 'hostname']
        for prop in tocheck_props:
            inverselist[prop] = {}
            for h in self:
                myhostprop = getattr(h, prop)
                if myhostprop is None:
                    continue
                if prop == 'ip' and str(myhostprop) in Config["nonunique_ips"]:
                    # allow nonunique ips if listed in config
                    continue
                if myhostprop in inverselist[prop]:
                    logging.error("Found duplicate %s for hosts \n%s\n%s"
                                  % (prop, inverselist[prop][myhostprop], h))
                    success = False
                inverselist[prop][myhostprop] = h
        return success

    def check_missing_mac_ip(self) -> bool:
        """check if hosts are missing an ip or mac"""
        success = True
        for h in self:
            if 'needs_ip' in h.groups and h.ip is None:
                logging.error("Missing IP in %s ", h)
                success = False
        if isinstance(self, YMLHostlist):
            for h in self:
                if 'needs_mac' in h.groups and h.mac is None:
                    logging.error("Missing MAC in %s ", h)
                    success = False
        return success

    def check_iprange_overlap(self) -> bool:
        "check whether any of the ipranges given in headers overlap"
        overlaps = []
        for ita, itb in itertools.combinations(self.fileheaders.items(), 2):
            filea, headera = ita
            fileb, headerb = itb
            try:
                a = headera['iprange']
                b = headerb['iprange']
            except KeyError:
                # one of the files does not have iprange defined, ignore it
                continue
            if headera.get('iprange_allow_overlap', False) or \
               headerb.get('iprange_allow_overlap', False):
                # FIXME: check overlap for internal IPs
                continue
            # check if there is overlap between a and b
            overlap_low = max(a[0], b[0])
            overlap_high = min(a[1], b[1])
            if overlap_low <= overlap_high:
                overlaps.append((overlap_low, overlap_high, filea, fileb))
        if overlaps:
            for overlap in overlaps:
                logging.error("Found overlap from %s to %s in files %s and %s." % overlap)
        return not bool(overlaps)
| gpl-3.0 |
grue/kafka-python | kafka/util.py | 15 | 4482 | import binascii
import collections
import struct
import sys
from threading import Thread, Event
import six
from kafka.common import BufferUnderflowError
def crc32(data):
    """Return the CRC-32 of *data* as an unsigned 32-bit integer."""
    # binascii.crc32 may return a signed value on Python 2; reduce to
    # the unsigned 32-bit range for a stable wire representation.
    return binascii.crc32(data) % (1 << 32)
def write_int_string(s):
    """Encode *s* as a 32-bit big-endian length prefix plus the bytes.

    None is encoded as length -1; non-bytes input raises TypeError.
    """
    if s is None:
        return struct.pack('>i', -1)
    if not isinstance(s, six.binary_type):
        raise TypeError('Expected "%s" to be bytes\n'
                        'data=%s' % (type(s), repr(s)))
    return struct.pack('>i%ds' % len(s), len(s), s)
def write_short_string(s):
    """Encode *s* as a 16-bit big-endian length prefix plus the bytes.

    None is encoded as length -1; non-bytes input raises TypeError;
    strings longer than a signed short allows raise struct.error.
    """
    if s is None:
        return struct.pack('>h', -1)
    if not isinstance(s, six.binary_type):
        raise TypeError('Expected "%s" to be bytes\n'
                        'data=%s' % (type(s), repr(s)))
    if len(s) > 32767 and sys.version_info < (2, 7):
        # Python 2.6 issues a deprecation warning instead of a struct error
        raise struct.error(len(s))
    return struct.pack('>h%ds' % len(s), len(s), s)
def read_short_string(data, cur):
    """Decode a 16-bit length-prefixed string at offset *cur*.

    Returns (bytes_or_None, new_offset); raises BufferUnderflowError
    when *data* is too short.
    """
    if len(data) < cur + 2:
        raise BufferUnderflowError("Not enough data left")
    (strlen,) = struct.unpack('>h', data[cur:cur + 2])
    cur += 2
    if strlen == -1:
        # -1 encodes a null string.
        return None, cur
    if len(data) < cur + strlen:
        raise BufferUnderflowError("Not enough data left")
    return data[cur:cur + strlen], cur + strlen
def read_int_string(data, cur):
    """Decode a 32-bit length-prefixed string at offset *cur*.

    Returns (bytes_or_None, new_offset); raises BufferUnderflowError
    when *data* is too short.
    """
    if len(data) < cur + 4:
        raise BufferUnderflowError(
            "Not enough data left to read string len (%d < %d)" %
            (len(data), cur + 4))
    (strlen,) = struct.unpack('>i', data[cur:cur + 4])
    cur += 4
    if strlen == -1:
        # -1 encodes a null string.
        return None, cur
    if len(data) < cur + strlen:
        raise BufferUnderflowError("Not enough data left")
    return data[cur:cur + strlen], cur + strlen
def relative_unpack(fmt, data, cur):
    """Unpack struct format *fmt* at offset *cur*.

    Returns (values_tuple, new_offset); raises BufferUnderflowError
    when *data* is too short.
    """
    end = cur + struct.calcsize(fmt)
    if len(data) < end:
        raise BufferUnderflowError("Not enough data left")
    return struct.unpack(fmt, data[cur:end]), end
def group_by_topic_and_partition(tuples):
    """Index items by .topic then .partition, asserting uniqueness."""
    grouped = collections.defaultdict(dict)
    for item in tuples:
        # Each (topic, partition) pair must appear at most once.
        assert item.topic not in grouped or item.partition not in grouped[item.topic], \
            'Duplicate {0}s for {1} {2}'.format(item.__class__.__name__,
                                                item.topic, item.partition)
        grouped[item.topic][item.partition] = item
    return grouped
def kafka_bytestring(s):
    """
    Takes a string or bytes instance
    Returns bytes, encoding strings in utf-8 as necessary
    """
    # Bytes must be checked before text: on Python 2 a native str
    # satisfies both six.binary_type and six.string_types and has to be
    # returned unchanged rather than re-encoded.
    if isinstance(s, six.binary_type):
        return s
    if isinstance(s, six.string_types):
        return s.encode('utf-8')
    raise TypeError(s)
class ReentrantTimer(object):
    """
    A timer that can be restarted, unlike threading.Timer
    (although this uses threading.Thread)

    Arguments:
        t: timer interval in milliseconds
        fn: a callable to invoke
        args: tuple of args to be passed to function
        kwargs: keyword arguments to be passed to function
    """
    def __init__(self, t, fn, *args, **kwargs):
        if t <= 0:
            raise ValueError('Invalid timeout value')

        if not callable(fn):
            raise ValueError('fn must be callable')

        self.thread = None
        self.t = t / 1000.0  # interval is given in milliseconds
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.active = None

    def _timer(self, active):
        # python2.6 Event.wait() always returns None
        # python2.7 and greater returns the flag value (true/false)
        # we want the flag value, so add an 'or' here for python2.6
        # this is redundant for later python versions (FLAG OR FLAG == FLAG)
        while not (active.wait(self.t) or active.is_set()):
            self.fn(*self.args, **self.kwargs)

    def start(self):
        """(Re)start the timer, stopping any previous worker thread first."""
        if self.thread is not None:
            self.stop()

        self.active = Event()
        self.thread = Thread(target=self._timer, args=(self.active,))
        self.thread.daemon = True  # So the app exits when main thread exits
        self.thread.start()

    def stop(self):
        """Signal the worker thread to exit and wait for it briefly."""
        if self.thread is None:
            return

        self.active.set()
        self.thread.join(self.t + 1)
        # BUG FIX: this previously assigned `self.timer = None` (a
        # nonexistent attribute) and cleared `self.fn`, which made any
        # start() after a stop() crash in the worker thread.  Reset the
        # thread reference and keep the callback so the timer is truly
        # restartable.
        self.thread = None

    def __del__(self):
        self.stop()
| apache-2.0 |
florianbeer/librenms | services-wrapper.py | 6 | 13196 | #! /usr/bin/env python2
"""
services-wrapper A small tool which wraps around check-services.php and tries to
guide the services process with a more modern approach with a
Queue and workers.
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <neil@librenms.org>
Date: Oct 2016
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 1 thread.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
try:
import json
import os
import Queue
import subprocess
import sys
import threading
import time
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
"""
Fetch configuration details from the config_to_json.php script
"""
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
def get_config_data():
    """Run config_to_json.php and return its stdout: the LibreNMS
    configuration serialized as JSON.

    Exits the whole process with status 2 if PHP cannot be spawned.
    """
    config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
    try:
        proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    except:
        print "ERROR: Could not execute: %s" % config_cmd
        sys.exit(2)
    # communicate() returns (stdout, stderr); only stdout carries the JSON.
    return proc.communicate()[0]
# Sanity check: make sure config.php exists and is readable before asking
# PHP to serialize it for us.
try:
    with open(config_file) as f:
        pass
except IOError as e:
    print "ERROR: Oh dear... %s does not seem readable" % config_file
    sys.exit(2)

try:
    config = json.loads(get_config_data())
except:
    print "ERROR: Could not load or parse configuration, are PATHs correct?"
    sys.exit(2)

service_path = config['install_dir'] + '/check-services.php'
db_username = config['db_user']
db_password = config['db_pass']
# db_host may be a 'unix:/path' socket, a bare host, or 'host:port'.
# A db_port of 0 means "not specified" (see db_open()).
if config['db_host'][:5].lower() == 'unix:':
    db_server = config['db_host']
    db_port = 0
elif ':' in config['db_host']:
    # NOTE(review): rsplit(':') without maxsplit then taking [0]/[1] would
    # mishandle IPv6 literals -- confirm upstream intent.
    db_server = config['db_host'].rsplit(':')[0]
    db_port = int(config['db_host'].rsplit(':')[1])
else:
    db_server = config['db_host']
    db_port = 0
db_dbname = config['db_name']
db_dbname = config['db_name']
def db_open():
    """Open and return a MySQLdb connection using the module-level
    db_server/db_port/db_username/db_password/db_dbname globals.

    A db_port of 0 means "not specified" (plain hostname or unix: socket
    path), in which case MySQLdb's default port handling applies.
    Exits the process with status 2 on any connection failure.
    """
    try:
        if db_port == 0:
            db = MySQLdb.connect(host=db_server, user=db_username, passwd=db_password, db=db_dbname)
        else:
            db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
        return db
    except:
        print "ERROR: Could not connect to MySQL database!"
        sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
# Optional distributed-poller group filter; False means "poll everything".
if 'distributed_poller_group' in config:
    service_group = str(config['distributed_poller_group'])
else:
    service_group = False
def memc_alive():
    """Round-trip a throwaway key through memcached to verify the server
    is still reachable.

    Returns True when the value survives the set/get cycle, False on any
    failure (including an unreachable server or a missing client).
    """
    try:
        global memc
        token = str(uuid.uuid4())
        ping_key = 'service.ping.' + token
        memc.set(ping_key, token, 60)
        if memc.get(ping_key) != token:
            return False
        memc.delete(ping_key)
        return True
    except:
        return False
def memc_touch(key, time):
    """Refresh the expiry of *key* in memcached by re-storing its current
    value with a TTL of *time* seconds.

    Best-effort: all failures are deliberately swallowed.  NOTE: the
    ``time`` parameter shadows the stdlib ``time`` module inside this
    function (name kept for caller compatibility).
    """
    try:
        global memc
        memc.set(key, memc.get(key), time)
    except:
        pass
# Distributed polling: if this install is configured as part of a poller
# cluster, join via memcached either as the master (first one in) or as a
# node.  On any failure fall back to standalone polling.
if ('distributed_poller' in config and
        'distributed_poller_memcached_host' in config and
        'distributed_poller_memcached_port' in config and
        config['distributed_poller']):
    try:
        import memcache
        import uuid
        memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
                                str(config['distributed_poller_memcached_port'])])
        # Refuse to run twice on the host that already holds the master lock.
        if str(memc.get("service.master")) == config['distributed_poller_name']:
            print "This system is already joined as the service master."
            sys.exit(2)
        if memc_alive():
            if memc.get("service.master") is None:
                # No master registered yet: claim the role and reset the
                # node counter.
                print "Registered as Master"
                memc.set("service.master", config['distributed_poller_name'], 10)
                memc.set("service.nodes", 0, 300)
                IsNode = False
            else:
                print "Registered as Node joining Master %s" % memc.get("service.master")
                IsNode = True
                memc.incr("service.nodes")
            servicedisco = True
        else:
            print "Could not connect to memcached, disabling distributed service checks."
            servicedisco = False
            IsNode = False
    except SystemExit:
        raise
    except ImportError:
        print "ERROR: missing memcache python module:"
        print "On deb systems: apt-get install python-memcache"
        print "On other systems: easy_install python-memcached"
        print "Disabling distributed discovery."
        servicedisco = False
else:
    servicedisco = False
# EOC1
# Global bookkeeping for the run: wall-clock start time, cumulative
# per-device polling time, per-device durations and device counter.
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 1
"""
try:
    amount_of_workers = int(sys.argv[1])
    if amount_of_workers == 0:
        print "ERROR: 0 threads is not a valid value"
        sys.exit(2)
except:
    # Missing or non-integer argument: run single-threaded.
    amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
# Build the device list: every enabled device that has at least one service,
# optionally restricted to this poller's group.  service_group comes from
# local config (not user input), but it is still concatenated into SQL --
# parameterized queries would be safer.
if service_group is not False:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN(" + service_group + ") AND `devices`.`disabled` = 0"
else:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
    devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
# The master remembers the device_id range so it can clear all per-device
# locks in memcached when the run finishes.
if servicedisco and not IsNode:
    query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
    cursor.execute(query)
    devices = cursor.fetchall()
    maxlocks = devices[0][0]
    minlocks = devices[0][1]
# EOC3
db.close()
"""
A separate queue and a single worker for printing information to the screen prevents
the good old joke:

Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global servicedisco
if servicedisco:
if not IsNode:
memc_touch('service.master', 10)
nodes = memc.get('service.nodes')
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
servicedisco = False
nodes = nodeso
if nodes is not nodeso:
print "INFO: %s Node(s) Total" % (nodes)
nodeso = nodes
else:
memc_touch('service.nodes', 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global service_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
service_devices += 1
if elapsed_time < 300:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print_queue.task_done()
"""
This class will fork off single instances of the check-services.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not servicedisco or memc.get('service.device.' + str(device_id)) is None:
if servicedisco:
result = memc.add('service.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print "This device (%s) appears to be being service checked by another service node" % (device_id)
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not service checking Device %s as Node. Master will check it." % device_id
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
command = "/usr/bin/env php %s -h %s >> /dev/null 2>&1" % (service_path, device_id)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the service check at %s with %s threads" % (time.strftime("%Y-%m-%d %H:%M:%S"),
                                                                  amount_of_workers)
for device_id in devices_list:
    poll_queue.put(device_id)
# Spawn the poller workers plus one printer thread; all daemonic so the
# process can exit even if a thread hangs.
for i in range(amount_of_workers):
    t = threading.Thread(target=poll_worker)
    t.setDaemon(True)
    t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
    poll_queue.join()
    print_queue.join()
except (KeyboardInterrupt, SystemExit):
    raise
total_time = int(time.time() - s_time)
print "INFO: services-wrapper checked %s devices in %s seconds with %s workers" % (service_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
# Distributed teardown: the master waits for all nodes, clears the
# per-device locks and the shared keys; nodes just decrement the counter.
if servicedisco or memc_alive():
    master = memc.get("service.master")
    if master == config['distributed_poller_name'] and not IsNode:
        print "Wait for all service-nodes to finish"
        nodes = memc.get("service.nodes")
        while nodes > 0 and nodes is not None:
            try:
                time.sleep(1)
                nodes = memc.get("service.nodes")
            except:
                pass
        print "Clearing Locks"
        x = minlocks
        while x <= maxlocks:
            memc.delete('service.device.' + str(x))
            x = x + 1
        print "%s Locks Cleared" % x
        print "Clearing Nodes"
        memc.delete("service.master")
        memc.delete("service.nodes")
    else:
        memc.decr("service.nodes")
    print "Finished %s." % time.time()
# EOC6
# Post-run advice: warn about slow runs and slow individual devices.
show_stopper = False
if total_time > 300:
    print "WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads"
    print "INFO: in sequential style service checks the elapsed time would have been: %s seconds" % real_duration
    for device in per_device_duration:
        if per_device_duration[device] > 300:
            print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
            show_stopper = True
    if show_stopper:
        print "ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do."
    else:
        recommend = int(total_time / 300.0 * amount_of_workers + 1)
        print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
    sys.exit(2)
| gpl-3.0 |
djbaldey/django | django/db/backends/mysql/client.py | 520 | 1518 | import subprocess
from django.db.backends.base.client import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
    """Wraps the ``mysql`` command line client."""

    executable_name = 'mysql'

    @classmethod
    def settings_to_cmd_args(cls, settings_dict):
        """Translate a Django DATABASES entry into a mysql argv list.

        Values under OPTIONS take precedence over the top-level settings
        keys.  A host containing a slash is treated as a unix socket path.
        """
        options = settings_dict['OPTIONS']
        db = options.get('db', settings_dict['NAME'])
        user = options.get('user', settings_dict['USER'])
        passwd = options.get('passwd', settings_dict['PASSWORD'])
        host = options.get('host', settings_dict['HOST'])
        port = options.get('port', settings_dict['PORT'])
        cert = options.get('ssl', {}).get('ca')
        defaults_file = options.get('read_default_file')

        args = [cls.executable_name]
        # Seems to be no good way to set sql_mode with CLI.
        if defaults_file:
            args.append("--defaults-file=%s" % defaults_file)
        if user:
            args.append("--user=%s" % user)
        if passwd:
            args.append("--password=%s" % passwd)
        if host:
            args.append(("--socket=%s" if '/' in host else "--host=%s") % host)
        if port:
            args.append("--port=%s" % port)
        if cert:
            args.append("--ssl-ca=%s" % cert)
        if db:
            args.append(db)
        return args

    def runshell(self):
        """Spawn the mysql CLI against this connection's settings."""
        args = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        subprocess.call(args)
| bsd-3-clause |
DamnWidget/mamba | mamba/enterprise/mysql.py | 3 | 17269 | # -*- test-case-name: mamba.test.test_database -*-
# Copyright (c) 2012 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module:: mysql_adapter
:platform: Unix, Windows
:synopsis: MySQL adapter for create MySQL tables
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import inspect
from storm.expr import Undef
from twisted.python import components
from storm.references import Reference
from storm import variables, properties
from singledispatch import singledispatch
from mamba.utils import config
from mamba.core.interfaces import IMambaSQL
from mamba.core.adapters import MambaSQLAdapter
from mamba.enterprise.common import CommonSQL, NativeEnumVariable, NativeEnum
class MySQLError(Exception):
    """Base exception type for MySQL adapter errors."""


class MySQLMissingPrimaryKey(MySQLError):
    """Raised when a model does not define a primary key column."""


class MySQLNotEnumColumn(MySQLError):
    """Raised when parse_enum receives a column that is not an Enum."""
class MySQL(CommonSQL):
    """
    This class implements the MySQL syntax layer for mamba

    :param model: the model to generate MySQL syntax for
    :type model: :class:`~mamba.Model`
    """

    def __init__(self, model):
        self.model = model
        # Map Storm property classes to MySQL column type names; anything
        # not listed here falls back to 'text' (see parse()).
        self._columns_mapping = {
            properties.Bool: 'tinyint',
            properties.UUID: 'blob',
            properties.RawStr: 'blob',
            properties.Pickle: 'varbinary',
            properties.JSON: 'blob',
            properties.DateTime: 'datetime',
            properties.Date: 'date',
            properties.Time: 'time',
            properties.Enum: 'integer',
            NativeEnum: 'enum'
        }
        # Per-instance single dispatch: route specific Storm property
        # classes to their dedicated parser methods; unregistered types
        # hit the generic parse() fallback.
        self.parse = singledispatch(self.parse)
        self.parse.register(properties.Int, self.parse_int)
        self.parse.register(properties.Decimal, self.parse_decimal)
        self.parse.register(properties.Unicode, self._parse_unicode)
        self.parse.register(properties.Float, self._parse_float)

    @property
    def engine(self):
        """
        Return back the type of engine defined for this MySQL table, if
        no engine has been configured use InnoDB as default
        """
        if not hasattr(self.model, '__engine__'):
            return 'InnoDB'
        return self.model.__engine__

    @staticmethod
    def register():
        """Register this component
        """
        try:
            components.registerAdapter(MambaSQLAdapter, MySQL, IMambaSQL)
        except ValueError:
            # component already registered
            pass

    def get_single_indexes(self):
        """Goes through every field looking for an index parameter.

        :returns: a list of 'INDEX ...' SQL fragments
        """
        single_query = []
        for column, property_ in self.get_storm_columns():
            wrap_column = column._get_column(self.model.__class__)
            index = wrap_column.index
            unique = wrap_column.unique
            if unique:
                # We already have a index for this column, so move on.
                continue
            if index:
                query = 'INDEX `{}_ind` (`{}`)'.format(
                    property_.name, property_.name
                )
                single_query.append(query)
        return single_query

    def get_compound_indexes(self):
        """Checks if the model has an __mamba_index__ property.
        If so, we create a compound index with the fields specified inside
        __mamba_index__. This variable must be a tuple of tuples.

        Example: (
            ('field1', 'field2'),
            ('field3', 'field4', 'field5')
        )
        """
        compound_indexes = getattr(self.model, '__mamba_index__', None)
        if compound_indexes is None:
            return []
        compound_query = []
        for compound in compound_indexes:
            query = 'INDEX `{}_ind` ({})'.format(
                '_'.join(compound),
                ', '.join(['`{}`'.format(c) for c in compound])
            )
            compound_query.append(query)
        return compound_query

    def detect_indexes(self):
        """
        Go through all the fields defined in the model and create a index
        constraint if the index property is set on the field.

        :returns: single and compound index fragments joined with commas
        """
        indexes = []
        indexes.extend(self.get_single_indexes())
        indexes.extend(self.get_compound_indexes())
        return ', '.join(indexes)

    def get_single_uniques(self):
        """Goes through every field looking for an unique parameter.

        :returns: a list of 'UNIQUE ...' SQL fragments
        """
        single_query = []
        for column, property_ in self.get_storm_columns():
            wrap_column = column._get_column(self.model.__class__)
            unique = wrap_column.unique
            if unique:
                query = 'UNIQUE `{}_uni` (`{}`)'.format(
                    property_.name,
                    property_.name
                )
                single_query.append(query)
        return single_query

    def get_compound_uniques(self):
        """Checks if the model has an __mamba_unique__ property.
        If so, we create a compound unique with the fields specified inside
        __mamba_unique__. This variable must be a tuple of tuples.

        Example: (
            ('field1', 'field2'),
            ('field3', 'field4', 'field5')
        )
        """
        compound_uniques = getattr(self.model, '__mamba_unique__', None)
        if compound_uniques is None:
            return []
        compound_query = []
        for compound in compound_uniques:
            query = 'UNIQUE `{}_uni` ({})'.format(
                '_'.join(compound),
                ', '.join(['`{}`'.format(c) for c in compound])
            )
            compound_query.append(query)
        return compound_query

    def detect_uniques(self):
        """
        Go through all the fields defined in the model and create a unique
        key if the unique property is set on the field.

        :returns: single and compound unique fragments joined with commas
        """
        uniques = []
        uniques.extend(self.get_single_uniques())
        uniques.extend(self.get_compound_uniques())
        return ', '.join(uniques)

    def parse_references(self):
        """
        Get all the :class:`storm.references.Reference` and create foreign
        keys for the SQL creation script

        If we are using references we should define our classes in a
        correct way. If we have a model that have a relation of many
        to one, we should define a many-to-one Storm relationship in
        that object but we must create a one-to-many relation in the
        related model. That means if for example we have a `Customer`
        model and an `Adress` model and we need to relate them as
        one Customer may have several addresses (in a real application
        address may have a relation many-to-many with customer) we
        should define a relation with `Reference` from Address to
        Customer using a property like `Address.customer_id` and a
        `ReferenceSet` from `Customer` to `Address` like:

            Customer.addresses = ReferenceSet(Customer.id, Address.id)

        In the case of many-to-many relationships, mamba create the
        relation tables by itself so you don't need to take care of
        yourself.

        .. warning:

            If no InnoDB is used as engine in MySQL then this is skipped.
            :class:`storm.references.ReferenceSet` does not generate
            foreign keys by itself. If you need a many2many relation you
            should add a Reference for the compound primary key in the
            relation table
        """
        # MyISAM (and other engines) don't enforce foreign keys, so only
        # emit them for InnoDB.
        if self.engine != 'InnoDB':
            return
        references = []
        for attr in inspect.classify_class_attrs(self.model.__class__):
            if type(attr.object) is Reference:
                relation = attr.object._relation
                if relation.on_remote is True:
                    # Don't create an index for this as is defined on remote.
                    continue
                keys = {
                    'remote': relation.remote_key,
                    'local': relation.local_key
                }
                remote_table = relation.remote_cls.__storm_table__
                localkeys = ', '.join(
                    '`{}`'.format(k.name) for k in keys.get('local')
                )
                remotekeys = ', '.join(
                    '`{}`'.format(k.name) for k in keys.get('remote')
                )
                query = (
                    'INDEX `{field}_{remote_table}_fk_ind` ({localkeys}), '
                    'FOREIGN KEY ({localkeys}) REFERENCES `{remote_table}` '
                    '({remotekeys}) ON UPDATE {on_update} '
                    'ON DELETE {on_delete}'.format(
                        field=keys.get('local')[0].name,
                        remote_table=remote_table,
                        localkeys=localkeys,
                        remotekeys=remotekeys,
                        on_update=getattr(
                            self.model, '__on_update__', 'RESTRICT'),
                        on_delete=getattr(
                            self.model, '__on_delete__', 'RESTRICT')
                    )
                )
                references.append(query)
        return ', '.join(references)

    def parse(self, column):
        """This function is just a fallback to text (tears are coming)
        """
        return self._columns_mapping.get(column.__class__, 'text')

    def parse_int(self, column):
        """
        Parse an specific integer type for MySQL, for example:

            smallint UNSIGNED

        :param column: the Storm properties column to parse
        :type column: :class:`storm.properties.Int`
        """
        column_name = column.__class__.__name__
        wrap_column = column._get_column(self.model.__class__)
        auto_increment = wrap_column.auto_increment
        unsigned = wrap_column.unsigned
        size = wrap_column.size
        # Undef means "no explicit size": omit the (N) display width.
        return '{}{}{}'.format(
            '{}{}'.format(
                column_name.lower(),
                '({})'.format(size) if size is not Undef else ''
            ),
            ' UNSIGNED' if unsigned else '',
            ' AUTO_INCREMENT' if auto_increment else ''
        )

    def parse_decimal(self, column):
        """Parse decimal sizes for MySQL, for example:

            decimal(10,2)

        :param column: the Storm properties column to parse
        :type column: :class:`storm.properties.Decimal`
        """
        column_name = column.__class__.__name__
        wrap_column = column._get_column(self.model.__class__)
        size = wrap_column.size
        # A non-tuple result is the bare type name fallback for unknown
        # size types (see parse_decimal_size).
        parsed_size = parse_decimal_size(size, column_name)
        if type(parsed_size) is not tuple:
            return parsed_size
        # NOTE(review): parse_decimal_size is invoked a second time here
        # with the same arguments; parsed_size could be reused.
        return '{}{}'.format(
            column_name.lower(), '({},{})'.format(
                *parse_decimal_size(size, column_name))
        )

    def parse_column(self, column):
        """
        Parse a Storm column to the correct MySQL value type. For example,
        if we pass a column of type :class:`~mamba.variable.SmallIntVariable`
        with name `amount` we get back:

            `amount` smallint

        :param column: the Storm properties column to parse
        :type column: :class:`storm.properties`
        """
        column_type = '`{}` {}{}{}'.format(
            column._detect_attr_name(self.model.__class__),
            self.parse(column),
            self._null_allowed(column),
            self._default(column)
        )
        return column_type

    def parse_enum(self, column):
        """Parse an enum column

        :param column: the Storm properties column to parse
        :type column: :class:`storm.properties`
        :raises: MySQLNotEnumColumn if the column is not a native enum
        """
        if column.variable_class is not NativeEnumVariable:
            raise MySQLNotEnumColumn(
                'Column {} is not an Enum column'.format(column)
            )
        # The allowed values live in the variable kwargs under '_set'.
        data = column._variable_kwargs.get('_set', set())
        return '`{}` enum({})'.format(
            column._detect_attr_name(self.model.__class__),
            ', '.join("'{}'".format(i) for i in data)
        )

    def detect_primary_key(self):
        """
        Detect the primary key for the model and return it back with the
        correct MySQL syntax, Example:

            PRIMARY KEY(`id`)

        :returns: a string with the correct MySQL syntax
        :rtype: str
        :raises: MySQLMissingPrimaryKey on missing primary key
        """
        primary_key = self.get_primary_key_names()
        if primary_key is None:
            raise MySQLMissingPrimaryKey(
                'MySQL based model {} is missing a primary key column'.format(
                    repr(self.model)
                )
            )
        primary_key_str = ', '.join(['`{}`'.format(c) for c in primary_key])
        return 'PRIMARY KEY({})'.format(primary_key_str)

    def create_table(self):
        """Return the MySQL syntax for create a table with this model
        """
        query = 'CREATE TABLE {} (\n'.format((
            'IF NOT EXISTS `{}`'.format(self.model.__storm_table__) if (
                config.Database().create_table_behaviours.get(
                    'create_table_if_not_exists'))
            else '`' + self.model.__storm_table__ + '`'
        ))
        # Primary key columns come first; compound-key members are parsed
        # here and skipped in the loop below.
        primary_keys = self.get_primary_key_columns()
        if primary_keys is not None:
            for pk in primary_keys:
                query += ' {},\n'.format(self.parse_column(pk))
        for column, property_ in self.get_storm_columns():
            if property_.primary == 1 or self.is_compound_key(property_.name):
                continue
            if column.variable_class is not NativeEnumVariable:
                query += ' {},\n'.format(self.parse_column(column))
            else:
                query += ' {},\n'.format(self.parse_enum(column))
        query += ' {}\n'.format(self.detect_primary_key())
        query += '{}'.format(
            ', {}'.format(self.detect_uniques()) if self.detect_uniques()
            else ''
        )
        query += '{}'.format(
            ', {}'.format(self.detect_indexes()) if self.detect_indexes()
            else ''
        )
        query += '{}'.format(
            ', {}'.format(self.parse_references()) if self.parse_references()
            else ''
        )
        query += '\n) ENGINE={} DEFAULT CHARSET=utf8;\n'.format(self.engine)
        # Optionally prepend a DROP TABLE when configured to recreate.
        if (config.Database().create_table_behaviours.get('drop_table')
                and not config.Database().create_table_behaviours.get(
                    'create_if_not_exists')):
            query = '{};\n{}'.format(
                self.drop_table(),
                query
            )
        return query

    def drop_table(self):
        """Return MySQL syntax for drop this model table
        """
        existance = config.Database().drop_table_behaviours.get(
            'drop_if_exists', True)
        query = 'DROP TABLE {}`{}`'.format(
            'IF EXISTS ' if existance else '',
            self.model.__storm_table__
        )
        return query

    def _default(self, column):
        """
        Get the default argument for a column (if any)

        :param column: the Storm properties column to parse
        :type column: :class:`storm.properties.Property`
        """
        if column._variable_kwargs.get('value') is not Undef:
            property_column = column._get_column(self.model.__class__)
            variable = property_column.variable_factory()
            # MySQL stores booleans as tinyint, so coerce the default too.
            if type(variable._value) is bool:
                variable._value = int(variable._value)
            if variable._value is None:
                variable._value = 'NULL'
            # Temporal defaults must be quoted in the generated SQL.
            if (column.variable_class is variables.DateTimeVariable
                    or column.variable_class is variables.TimeVariable
                    or column.variable_class is variables.DateVariable):
                if variable._value is not Undef:
                    variable._value = "'" + str(variable._value) + "'"
            if variable._value is not Undef:
                return ' default {}'.format(variable._value)
        return ''
@singledispatch
def parse_decimal_size(size, column_name=None):
    """Fallback for unknown decimal size types.

    When the size is of an unhandled type we cannot build a
    ``decimal(length, precision)`` clause, so the bare lowercased column
    type name is returned instead of a tuple.

    :param size: the given size
    :param column_name: the Storm property class name (e.g. 'Decimal')
    """
    return column_name.lower()


@parse_decimal_size.register(list)
@parse_decimal_size.register(tuple)
def _parse_decimal_size_list(size, column_name=None):
    """Parse list decimal size

    :returns: tuple of (length, precision)
    """
    return size[0], size[1]


@parse_decimal_size.register(str)
def _parse_decimal_size_str(size, column_name=None):
    """Parse str decimal size, e.g. '10,2' or '10' (precision defaults to 2)

    :returns: tuple of (length, precision)
    """
    size = size.split(',')
    if len(size) == 1:
        return size[0], 2
    else:
        return size[0], size[1]


@parse_decimal_size.register(int)
def _parse_decimal_size_int(size, column_name=None):
    """Parse int decimal size; precision defaults to 2

    :returns: tuple of (length, precision)
    """
    return size, 2


@parse_decimal_size.register(float)
def _parse_decimal_size_float(size, column_name=None):
    """Parse float decimal size, e.g. 10.2 -> ('10', '2')

    :returns: tuple of (length, precision)
    """
    size = str(size).split('.')
    return size[0], size[1]
| gpl-3.0 |
FederatedAI/FATE | python/fate_client/pipeline/component/hetero_fast_secureboost.py | 1 | 1367 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.param.boosting_param import HeteroFastSecureBoostParam
from pipeline.component.component_base import Component
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.logger import LOGGER
class HeteroFastSecureBoost(Component, HeteroFastSecureBoostParam):
    """Pipeline component wrapping FATE's HeteroFastSecureBoost module.

    Multiple inheritance: ``Component`` supplies the pipeline wiring
    (name, input/output slots) while ``HeteroFastSecureBoostParam``
    carries the algorithm parameters.
    """

    def __init__(self, **kwargs):
        Component.__init__(self, **kwargs)
        # print(self.name)
        LOGGER.debug(f"{self.name} component created")
        # Presumably strips the Component-level kwargs so the param base
        # class only sees algorithm parameters -- TODO confirm against
        # erase_component_base_param's definition.
        new_kwargs = self.erase_component_base_param(**kwargs)
        HeteroFastSecureBoostParam.__init__(self, **new_kwargs)
        # "multi" marks this component as accepting multiple data inputs.
        self.input = Input(self.name, data_type="multi")
        self.output = Output(self.name)
        self._module_name = "HeteroFastSecureBoost"
| apache-2.0 |
nubjs/nubjs | tools/gyp/pylib/gyp/easy_xml.py | 1049 | 4803 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
    """Render an easy_xml specification as an XML string.

    An element is described by a list: its tag name first, then an
    optional attribute dictionary, then any mix of text nodes (strings)
    and child elements (nested lists), e.g.::

        ['myelement', {'a': 'value1', 'b': 'value2'},
            ['childtype', 'This is'],
            ['childtype', 'it!'],
        ]

    Args:
      content: The structured content to be converted.
      encoding: The encoding to report on the first XML line.
      pretty: True if we want pretty printing with indents and new lines.

    Returns:
      The XML content as a string.
    """
    declaration = '<?xml version="1.0" encoding="%s"?>' % encoding
    xml_parts = [declaration, '\n'] if pretty else [declaration]
    _ConstructContentList(xml_parts, content, pretty)
    return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
    """ Appends the XML parts corresponding to the specification.

    Args:
      xml_parts: A list of XML parts to be appended to.
      specification: The specification of the element. See EasyXml docs.
      pretty: True if we want pretty printing with indents and new lines.
      level: Indentation level.
    """
    # The first item in a specification is the name of the element.
    if pretty:
        indentation = ' ' * level
        new_line = '\n'
    else:
        indentation = ''
        new_line = ''
    name = specification[0]
    if not isinstance(name, str):
        raise Exception('The first item of an EasyXml specification should be '
                        'a string. Specification was ' + str(specification))
    xml_parts.append(indentation + '<' + name)

    # Optionally in second position is a dictionary of the attributes.
    rest = specification[1:]
    if rest and isinstance(rest[0], dict):
        # sorted(...items()) replaces the Python 2-only iteritems() call;
        # identical behavior on Python 2, and valid on Python 3 as well.
        for at, val in sorted(rest[0].items()):
            xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
        rest = rest[1:]
    if rest:
        xml_parts.append('>')
        # all() replaces the Python 2-only bare reduce() builtin with the
        # same truth value: True iff every remaining item is a string.
        all_strings = all(isinstance(y, str) for y in rest)
        multi_line = not all_strings
        if multi_line and new_line:
            xml_parts.append(new_line)
        for child_spec in rest:
            # If it's a string, append a text node.
            # Otherwise recurse over that child definition
            if isinstance(child_spec, str):
                xml_parts.append(_XmlEscape(child_spec))
            else:
                _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
        if multi_line and indentation:
            xml_parts.append(indentation)
        xml_parts.append('</%s>%s' % (name, new_line))
    else:
        xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
    """ Writes the XML content to disk, touching the file only if it has changed.

    Args:
      content: The structured content to be written.
      path: Location of the file.
      encoding: The encoding to report on the first line of the XML file.
      pretty: True if we want pretty printing with indents and new lines.
      win32: True to emit CRLF line endings when the platform does not
        already use them.
    """
    xml_string = XmlToString(content, encoding, pretty)
    if win32 and os.linesep != '\r\n':
        xml_string = xml_string.replace('\n', '\r\n')

    # Get the old content.  The original used a bare `except:` here, which
    # also swallowed KeyboardInterrupt/SystemExit, and leaked the file
    # handle if read() raised; catch only I/O errors and use `with`.
    try:
        with open(path, 'r') as f:
            existing = f.read()
    except IOError:
        # Missing or unreadable file: treat as changed so it gets written.
        existing = None

    # It has changed, write it
    if existing != xml_string:
        with open(path, 'w') as f:
            f.write(xml_string)
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| mit |
ishank08/scikit-learn | sklearn/ensemble/base.py | 19 | 5168 | """
Base class for ensemble-based estimators.
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
import numbers
from ..base import clone
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..utils import _get_n_jobs, check_random_state
MAX_RAND_SEED = np.iinfo(np.int32).max
def _set_random_states(estimator, random_state=None):
    """Sets fixed random_state parameters for an estimator

    Finds all parameters ending ``random_state`` and sets them to integers
    derived from ``random_state``.

    Parameters
    ----------
    estimator : estimator supporting get/set_params
        Estimator with potential randomness managed by random_state
        parameters.

    random_state : numpy.RandomState or int, optional
        Random state used to generate integer values.

    Notes
    -----
    This does not necessarily set *all* ``random_state`` attributes that
    control an estimator's randomness, only those accessible through
    ``estimator.get_params()``. ``random_state``s not controlled include
    those belonging to:

        * cross-validation splitters
        * ``scipy.stats`` rvs
    """
    rng = check_random_state(random_state)
    # Draw one integer seed per random_state parameter, iterating parameter
    # names in sorted order so results are reproducible for a given rng.
    seeds = {
        name: rng.randint(MAX_RAND_SEED)
        for name in sorted(estimator.get_params(deep=True))
        if name == 'random_state' or name.endswith('__random_state')
    }
    if seeds:
        estimator.set_params(**seeds)
class BaseEnsemble(BaseEstimator, MetaEstimatorMixin):
    """Base class for all ensemble classes.

    Warning: This class should not be used directly. Use derived classes
    instead.

    Parameters
    ----------
    base_estimator : object, optional (default=None)
        The base estimator from which the ensemble is built.

    n_estimators : integer
        The number of estimators in the ensemble.

    estimator_params : list of strings
        The list of attributes to use as parameters when instantiating a
        new base estimator. If none are given, default parameters are used.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.

    estimators_ : list of estimators
        The collection of fitted base estimators.
    """

    def __init__(self, base_estimator, n_estimators=10,
                 estimator_params=tuple()):
        # Set parameters
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.estimator_params = estimator_params

        # Don't instantiate estimators now! Parameters of base_estimator might
        # still change. Eg., when grid-searching with the nested object syntax.
        # self.estimators_ needs to be filled by the derived classes in fit.

    def _validate_estimator(self, default=None):
        """Check the estimator and the n_estimator attribute, set the
        `base_estimator_` attribute."""
        # Accept both Python and NumPy integer types for n_estimators.
        if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):
            raise ValueError("n_estimators must be an integer, "
                             "got {0}.".format(type(self.n_estimators)))

        if self.n_estimators <= 0:
            raise ValueError("n_estimators must be greater than zero, "
                             "got {0}.".format(self.n_estimators))

        # Fall back to the subclass-supplied default when the user did not
        # pass a base_estimator explicitly.
        if self.base_estimator is not None:
            self.base_estimator_ = self.base_estimator
        else:
            self.base_estimator_ = default

        if self.base_estimator_ is None:
            raise ValueError("base_estimator cannot be None")

    def _make_estimator(self, append=True, random_state=None):
        """Make and configure a copy of the `base_estimator_` attribute.

        Warning: This method should be used to properly instantiate new
        sub-estimators.
        """
        estimator = clone(self.base_estimator_)
        # Copy the ensemble-level attributes named in estimator_params onto
        # the fresh clone (e.g. parameters shared by all sub-estimators).
        estimator.set_params(**dict((p, getattr(self, p))
                                    for p in self.estimator_params))

        if random_state is not None:
            _set_random_states(estimator, random_state)

        if append:
            self.estimators_.append(estimator)

        return estimator

    def __len__(self):
        """Returns the number of estimators in the ensemble."""
        return len(self.estimators_)

    def __getitem__(self, index):
        """Returns the index'th estimator in the ensemble."""
        return self.estimators_[index]

    def __iter__(self):
        """Returns iterator over estimators in the ensemble."""
        return iter(self.estimators_)
def _partition_estimators(n_estimators, n_jobs):
    """Private function used to partition estimators between jobs.

    Returns the effective number of jobs, the number of estimators assigned
    to each job, and the cumulative start index of each job's slice.
    """
    # Compute the number of jobs: never use more jobs than estimators.
    n_jobs = min(_get_n_jobs(n_jobs), n_estimators)

    # Partition estimators between jobs: each job gets the floor share, and
    # the first (n_estimators % n_jobs) jobs get one extra.
    # Use the builtin ``int`` dtype: ``np.int`` was a deprecated alias of the
    # builtin and is removed in NumPy >= 1.24, where it raises AttributeError.
    n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
    n_estimators_per_job[:n_estimators % n_jobs] += 1
    starts = np.cumsum(n_estimators_per_job)

    return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
| bsd-3-clause |
Juniper/nova | nova/tests/unit/fake_pci_device_pools.py | 83 | 1457 | # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import pci_device_pool
# This represents the format that PCI device pool info was stored in the DB
# before this info was made into objects.
# Legacy DB-format pool: typed fields and arbitrary tag key/value pairs
# ('t1'/'t2') are stored flat in the same dict.
fake_pool_dict = {
    'product_id': 'fake-product',
    'vendor_id': 'fake-vendor',
    'numa_node': 1,
    't1': 'v1',
    't2': 'v2',
    'count': 2,
    }

# Object-based counterpart built with PciDevicePool; here tags live in their
# own nested dict rather than flat alongside the typed fields.
fake_pool = pci_device_pool.PciDevicePool(count=5,
                                          product_id='foo',
                                          vendor_id='bar',
                                          numa_node=0,
                                          tags={'t1': 'v1', 't2': 'v2'})
# Serialized ("primitive") form produced by obj_to_primitive().
fake_pool_primitive = fake_pool.obj_to_primitive()

fake_pool_list = pci_device_pool.PciDevicePoolList(objects=[fake_pool])
fake_pool_list_primitive = fake_pool_list.obj_to_primitive()
| apache-2.0 |
fly19890211/edx-platform | lms/djangoapps/lms_xblock/migrations/0001_initial.py | 110 | 4883 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration that creates the XBlockAsidesConfig table.

    Note: South is the pre-Django-1.7 migration framework; the ``models``
    dict below is South's frozen ORM snapshot, not live model definitions,
    and must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the XBlockAsidesConfig table."""
        # Adding model 'XBlockAsidesConfig'
        db.create_table('lms_xblock_xblockasidesconfig', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('disabled_blocks', self.gf('django.db.models.fields.TextField')(default='about course_info static_tab')),
        ))
        db.send_create_signal('lms_xblock', ['XBlockAsidesConfig'])

    def backwards(self, orm):
        """Revert the migration: drop the XBlockAsidesConfig table."""
        # Deleting model 'XBlockAsidesConfig'
        db.delete_table('lms_xblock_xblockasidesconfig')

    # Frozen ORM state used by South to resolve the orm[...] lookups above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lms_xblock.xblockasidesconfig': {
            'Meta': {'object_name': 'XBlockAsidesConfig'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'disabled_blocks': ('django.db.models.fields.TextField', [], {'default': "'about course_info static_tab'"}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
complete_apps = ['lms_xblock'] | agpl-3.0 |
jerome-jacob/selenium | py/selenium/webdriver/opera/webdriver.py | 71 | 3352 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
    """Controls the new OperaDriver and allows you
    to drive the Opera browser based on Chromium."""

    def __init__(self, executable_path=None, port=0,
                 opera_options=None, service_args=None,
                 desired_capabilities=None, service_log_path=None):
        """
        Creates a new instance of the operadriver.

        Starts the service and then creates new instance of operadriver.

        :Args:
         - executable_path - path to the executable. If the default is used
                             it assumes the executable is in the $PATH
         - port - port you would like the service to run, if left as 0,
                  a free port will be found.
         - opera_options - an Options instance used to configure the browser.
         - service_args - list of extra arguments for the driver service.
         - desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - service_log_path - where the driver service writes its log.
        """
        if executable_path is None:
            executable_path = "operadriver"
        # Opera's driver is Chromium-based, so opera_options are forwarded
        # through the chrome_options parameter of the Chromium driver.
        ChromiumDriver.__init__(self,
                                executable_path=executable_path,
                                port=port,
                                chrome_options=opera_options,
                                service_args=service_args,
                                desired_capabilities=desired_capabilities,
                                service_log_path=service_log_path)

    def create_options(self):
        """Return a fresh Opera Options instance."""
        return Options()
class WebDriver(OperaDriver):
    # NOTE(review): only the Chromium-based service is represented here;
    # presumably this enum is kept for backwards compatibility with older
    # Presto-based drivers -- confirm before removing.
    class ServiceType:
        CHROMIUM = 2

    def __init__(self,
                 desired_capabilities=None,
                 executable_path=None,
                 port=0,
                 service_log_path=None,
                 service_args=None,
                 opera_options=None):
        # Thin convenience wrapper: forwards all arguments unchanged to
        # OperaDriver (and ultimately the Chromium driver).
        OperaDriver.__init__(self, executable_path=executable_path,
                             port=port, opera_options=opera_options,
                             service_args=service_args,
                             desired_capabilities=desired_capabilities,
                             service_log_path=service_log_path)
| apache-2.0 |
cycotech/WAR-app | env/lib/python3.5/site-packages/django/views/decorators/clickjacking.py | 335 | 1744 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'DENY' as long as the response doesn't already have that
    header set.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect a header the view (or other code) already set.
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'DENY'
        return response
    return wrapped_view
def xframe_options_sameorigin(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'SAMEORIGIN' as long as the response doesn't already have
    that header set.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect a header the view (or other code) already set.
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'SAMEORIGIN'
        return response
    return wrapped_view
def xframe_options_exempt(view_func):
    """
    Modifies a view function by setting a response variable that instructs
    XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # The middleware checks this attribute before adding the header.
        response.xframe_options_exempt = True
        return response
    return wrapped_view
| mit |
alexgorban/models | research/attention_ocr/python/train.py | 17 | 7328 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to train the Attention OCR model.
A simple usage example:
python train.py
"""
import collections
import logging
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import app
from tensorflow.python.platform import flags
from tensorflow.contrib.tfprof import model_analyzer
import data_provider
import common_flags
FLAGS = flags.FLAGS
common_flags.define()
# yapf: disable
# Command-line flags controlling distributed training, checkpointing and
# gradient handling; values are read through the module-level FLAGS object.
flags.DEFINE_integer('task', 0,
                     'The Task ID. This value is used when training with '
                     'multiple workers to identify each worker.')

flags.DEFINE_integer('ps_tasks', 0,
                     'The number of parameter servers. If the value is 0, then'
                     ' the parameters are handled locally by the worker.')

flags.DEFINE_integer('save_summaries_secs', 60,
                     'The frequency with which summaries are saved, in '
                     'seconds.')

flags.DEFINE_integer('save_interval_secs', 600,
                     'Frequency in seconds of saving the model.')

flags.DEFINE_integer('max_number_of_steps', int(1e10),
                     'The maximum number of gradient steps.')

flags.DEFINE_string('checkpoint_inception', '',
                    'Checkpoint to recover inception weights from.')

flags.DEFINE_float('clip_gradient_norm', 2.0,
                   'If greater than 0 then the gradients would be clipped by '
                   'it.')

flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')

flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradients updates before updating params.')

flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')

flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')

flags.DEFINE_boolean('reset_train_dir', False,
                     'If true will delete all files in the train_log_dir')

flags.DEFINE_boolean('show_graph_stats', False,
                     'Output model size stats to stderr.')
# yapf: enable
# Hyperparameters that influence training, bundled into one immutable tuple
# so they can be passed around independently of the global FLAGS object.
TrainingHParams = collections.namedtuple('TrainingHParams', [
    'learning_rate',
    'optimizer',
    'momentum',
    'use_augment_input',
])
def get_training_hparams():
  """Returns a TrainingHParams tuple populated from the command-line FLAGS."""
  return TrainingHParams(
      learning_rate=FLAGS.learning_rate,
      optimizer=FLAGS.optimizer,
      momentum=FLAGS.momentum,
      use_augment_input=FLAGS.use_augment_input)
def create_optimizer(hparams):
  """Creates optimizer based on the specified flags.

  Args:
    hparams: a TrainingHParams tuple; hparams.optimizer selects the
      optimizer and hparams.learning_rate / hparams.momentum configure it.

  Returns:
    A tf.train Optimizer instance.

  Raises:
    ValueError: if hparams.optimizer is not one of the supported names.
  """
  if hparams.optimizer == 'momentum':
    optimizer = tf.train.MomentumOptimizer(
        hparams.learning_rate, momentum=hparams.momentum)
  elif hparams.optimizer == 'adam':
    optimizer = tf.train.AdamOptimizer(hparams.learning_rate)
  elif hparams.optimizer == 'adadelta':
    optimizer = tf.train.AdadeltaOptimizer(hparams.learning_rate)
  elif hparams.optimizer == 'adagrad':
    optimizer = tf.train.AdagradOptimizer(hparams.learning_rate)
  elif hparams.optimizer == 'rmsprop':
    optimizer = tf.train.RMSPropOptimizer(
        hparams.learning_rate, momentum=hparams.momentum)
  else:
    # Previously an unknown name fell through and crashed later with an
    # opaque UnboundLocalError; fail fast with a clear message instead.
    raise ValueError('Unsupported optimizer: %s' % hparams.optimizer)
  return optimizer
def train(loss, init_fn, hparams):
  """Wraps slim.learning.train to run a training loop.

  Args:
    loss: a loss tensor
    init_fn: A callable to be executed after all other initialization is done.
    hparams: a model hyper parameters
  """
  optimizer = create_optimizer(hparams)

  if FLAGS.sync_replicas:
    replica_id = tf.constant(FLAGS.task, tf.int32, shape=())
    # NOTE(review): the public API for synchronous replica training is
    # tf.train.SyncReplicasOptimizer; confirm tf.LegacySyncReplicasOptimizer
    # actually exists in the targeted TF version.
    optimizer = tf.LegacySyncReplicasOptimizer(
        opt=optimizer,
        replicas_to_aggregate=FLAGS.replicas_to_aggregate,
        replica_id=replica_id,
        total_num_replicas=FLAGS.total_num_replicas)
    sync_optimizer = optimizer
    startup_delay_steps = 0
  else:
    # NOTE(review): both branches set startup_delay_steps to 0, so the
    # FLAGS.startup_delay_steps flag is never used -- confirm intended.
    startup_delay_steps = 0
    sync_optimizer = None

  train_op = slim.learning.create_train_op(
      loss,
      optimizer,
      summarize_gradients=True,
      clip_gradient_norm=FLAGS.clip_gradient_norm)

  # slim.learning.train drives the session loop: checkpointing, summaries
  # and (optionally) synchronous replica coordination.
  slim.learning.train(
      train_op=train_op,
      logdir=FLAGS.train_log_dir,
      graph=loss.graph,
      master=FLAGS.master,
      is_chief=(FLAGS.task == 0),
      number_of_steps=FLAGS.max_number_of_steps,
      save_summaries_secs=FLAGS.save_summaries_secs,
      save_interval_secs=FLAGS.save_interval_secs,
      startup_delay_steps=startup_delay_steps,
      sync_optimizer=sync_optimizer,
      init_fn=init_fn)
def prepare_training_dir():
  """Ensures FLAGS.train_log_dir exists, optionally wiping a previous run."""
  if not tf.gfile.Exists(FLAGS.train_log_dir):
    logging.info('Create a new training directory %s', FLAGS.train_log_dir)
    tf.gfile.MakeDirs(FLAGS.train_log_dir)
    return
  if FLAGS.reset_train_dir:
    # Delete and recreate so training starts from a clean slate.
    logging.info('Reset the training directory %s', FLAGS.train_log_dir)
    tf.gfile.DeleteRecursively(FLAGS.train_log_dir)
    tf.gfile.MakeDirs(FLAGS.train_log_dir)
  else:
    logging.info('Use already existing training directory %s',
                 FLAGS.train_log_dir)
def calculate_graph_metrics():
  """Returns the total number of trainable parameters in the default graph."""
  # print_model_analysis also writes the per-variable breakdown to stderr.
  param_stats = model_analyzer.print_model_analysis(
      tf.get_default_graph(),
      tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
  return param_stats.total_parameters
def main(_):
  """Builds the training graph from FLAGS and runs the training loop."""
  prepare_training_dir()

  dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
  model = common_flags.create_model(dataset.num_char_classes,
                                    dataset.max_sequence_length,
                                    dataset.num_of_views, dataset.null_code)
  hparams = get_training_hparams()

  # If ps_tasks is zero, the local device is used. When using multiple
  # (non-local) replicas, the ReplicaDeviceSetter distributes the variables
  # across the different devices.
  device_setter = tf.train.replica_device_setter(
      FLAGS.ps_tasks, merge_devices=True)
  with tf.device(device_setter):
    data = data_provider.get_data(
        dataset,
        FLAGS.batch_size,
        augment=hparams.use_augment_input,
        central_crop_size=common_flags.get_crop_size())
    endpoints = model.create_base(data.images, data.labels_one_hot)
    total_loss = model.create_loss(data, endpoints)
    model.create_summaries(data, endpoints, dataset.charset, is_training=True)
    # Restore weights from a full-model checkpoint and/or a separate
    # Inception checkpoint for the convolutional tower.
    init_fn = model.create_init_fn_to_restore(FLAGS.checkpoint,
                                              FLAGS.checkpoint_inception)
  if FLAGS.show_graph_stats:
    logging.info('Total number of weights in the graph: %s',
                 calculate_graph_metrics())
  train(total_loss, init_fn, hparams)
if __name__ == '__main__':
app.run()
| apache-2.0 |
veger/ansible | contrib/inventory/cloudstack.py | 42 | 10491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Ansible CloudStack external inventory script.
=============================================
Generates Ansible inventory from CloudStack. Configuration is read from
'cloudstack.ini'. If you need to pass the project, write a simple wrapper
script, e.g. project_cloudstack.sh:
#!/bin/bash
cloudstack.py --project <your_project> $@
When run against a specific host, this script returns the following attributes
based on the data obtained from CloudStack API:
"web01": {
"cpu_number": 2,
"nic": [
{
"ip": "10.102.76.98",
"mac": "02:00:50:99:00:01",
"type": "Isolated",
"netmask": "255.255.255.0",
"gateway": "10.102.76.1"
},
{
"ip": "10.102.138.63",
"mac": "06:b7:5a:00:14:84",
"type": "Shared",
"netmask": "255.255.255.0",
"gateway": "10.102.138.1"
}
],
"default_ip": "10.102.76.98",
"zone": "ZUERICH",
"created": "2014-07-02T07:53:50+0200",
"hypervisor": "VMware",
"memory": 2048,
"state": "Running",
"tags": [],
"cpu_speed": 1800,
"affinity_group": [],
"service_offering": "Small",
"cpu_used": "62%"
}
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN]
"""
from __future__ import print_function
import sys
import argparse
import json
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
print("Error: CloudStack library must be installed: pip install cs.",
file=sys.stderr)
sys.exit(1)
class CloudStackInventory(object):
    """Builds Ansible inventory data from the CloudStack API.

    Instantiating the class parses command-line options, connects to the
    API using the credentials found by ``read_config()`` and prints the
    requested inventory (``--list`` or ``--host``) as JSON.
    """

    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--host')
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--tag', help="Filter machines by a tag. Should be in the form key=value.")
        parser.add_argument('--project')
        parser.add_argument('--domain')

        options = parser.parse_args()
        try:
            self.cs = CloudStack(**read_config())
        except CloudStackException:
            print("Error: Could not connect to CloudStack API", file=sys.stderr)
            # Without a connection every later call would fail with an
            # AttributeError; previously execution fell through here.
            sys.exit(1)

        domain_id = None
        if options.domain:
            domain_id = self.get_domain_id(options.domain)

        project_id = None
        if options.project:
            project_id = self.get_project_id(options.project, domain_id)

        if options.host:
            data = self.get_host(options.host, project_id, domain_id)
            print(json.dumps(data, indent=2))

        elif options.list:
            tags = dict()
            if options.tag:
                # maxsplit=1 so tag *values* may themselves contain '='.
                tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=', 1)
            data = self.get_list(project_id, domain_id, **tags)
            print(json.dumps(data, indent=2))
        else:
            print("usage: --list [--tag <tag>] | --host <hostname> [--project <project>] [--domain <domain_path>]",
                  file=sys.stderr)
            sys.exit(1)

    def get_domain_id(self, domain):
        """Return the id of the domain whose path equals ``domain``
        (case-insensitive), or exit with an error if it does not exist."""
        domains = self.cs.listDomains(listall=True)
        if domains:
            for d in domains['domain']:
                if d['path'].lower() == domain.lower():
                    return d['id']
        print("Error: Domain %s not found." % domain, file=sys.stderr)
        sys.exit(1)

    def get_project_id(self, project, domain_id=None):
        """Return the id of the project matching ``project`` by name or id,
        or exit with an error if it does not exist."""
        projects = self.cs.listProjects(domainid=domain_id)
        if projects:
            for p in projects['project']:
                if p['name'] == project or p['id'] == project:
                    return p['id']
        print("Error: Project %s not found." % project, file=sys.stderr)
        sys.exit(1)

    @staticmethod
    def _nic_data(nic):
        """Build the inventory dict for a single NIC.

        'gateway', 'netmask' and 'ip6address' are not present on every NIC
        in the API response, so they are only included when available. (The
        previous code indexed nic['gateway'] / nic['netmask'] unconditionally
        and raised KeyError on NICs without them, making its later
        conditional checks unreachable.)
        """
        nicdata = {
            'ip': nic['ipaddress'],
            'mac': nic['macaddress'],
            'type': nic['type'],
        }
        if 'gateway' in nic:
            nicdata['gateway'] = nic['gateway']
        if 'netmask' in nic:
            nicdata['netmask'] = nic['netmask']
        if 'ip6address' in nic:
            nicdata['ip6'] = nic['ip6address']
        return nicdata

    def get_host(self, name, project_id=None, domain_id=None, **kwargs):
        """Return the hostvars dict for the VM whose display name equals
        ``name``, or an empty dict if no such VM exists."""
        hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
        data = {}
        if not hosts:
            return data
        for host in hosts:
            host_name = host['displayname']
            if name != host_name:
                continue
            data['zone'] = host['zonename']
            if 'group' in host:
                data['group'] = host['group']
            data['state'] = host['state']
            data['service_offering'] = host['serviceofferingname']
            data['affinity_group'] = host['affinitygroup']
            data['security_group'] = host['securitygroup']
            data['cpu_number'] = host['cpunumber']
            # The API field is 'cpuspeed'; the old check looked for a
            # 'cpu_speed' key that never occurs, so cpu_speed was never set
            # (get_list already checked the correct key).
            if 'cpuspeed' in host:
                data['cpu_speed'] = host['cpuspeed']
            if 'cpuused' in host:
                data['cpu_used'] = host['cpuused']
            data['memory'] = host['memory']
            data['tags'] = host['tags']
            if 'hypervisor' in host:
                data['hypervisor'] = host['hypervisor']
            data['created'] = host['created']
            # Report every NIC (previously the loop stopped at the default
            # NIC, truncating the list inconsistently with get_list()).
            data['nic'] = []
            for nic in host['nic']:
                data['nic'].append(self._nic_data(nic))
                if nic['isdefault']:
                    data['default_ip'] = nic['ipaddress']
                    if 'ip6address' in nic:
                        data['default_ip6'] = nic['ip6address']
        return data

    def get_list(self, project_id=None, domain_id=None, **kwargs):
        """Return the full ``--list`` inventory structure: the 'all' group,
        per-zone and per-instance-group groups, and _meta hostvars."""
        data = {
            'all': {
                'hosts': [],
            },
            '_meta': {
                'hostvars': {},
            },
        }

        groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
        if groups:
            for group in groups['instancegroup']:
                group_name = group['name']
                if group_name and group_name not in data:
                    data[group_name] = {
                        'hosts': []
                    }

        hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
        if not hosts:
            return data
        for host in hosts:
            host_name = host['displayname']
            data['all']['hosts'].append(host_name)
            hostvars = {}
            data['_meta']['hostvars'][host_name] = hostvars

            # Make a group per zone
            hostvars['zone'] = host['zonename']
            group_name = host['zonename']
            if group_name not in data:
                data[group_name] = {
                    'hosts': []
                }
            data[group_name]['hosts'].append(host_name)

            if 'group' in host:
                hostvars['group'] = host['group']
            hostvars['state'] = host['state']
            hostvars['service_offering'] = host['serviceofferingname']
            hostvars['affinity_group'] = host['affinitygroup']
            hostvars['security_group'] = host['securitygroup']
            hostvars['cpu_number'] = host['cpunumber']
            if 'cpuspeed' in host:
                hostvars['cpu_speed'] = host['cpuspeed']
            if 'cpuused' in host:
                hostvars['cpu_used'] = host['cpuused']
            hostvars['created'] = host['created']
            hostvars['memory'] = host['memory']
            hostvars['tags'] = host['tags']
            if 'hypervisor' in host:
                hostvars['hypervisor'] = host['hypervisor']

            hostvars['nic'] = []
            for nic in host['nic']:
                hostvars['nic'].append(self._nic_data(nic))
                if nic['isdefault']:
                    hostvars['default_ip'] = nic['ipaddress']
                    if 'ip6address' in nic:
                        hostvars['default_ip6'] = nic['ip6address']

            # Also add the host to its instance group, if it has one.
            group_name = ''
            if 'group' in host:
                group_name = host['group']
            if group_name and group_name in data:
                data[group_name]['hosts'].append(host_name)
        return data
if __name__ == '__main__':
CloudStackInventory()
| gpl-3.0 |
nburn42/tensorflow | tensorflow/contrib/text/python/ops/skip_gram_ops_test.py | 50 | 23107 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from tensorflow.contrib import lookup
from tensorflow.contrib import text
from tensorflow.contrib.text.python.ops import skip_gram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class SkipGramOpsTest(test.TestCase):
def _split_tokens_labels(self, output):
tokens = [x[0] for x in output]
labels = [x[1] for x in output]
return tokens, labels
  def test_skip_gram_sample_skips_2(self):
    """Tests skip-gram with min_skips = max_skips = 2."""
    input_tensor = constant_op.constant(
        [b"the", b"quick", b"brown", b"fox", b"jumps"])
    tokens, labels = text.skip_gram_sample(
        input_tensor, min_skips=2, max_skips=2)
    # Each token is paired with up to 2 neighbors on each side, clipped at
    # the sequence boundaries.
    expected_tokens, expected_labels = self._split_tokens_labels([
        (b"the", b"quick"),
        (b"the", b"brown"),
        (b"quick", b"the"),
        (b"quick", b"brown"),
        (b"quick", b"fox"),
        (b"brown", b"the"),
        (b"brown", b"quick"),
        (b"brown", b"fox"),
        (b"brown", b"jumps"),
        (b"fox", b"quick"),
        (b"fox", b"brown"),
        (b"fox", b"jumps"),
        (b"jumps", b"brown"),
        (b"jumps", b"fox"),
    ])
    with self.test_session():
      self.assertAllEqual(expected_tokens, tokens.eval())
      self.assertAllEqual(expected_labels, labels.eval())
  def test_skip_gram_sample_emit_self(self):
    """Tests skip-gram with emit_self_as_target = True."""
    input_tensor = constant_op.constant(
        [b"the", b"quick", b"brown", b"fox", b"jumps"])
    tokens, labels = text.skip_gram_sample(
        input_tensor, min_skips=2, max_skips=2, emit_self_as_target=True)
    # Same window pairs as the skips=2 case, plus one (token, token) pair
    # for every token because each token is also its own target.
    expected_tokens, expected_labels = self._split_tokens_labels([
        (b"the", b"the"),
        (b"the", b"quick"),
        (b"the", b"brown"),
        (b"quick", b"the"),
        (b"quick", b"quick"),
        (b"quick", b"brown"),
        (b"quick", b"fox"),
        (b"brown", b"the"),
        (b"brown", b"quick"),
        (b"brown", b"brown"),
        (b"brown", b"fox"),
        (b"brown", b"jumps"),
        (b"fox", b"quick"),
        (b"fox", b"brown"),
        (b"fox", b"fox"),
        (b"fox", b"jumps"),
        (b"jumps", b"brown"),
        (b"jumps", b"fox"),
        (b"jumps", b"jumps"),
    ])
    with self.test_session():
      self.assertAllEqual(expected_tokens, tokens.eval())
      self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_skips_0(self):
"""Tests skip-gram with min_skips = max_skips = 0."""
input_tensor = constant_op.constant([b"the", b"quick", b"brown"])
# If emit_self_as_target is False (default), output will be empty.
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=0, max_skips=0, emit_self_as_target=False)
with self.test_session():
self.assertEqual(0, tokens.eval().size)
self.assertEqual(0, labels.eval().size)
# If emit_self_as_target is True, each token will be its own label.
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=0, max_skips=0, emit_self_as_target=True)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"the"),
(b"quick", b"quick"),
(b"brown", b"brown"),
])
with self.test_session():
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_skips_exceed_length(self):
"""Tests skip-gram when min/max_skips exceed length of input."""
input_tensor = constant_op.constant([b"the", b"quick", b"brown"])
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=100, max_skips=100)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"quick"),
(b"the", b"brown"),
(b"quick", b"the"),
(b"quick", b"brown"),
(b"brown", b"the"),
(b"brown", b"quick"),
])
with self.test_session():
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_start_limit(self):
"""Tests skip-gram over a limited portion of the input."""
input_tensor = constant_op.constant(
[b"foo", b"the", b"quick", b"brown", b"bar"])
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=1, start=1, limit=3)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"quick"),
(b"quick", b"the"),
(b"quick", b"brown"),
(b"brown", b"quick"),
])
with self.test_session():
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_limit_exceeds(self):
"""Tests skip-gram when limit exceeds the length of the input."""
input_tensor = constant_op.constant([b"foo", b"the", b"quick", b"brown"])
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=1, start=1, limit=100)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"quick"),
(b"quick", b"the"),
(b"quick", b"brown"),
(b"brown", b"quick"),
])
with self.test_session():
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_random_skips(self):
"""Tests skip-gram with min_skips != max_skips, with random output."""
# The number of outputs is non-deterministic in this case, so set random
# seed to help ensure the outputs remain constant for this test case.
random_seed.set_random_seed(42)
input_tensor = constant_op.constant(
[b"the", b"quick", b"brown", b"fox", b"jumps", b"over"])
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=2, seed=9)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"quick"),
(b"the", b"brown"),
(b"quick", b"the"),
(b"quick", b"brown"),
(b"quick", b"fox"),
(b"brown", b"the"),
(b"brown", b"quick"),
(b"brown", b"fox"),
(b"brown", b"jumps"),
(b"fox", b"brown"),
(b"fox", b"jumps"),
(b"jumps", b"fox"),
(b"jumps", b"over"),
(b"over", b"fox"),
(b"over", b"jumps"),
])
with self.test_session() as sess:
tokens_eval, labels_eval = sess.run([tokens, labels])
self.assertAllEqual(expected_tokens, tokens_eval)
self.assertAllEqual(expected_labels, labels_eval)
def test_skip_gram_sample_random_skips_default_seed(self):
"""Tests outputs are still random when no op-level seed is specified."""
# This is needed since tests set a graph-level seed by default. We want to
# explicitly avoid setting both graph-level seed and op-level seed, to
# simulate behavior under non-test settings when the user doesn't provide a
# seed to us. This results in random_seed.get_seed() returning None for both
# seeds, forcing the C++ kernel to execute its default seed logic.
random_seed.set_random_seed(None)
# Uses an input tensor with 10 words, with possible skip ranges in [1,
# 5]. Thus, the probability that two random samplings would result in the
# same outputs is 1/5^10 ~ 1e-7 (aka the probability of this test being
# flaky).
input_tensor = constant_op.constant([str(x) for x in range(10)])
# Do not provide an op-level seed here!
tokens_1, labels_1 = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=5)
tokens_2, labels_2 = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=5)
with self.test_session() as sess:
tokens_1_eval, labels_1_eval, tokens_2_eval, labels_2_eval = sess.run(
[tokens_1, labels_1, tokens_2, labels_2])
if len(tokens_1_eval) == len(tokens_2_eval):
self.assertNotEqual(tokens_1_eval.tolist(), tokens_2_eval.tolist())
if len(labels_1_eval) == len(labels_2_eval):
self.assertNotEqual(labels_1_eval.tolist(), labels_2_eval.tolist())
def test_skip_gram_sample_batch(self):
"""Tests skip-gram with batching."""
input_tensor = constant_op.constant([b"the", b"quick", b"brown", b"fox"])
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=1, batch_size=3)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"quick"),
(b"quick", b"the"),
(b"quick", b"brown"),
(b"brown", b"quick"),
(b"brown", b"fox"),
(b"fox", b"brown"),
])
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
tokens_eval, labels_eval = sess.run([tokens, labels])
self.assertAllEqual(expected_tokens[:3], tokens_eval)
self.assertAllEqual(expected_labels[:3], labels_eval)
tokens_eval, labels_eval = sess.run([tokens, labels])
self.assertAllEqual(expected_tokens[3:6], tokens_eval)
self.assertAllEqual(expected_labels[3:6], labels_eval)
coord.request_stop()
coord.join(threads)
def test_skip_gram_sample_non_string_input(self):
"""Tests skip-gram with non-string input."""
input_tensor = constant_op.constant([1, 2, 3], dtype=dtypes.int16)
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=1, max_skips=1)
expected_tokens, expected_labels = self._split_tokens_labels([
(1, 2),
(2, 1),
(2, 3),
(3, 2),
])
with self.test_session():
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def test_skip_gram_sample_errors(self):
"""Tests various errors raised by skip_gram_sample()."""
input_tensor = constant_op.constant([b"the", b"quick", b"brown"])
invalid_skips = (
# min_skips and max_skips must be >= 0.
(-1, 2),
(1, -2),
# min_skips must be <= max_skips.
(2, 1))
for min_skips, max_skips in invalid_skips:
tokens, labels = text.skip_gram_sample(
input_tensor, min_skips=min_skips, max_skips=max_skips)
with self.test_session() as sess, self.assertRaises(
errors.InvalidArgumentError):
sess.run([tokens, labels])
# input_tensor must be of rank 1.
with self.assertRaises(ValueError):
invalid_tensor = constant_op.constant([[b"the"], [b"quick"], [b"brown"]])
text.skip_gram_sample(invalid_tensor)
# vocab_freq_table must be provided if vocab_min_count, vocab_subsampling,
# or corpus_size is specified.
dummy_input = constant_op.constant([""])
with self.assertRaises(ValueError):
text.skip_gram_sample(
dummy_input, vocab_freq_table=None, vocab_min_count=1)
with self.assertRaises(ValueError):
text.skip_gram_sample(
dummy_input, vocab_freq_table=None, vocab_subsampling=1e-5)
with self.assertRaises(ValueError):
text.skip_gram_sample(dummy_input, vocab_freq_table=None, corpus_size=100)
with self.assertRaises(ValueError):
text.skip_gram_sample(
dummy_input,
vocab_freq_table=None,
vocab_subsampling=1e-5,
corpus_size=100)
# vocab_subsampling and corpus_size must both be present or absent.
dummy_table = lookup.HashTable(
lookup.KeyValueTensorInitializer([b"foo"], [10]), -1)
with self.assertRaises(ValueError):
text.skip_gram_sample(
dummy_input,
vocab_freq_table=dummy_table,
vocab_subsampling=None,
corpus_size=100)
with self.assertRaises(ValueError):
text.skip_gram_sample(
dummy_input,
vocab_freq_table=dummy_table,
vocab_subsampling=1e-5,
corpus_size=None)
def test_filter_input_filter_vocab(self):
"""Tests input filtering based on vocab frequency table and thresholds."""
input_tensor = constant_op.constant(
[b"the", b"answer", b"to", b"life", b"and", b"universe"])
keys = constant_op.constant([b"and", b"life", b"the", b"to", b"universe"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
vocab_freq_table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), -1)
with self.test_session():
vocab_freq_table.init.run()
# No vocab_freq_table specified - output should be the same as input.
no_table_output = skip_gram_ops._filter_input(
input_tensor=input_tensor,
vocab_freq_table=None,
vocab_min_count=None,
vocab_subsampling=None,
corpus_size=None,
seed=None)
self.assertAllEqual(input_tensor.eval(), no_table_output.eval())
# vocab_freq_table specified, but no vocab_min_count - output should have
# filtered out tokens not in the table (b"answer").
table_output = skip_gram_ops._filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=None,
vocab_subsampling=None,
corpus_size=None,
seed=None)
self.assertAllEqual([b"the", b"to", b"life", b"and", b"universe"],
table_output.eval())
# vocab_freq_table and vocab_min_count specified - output should have
# filtered out tokens whose frequencies are below the threshold
# (b"and": 0, b"life": 1).
threshold_output = skip_gram_ops._filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=2,
vocab_subsampling=None,
corpus_size=None,
seed=None)
self.assertAllEqual([b"the", b"to", b"universe"], threshold_output.eval())
def test_filter_input_subsample_vocab(self):
"""Tests input filtering based on vocab subsampling."""
# The outputs are non-deterministic, so set random seed to help ensure that
# the outputs remain constant for testing.
random_seed.set_random_seed(42)
input_tensor = constant_op.constant([
# keep_prob = (sqrt(30/(0.05*100)) + 1) * (0.05*100/30) = 0.57.
b"the",
b"answer", # Not in vocab. (Always discarded)
b"to", # keep_prob = 0.75.
b"life", # keep_prob > 1. (Always kept)
b"and", # keep_prob = 0.48.
b"universe" # Below vocab threshold of 3. (Always discarded)
])
keys = constant_op.constant([b"and", b"life", b"the", b"to", b"universe"])
values = constant_op.constant([40, 8, 30, 20, 2], dtypes.int64)
vocab_freq_table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), -1)
with self.test_session():
vocab_freq_table.init.run()
output = skip_gram_ops._filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=3,
vocab_subsampling=0.05,
corpus_size=math_ops.reduce_sum(values),
seed=9)
self.assertAllEqual([b"the", b"to", b"life", b"and"], output.eval())
def _make_text_vocab_freq_file(self):
filepath = os.path.join(test.get_temp_dir(), "vocab_freq.txt")
with open(filepath, "w") as f:
writer = csv.writer(f)
writer.writerows([
["and", 40],
["life", 8],
["the", 30],
["to", 20],
["universe", 2],
])
return filepath
def _make_text_vocab_float_file(self):
filepath = os.path.join(test.get_temp_dir(), "vocab_freq_float.txt")
with open(filepath, "w") as f:
writer = csv.writer(f)
writer.writerows([
["and", 0.4],
["life", 0.08],
["the", 0.3],
["to", 0.2],
["universe", 0.02],
])
return filepath
def test_skip_gram_sample_with_text_vocab_filter_vocab(self):
"""Tests skip-gram sampling with text vocab and freq threshold filtering."""
input_tensor = constant_op.constant([
b"the",
b"answer", # Will be filtered before candidate generation.
b"to",
b"life",
b"and",
b"universe" # Will be filtered before candidate generation.
])
# b"answer" is not in vocab file, and b"universe"'s frequency is below
# threshold of 3.
vocab_freq_file = self._make_text_vocab_freq_file()
tokens, labels = text.skip_gram_sample_with_text_vocab(
input_tensor=input_tensor,
vocab_freq_file=vocab_freq_file,
vocab_token_index=0,
vocab_freq_index=1,
vocab_min_count=3,
min_skips=1,
max_skips=1)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"to"),
(b"to", b"the"),
(b"to", b"life"),
(b"life", b"to"),
(b"life", b"and"),
(b"and", b"life"),
])
with self.test_session():
lookup_ops.tables_initializer().run()
self.assertAllEqual(expected_tokens, tokens.eval())
self.assertAllEqual(expected_labels, labels.eval())
def _text_vocab_subsample_vocab_helper(self, vocab_freq_file, vocab_min_count,
vocab_freq_dtype, corpus_size=None):
# The outputs are non-deterministic, so set random seed to help ensure that
# the outputs remain constant for testing.
random_seed.set_random_seed(42)
input_tensor = constant_op.constant([
# keep_prob = (sqrt(30/(0.05*100)) + 1) * (0.05*100/30) = 0.57.
b"the",
b"answer", # Not in vocab. (Always discarded)
b"to", # keep_prob = 0.75.
b"life", # keep_prob > 1. (Always kept)
b"and", # keep_prob = 0.48.
b"universe" # Below vocab threshold of 3. (Always discarded)
])
# keep_prob calculated from vocab file with relative frequencies of:
# and: 40
# life: 8
# the: 30
# to: 20
# universe: 2
tokens, labels = text.skip_gram_sample_with_text_vocab(
input_tensor=input_tensor,
vocab_freq_file=vocab_freq_file,
vocab_token_index=0,
vocab_freq_index=1,
vocab_freq_dtype=vocab_freq_dtype,
vocab_min_count=vocab_min_count,
vocab_subsampling=0.05,
corpus_size=corpus_size,
min_skips=1,
max_skips=1,
seed=123)
expected_tokens, expected_labels = self._split_tokens_labels([
(b"the", b"to"),
(b"to", b"the"),
(b"to", b"life"),
(b"life", b"to"),
])
with self.test_session() as sess:
lookup_ops.tables_initializer().run()
tokens_eval, labels_eval = sess.run([tokens, labels])
self.assertAllEqual(expected_tokens, tokens_eval)
self.assertAllEqual(expected_labels, labels_eval)
def test_skip_gram_sample_with_text_vocab_subsample_vocab(self):
"""Tests skip-gram sampling with text vocab and vocab subsampling."""
# Vocab file frequencies
# and: 40
# life: 8
# the: 30
# to: 20
# universe: 2
#
# corpus_size for the above vocab is 40+8+30+20+2 = 100.
text_vocab_freq_file = self._make_text_vocab_freq_file()
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_freq_file,
vocab_min_count=3,
vocab_freq_dtype=dtypes.int64)
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_freq_file,
vocab_min_count=3,
vocab_freq_dtype=dtypes.int64,
corpus_size=100)
# The user-supplied corpus_size should not be less than the sum of all
# the frequency counts of vocab_freq_file, which is 100.
with self.assertRaises(ValueError):
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_freq_file,
vocab_min_count=3,
vocab_freq_dtype=dtypes.int64,
corpus_size=99)
def test_skip_gram_sample_with_text_vocab_subsample_vocab_float(self):
"""Tests skip-gram sampling with text vocab and subsampling with floats."""
# Vocab file frequencies
# and: 0.4
# life: 0.08
# the: 0.3
# to: 0.2
# universe: 0.02
#
# corpus_size for the above vocab is 0.4+0.08+0.3+0.2+0.02 = 1.
text_vocab_float_file = self._make_text_vocab_float_file()
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_float_file,
vocab_min_count=0.03,
vocab_freq_dtype=dtypes.float32)
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_float_file,
vocab_min_count=0.03,
vocab_freq_dtype=dtypes.float32,
corpus_size=1.0)
# The user-supplied corpus_size should not be less than the sum of all
# the frequency counts of vocab_freq_file, which is 1.
with self.assertRaises(ValueError):
self._text_vocab_subsample_vocab_helper(
vocab_freq_file=text_vocab_float_file,
vocab_min_count=0.03,
vocab_freq_dtype=dtypes.float32,
corpus_size=0.99)
def test_skip_gram_sample_with_text_vocab_errors(self):
"""Tests various errors raised by skip_gram_sample_with_text_vocab()."""
dummy_input = constant_op.constant([""])
vocab_freq_file = self._make_text_vocab_freq_file()
invalid_indices = (
# vocab_token_index can't be negative.
(-1, 0),
# vocab_freq_index can't be negative.
(0, -1),
# vocab_token_index can't be equal to vocab_freq_index.
(0, 0),
(1, 1),
# vocab_freq_file only has two columns.
(0, 2),
(2, 0))
for vocab_token_index, vocab_freq_index in invalid_indices:
with self.assertRaises(ValueError):
text.skip_gram_sample_with_text_vocab(
input_tensor=dummy_input,
vocab_freq_file=vocab_freq_file,
vocab_token_index=vocab_token_index,
vocab_freq_index=vocab_freq_index)
if __name__ == "__main__":
test.main()
| apache-2.0 |
zaenalarifin/openshot_jmd | openshot/windows/TransitionProperties.py | 1 | 6330 | # OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os
import gtk, gtk.glade
from classes import messagebox, profiles, project, video
from windows.SimpleGladeApp import SimpleGladeApp
# init the foreign language
from language import Language_Init
class frmTransitionProperties(SimpleGladeApp):
def __init__(self, path="TransitionProperties.glade", root="frmTransitionProperties", domain="OpenShot", form=None, project=None, current_transition=None, **kwargs):
SimpleGladeApp.__init__(self, os.path.join(project.GLADE_DIR, path), root, domain, **kwargs)
# Add language support
_ = Language_Init.Translator(project).lang.gettext
self._ = _
# add items to direction combo
options = [_("Transition"), _("Mask")]
# loop through export to options
for option in options:
# append profile to list
self.cboType.append_text(option)
# add items to direction combo
options = [_("Up"), _("Down")]
# loop through export to options
for option in options:
# append profile to list
self.cboDirection.append_text(option)
self.form = form
self.project = project
self.current_transition = current_transition
self.frmTransitionProperties.show_all()
# init the project type properties
self.lblName.set_text(self.current_transition.name)
self.hsSoftness.set_value(self.current_transition.softness * 100.0)
self.hsThreshold.set_value(self.current_transition.mask_value)
# set the dropdown boxes
self.set_type_dropdown()
self.set_direction_dropdown()
def set_type_dropdown(self):
# get correct gettext method
_ = self._
# get the model and iterator of the project type dropdown box
model = self.cboType.get_model()
iter = model.get_iter_first()
while True:
# get the value of each item in the dropdown
value = model.get_value(iter, 0)
# check for the matching project type
if self.current_transition.type == "mask" and value.lower() == _("Mask").lower():
# set the item as active
self.cboType.set_active_iter(iter)
break
# check for the matching project type
if self.current_transition.type == "transition" and value.lower() == _("Transition").lower():
# set the item as active
self.cboType.set_active_iter(iter)
break
# get the next item in the list
iter = model.iter_next(iter)
# break loop when no more dropdown items are found
if iter is None:
break
# disable if mask threshold
if self.current_transition.type == "transition":
self.hsThreshold.set_sensitive(False)
else:
self.hsThreshold.set_sensitive(True)
def set_direction_dropdown(self):
# get correct gettext method
_ = self._
# get the model and iterator of the project type dropdown box
model = self.cboDirection.get_model()
iter = model.get_iter_first()
while True:
# get the value of each item in the dropdown
value = model.get_value(iter, 0).lower()
# check for the matching project type
if self.current_transition.reverse == False and value == _("Up").lower():
# set the item as active
self.cboDirection.set_active_iter(iter)
# check for the matching project type
if self.current_transition.reverse == True and value == _("Down").lower():
# set the item as active
self.cboDirection.set_active_iter(iter)
# get the next item in the list
iter = model.iter_next(iter)
# break loop when no more dropdown items are found
if iter is None:
break
# disable if mask
if self.current_transition.type == _("Mask").lower():
self.cboDirection.set_sensitive(False)
else:
self.cboDirection.set_sensitive(True)
def on_cboType_changed(self, widget, *args):
print "on_cboType_changed"
# get correct gettext method
_ = self._
# get new type
localType = self.cboType.get_active_text()
# disable if mask
if localType.lower() == _("Mask").lower():
self.cboDirection.set_sensitive(False)
else:
self.cboDirection.set_sensitive(True)
# disable if mask threshold
if localType.lower() == _("Transition").lower():
self.hsThreshold.set_sensitive(False)
else:
self.hsThreshold.set_sensitive(True)
def on_btnCancel_clicked(self, widget, *args):
print "on_btnCancel_clicked"
self.frmTransitionProperties.destroy()
def on_btnApply_clicked(self, widget, *args):
print "on_btnApply_clicked"
# get correct gettext method
_ = self._
# Get settings
localcboType = self.cboType.get_active_text()
localcboDirection = self.cboDirection.get_active_text().lower()
localhsSoftness = self.hsSoftness.get_value()
localhsThreshold = self.hsThreshold.get_value()
# update transition object
if localcboType.lower() == _("Mask").lower():
self.current_transition.type = "mask"
else:
self.current_transition.type = "transition"
if localcboDirection == _("Up").lower():
self.current_transition.reverse = False
else:
self.current_transition.reverse = True
self.current_transition.softness = float(localhsSoftness) / 100.0
self.current_transition.mask_value = localhsThreshold
# mark project as modified
self.project.set_project_modified(is_modified=True, refresh_xml=True)
# Refresh the MLT XML file
self.project.RefreshXML()
# Refresh form
self.project.form.refresh()
# close window
self.frmTransitionProperties.destroy()
def main():
frmTransitionProperties = frmTransitionProperties()
frmTransitionProperties.run()
if __name__ == "__main__":
main()
| gpl-3.0 |
jstasiak/python-zeroconf | tests/test_updates.py | 1 | 2301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit tests for zeroconf._services. """
import logging
import socket
from threading import Event
import pytest
import zeroconf as r
from zeroconf import const
from zeroconf import Zeroconf
from zeroconf._services.browser import ServiceBrowser
from zeroconf._services.info import ServiceInfo
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
global original_logging_level
original_logging_level = log.level
log.setLevel(logging.DEBUG)
def teardown_module():
if original_logging_level != logging.NOTSET:
log.setLevel(original_logging_level)
def test_legacy_record_update_listener():
"""Test a RecordUpdateListener that does not implement update_records."""
# instantiate a zeroconf instance
zc = Zeroconf(interfaces=['127.0.0.1'])
with pytest.raises(RuntimeError):
r.RecordUpdateListener().update_record(
zc, 0, r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
)
updates = []
class LegacyRecordUpdateListener(r.RecordUpdateListener):
"""A RecordUpdateListener that does not implement update_records."""
def update_record(self, zc: 'Zeroconf', now: float, record: r.DNSRecord) -> None:
nonlocal updates
updates.append(record)
listener = LegacyRecordUpdateListener()
zc.add_listener(listener, None)
# dummy service callback
def on_service_state_change(zeroconf, service_type, state_change, name):
pass
# start a browser
type_ = "_homeassistant._tcp.local."
name = "MyTestHome"
browser = ServiceBrowser(zc, type_, [on_service_state_change])
info_service = ServiceInfo(
type_,
'%s.%s' % (name, type_),
80,
0,
0,
{'path': '/~paulsm/'},
"ash-2.local.",
addresses=[socket.inet_aton("10.0.1.2")],
)
zc.register_service(info_service)
zc.wait(1)
browser.cancel()
assert len(updates)
assert len([isinstance(update, r.DNSPointer) and update.name == type_ for update in updates]) >= 1
zc.remove_listener(listener)
# Removing a second time should not throw
zc.remove_listener(listener)
zc.close()
| lgpl-2.1 |
sajeeshcs/nested_projects_keystone | keystone/tests/test_no_admin_token_auth.py | 6 | 2214 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import webtest
from keystone import tests
class TestNoAdminTokenAuth(tests.TestCase):
def setUp(self):
super(TestNoAdminTokenAuth, self).setUp()
self.load_backends()
self._generate_paste_config()
self.admin_app = webtest.TestApp(
self.loadapp(tests.dirs.tmp('no_admin_token_auth'), name='admin'),
extra_environ=dict(REMOTE_ADDR='127.0.0.1'))
self.addCleanup(setattr, self, 'admin_app', None)
def _generate_paste_config(self):
# Generate a file, based on keystone-paste.ini, that doesn't include
# admin_token_auth in the pipeline
with open(tests.dirs.etc('keystone-paste.ini'), 'r') as f:
contents = f.read()
new_contents = contents.replace(' admin_token_auth ', ' ')
filename = tests.dirs.tmp('no_admin_token_auth-paste.ini')
with open(filename, 'w') as f:
f.write(new_contents)
self.addCleanup(os.remove, filename)
def test_request_no_admin_token_auth(self):
# This test verifies that if the admin_token_auth middleware isn't
# in the paste pipeline that users can still make requests.
# Note(blk-u): Picked /v2.0/tenants because it's an operation that
# requires is_admin in the context, any operation that requires
# is_admin would work for this test.
REQ_PATH = '/v2.0/tenants'
# If the following does not raise, then the test is successful.
self.admin_app.get(REQ_PATH, headers={'X-Auth-Token': 'NotAdminToken'},
status=401)
| apache-2.0 |
edx/edx-platform | common/djangoapps/student/management/tests/test_transfer_students.py | 4 | 5736 | """
Tests the transfer student management command
"""
import unittest
from unittest.mock import call, patch
import ddt
from django.conf import settings
from django.core.management import call_command
from opaque_keys.edx import locator
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import (
EVENT_NAME_ENROLLMENT_ACTIVATED,
EVENT_NAME_ENROLLMENT_DEACTIVATED,
EVENT_NAME_ENROLLMENT_MODE_CHANGED,
CourseEnrollment
)
from common.djangoapps.student.signals import UNENROLL_DONE
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestTransferStudents(ModuleStoreTestCase):
"""
Tests for transferring students between courses.
"""
PASSWORD = 'test'
signal_fired = False
def setUp(self, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Connect a stub receiver, and analytics event tracking.
"""
super().setUp()
UNENROLL_DONE.connect(self.assert_unenroll_signal)
patcher = patch('common.djangoapps.student.models.tracker')
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
self.addCleanup(UNENROLL_DONE.disconnect, self.assert_unenroll_signal)
def assert_unenroll_signal(self, skip_refund=False, **kwargs): # pylint: disable=unused-argument
"""
Signal Receiver stub for testing that the unenroll signal was fired.
"""
assert not self.signal_fired
assert skip_refund
self.signal_fired = True
def test_transfer_students(self):
"""
Verify the transfer student command works as intended.
"""
student = UserFactory.create()
student.set_password(self.PASSWORD)
student.save()
mode = 'verified'
# Original Course
original_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
course = self._create_course(original_course_location)
# Enroll the student in 'verified'
CourseEnrollment.enroll(student, course.id, mode='verified')
# Create and purchase a verified cert for the original course.
self._create_and_purchase_verified(student, course.id)
# New Course 1
course_location_one = locator.CourseLocator('Org1', 'Course1', 'Run1')
new_course_one = self._create_course(course_location_one)
# New Course 2
course_location_two = locator.CourseLocator('Org2', 'Course2', 'Run2')
new_course_two = self._create_course(course_location_two)
original_key = str(course.id)
new_key_one = str(new_course_one.id)
new_key_two = str(new_course_two.id)
# Run the actual management command
call_command(
'transfer_students',
'--from', original_key,
'--to', new_key_one, new_key_two,
)
assert self.signal_fired
# Confirm the analytics event was emitted.
self.mock_tracker.emit.assert_has_calls(
[
call(
EVENT_NAME_ENROLLMENT_ACTIVATED,
{'course_id': original_key, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_MODE_CHANGED,
{'course_id': original_key, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_DEACTIVATED,
{'course_id': original_key, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_ACTIVATED,
{'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_MODE_CHANGED,
{'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_ACTIVATED,
{'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
),
call(
EVENT_NAME_ENROLLMENT_MODE_CHANGED,
{'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
)
]
)
self.mock_tracker.reset_mock()
# Confirm the enrollment mode is verified on the new courses, and enrollment is enabled as appropriate.
assert (mode, False) == CourseEnrollment.enrollment_mode_for_user(student, course.id)
assert (mode, True) == CourseEnrollment.enrollment_mode_for_user(student, new_course_one.id)
assert (mode, True) == CourseEnrollment.enrollment_mode_for_user(student, new_course_two.id)
def _create_course(self, course_location):
"""
Creates a course
"""
return CourseFactory.create(
org=course_location.org,
number=course_location.course,
run=course_location.run
)
def _create_and_purchase_verified(self, student, course_id): # lint-amnesty, pylint: disable=unused-argument
"""
Creates a verified mode for the course and purchases it for the student.
"""
course_mode = CourseMode(course_id=course_id,
mode_slug='verified',
mode_display_name='verified cert',
min_price=50)
course_mode.save()
| agpl-3.0 |
varunarya10/boto | boto/vpc/vpngateway.py | 170 | 2853 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Vpn Gateway
"""
from boto.ec2.ec2object import TaggedEC2Object
class Attachment(object):
    """One VPN-gateway-to-VPC attachment, populated from EC2 response XML."""

    # Response element name -> friendly attribute name for tracked fields.
    _FIELDS = {'vpcId': 'vpc_id', 'state': 'state'}

    def __init__(self, connection=None):
        self.vpc_id = None
        self.state = None

    def startElement(self, name, attrs, connection):
        # No nested elements need special handling.
        pass

    def endElement(self, name, value, connection):
        # Known elements map to snake_case attributes; anything else is stored
        # verbatim so no data from the response is dropped.
        setattr(self, self._FIELDS.get(name, name), value)
class VpnGateway(TaggedEC2Object):
    """A VPN gateway as returned by the EC2 VPC API."""

    # Response element name -> attribute name for simple scalar fields.
    _SCALARS = {
        'vpnGatewayId': 'id',
        'type': 'type',
        'state': 'state',
        'availabilityZone': 'availability_zone',
    }

    def __init__(self, connection=None):
        super(VpnGateway, self).__init__(connection)
        self.id = None
        self.type = None
        self.state = None
        self.availability_zone = None
        self.attachments = []

    def __repr__(self):
        return 'VpnGateway:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the tag-aware base class first crack at the element.
        retval = super(VpnGateway, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name != 'item':
            return None
        # Each <item> under <attachments> describes one VPC attachment;
        # returning it makes the parser route its child elements to it.
        attachment = Attachment()
        self.attachments.append(attachment)
        return attachment

    def endElement(self, name, value, connection):
        if name in self._SCALARS:
            setattr(self, self._SCALARS[name], value)
        elif name == 'attachments':
            # Container element; its children were handled in startElement.
            pass
        else:
            setattr(self, name, value)

    def attach(self, vpc_id, dry_run=False):
        """Attach this gateway to *vpc_id* via the service connection."""
        return self.connection.attach_vpn_gateway(
            self.id,
            vpc_id,
            dry_run=dry_run
        )
| mit |
abhi11/tanglu-dak | dak/generate_packages_sources2.py | 3 | 15706 | #!/usr/bin/python
"""
Generate Packages/Sources files
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2011 Ansgar Burchardt <ansgar@debian.org>
@copyright: Based on daklib/lists.py and dak/generate_filelist.py:
2009-2011 Torsten Werner <twerner@debian.org>
@copyright: Based on dak/generate_packages_sources.py:
2000, 2001, 2002, 2006 James Troup <james@nocrew.org>
2009 Mark Hymers <mhy@debian.org>
2010 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import apt_pkg, sys
def usage():
    """Print the command-line help for this dak sub-command and exit."""
    # NOTE: Python 2 print statement; the help text is a runtime string and
    # is therefore left exactly as-is.
    print """Usage: dak generate-packages-sources2 [OPTIONS]
Generate the Packages/Sources files
-a, --archive=ARCHIVE process suites in ARCHIVE
-s, --suite=SUITE process this suite
Default: All suites not marked 'untouchable'
-f, --force Allow processing of untouchable suites
CAREFUL: Only to be used at point release time!
-h, --help show this help and exit
SUITE can be a space seperated list, e.g.
--suite=unstable testing
"""
    sys.exit()
#############################################################################
# Here be dragons.
# SQL that renders each source package's complete Sources stanza server-side:
# metadata key/values are aggregated in override-key order, the
# Files/Checksums-* lines are rebuilt from the files table, and
# Directory/Priority/Section are appended from the override tables.
# Query text reproduced verbatim (it is a runtime string).
_sources_query = R"""
SELECT
(SELECT
STRING_AGG(
CASE
WHEN key = 'Source' THEN E'Package\: '
WHEN key = 'Files' THEN E'Files\:\n ' || f.md5sum || ' ' || f.size || ' ' || SUBSTRING(f.filename FROM E'/([^/]*)\\Z')
WHEN key = 'Checksums-Sha1' THEN E'Checksums-Sha1\:\n ' || f.sha1sum || ' ' || f.size || ' ' || SUBSTRING(f.filename FROM E'/([^/]*)\\Z')
WHEN key = 'Checksums-Sha256' THEN E'Checksums-Sha256\:\n ' || f.sha256sum || ' ' || f.size || ' ' || SUBSTRING(f.filename FROM E'/([^/]*)\\Z')
ELSE key || E'\: '
END || value, E'\n' ORDER BY mk.ordering, mk.key)
FROM
source_metadata sm
JOIN metadata_keys mk ON mk.key_id = sm.key_id
WHERE s.id=sm.src_id
)
||
CASE
WHEN src_associations_full.extra_source THEN E'\nExtra-Source-Only\: yes'
ELSE ''
END
||
E'\nDirectory\: pool/' || :component_name || '/' || SUBSTRING(f.filename FROM E'\\A(.*)/[^/]*\\Z')
||
E'\nPriority\: ' || COALESCE(pri.priority, 'extra')
||
E'\nSection\: ' || COALESCE(sec.section, 'misc')
FROM
source s
JOIN src_associations_full ON src_associations_full.suite = :suite AND s.id = src_associations_full.source
JOIN files f ON s.file=f.id
JOIN files_archive_map fam
ON fam.file_id = f.id
AND fam.archive_id = (SELECT archive_id FROM suite WHERE id = :suite)
AND fam.component_id = :component
LEFT JOIN override o ON o.package = s.source
AND o.suite = :overridesuite
AND o.component = :component
AND o.type = :dsc_type
LEFT JOIN section sec ON o.section = sec.id
LEFT JOIN priority pri ON o.priority = pri.id
WHERE
(src_associations_full.extra_source OR o.suite IS NOT NULL)
ORDER BY
s.source, s.version
"""
def generate_sources(suite_id, component_id):
    """
    Generate the Sources index for one (suite, component) pair.

    Runs ``_sources_query`` against the projectb database and streams each
    pre-rendered stanza into a (possibly compressed) Sources file via
    SourcesFileWriter. Designed to run inside a worker process.

    Returns a ``(PROC_STATUS_SUCCESS, message)`` tuple for the pool callback.
    """
    global _sources_query
    # Imports are local so each multiprocessing worker sets up its own state.
    from daklib.filewriter import SourcesFileWriter
    from daklib.dbconn import Component, DBConn, OverrideType, Suite
    from daklib.dakmultiprocessing import PROC_STATUS_SUCCESS
    session = DBConn().session()
    dsc_type = session.query(OverrideType).filter_by(overridetype='dsc').one().overridetype_id
    suite = session.query(Suite).get(suite_id)
    component = session.query(Component).get(component_id)
    overridesuite_id = suite.get_overridesuite().suite_id
    writer_args = {
        'archive': suite.archive.path,
        'suite': suite.suite_name,
        'component': component.component_name
    }
    if suite.indices_compression is not None:
        writer_args['compression'] = suite.indices_compression
    writer = SourcesFileWriter(**writer_args)
    output = writer.open()
    # run query and write Sources
    r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "component_name": component.component_name, "dsc_type": dsc_type, "overridesuite": overridesuite_id})
    for (stanza,) in r:
        print >>output, stanza  # Python 2 file-redirect print
        print >>output, ""      # blank line separates stanzas
    writer.close()
    message = ["generate sources", suite.suite_name, component.component_name]
    # Read-only work: discard the implicit transaction.
    session.rollback()
    return (PROC_STATUS_SUCCESS, message)
#############################################################################
# Here be large dragons.
# SQL that renders each binary package's Packages stanza server-side.  The
# "tmp" CTE collects candidate binaries for the suite/component/architecture;
# the outer WHERE keeps arch-specific packages plus arch:all packages whose
# source either also builds for this arch or builds for no arch at all.
# Long descriptions are truncated to their first line when
# :include_long_description is 'false'.  Query text reproduced verbatim.
_packages_query = R"""
WITH
tmp AS (
SELECT
b.id AS binary_id,
b.package AS package,
b.version AS version,
b.architecture AS architecture,
b.source AS source_id,
s.source AS source,
f.filename AS filename,
f.size AS size,
f.md5sum AS md5sum,
f.sha1sum AS sha1sum,
f.sha256sum AS sha256sum
FROM
binaries b
JOIN bin_associations ba ON b.id = ba.bin
JOIN files f ON f.id = b.file
JOIN files_archive_map fam ON f.id = fam.file_id AND fam.archive_id = :archive_id
JOIN source s ON b.source = s.id
WHERE
(b.architecture = :arch_all OR b.architecture = :arch) AND b.type = :type_name
AND ba.suite = :suite
AND fam.component_id = :component
)
SELECT
(SELECT
STRING_AGG(key || E'\: ' || value, E'\n' ORDER BY ordering, key)
FROM
(SELECT key, ordering,
CASE WHEN :include_long_description = 'false' AND key = 'Description'
THEN SUBSTRING(value FROM E'\\A[^\n]*')
ELSE value
END AS value
FROM
binaries_metadata bm
JOIN metadata_keys mk ON mk.key_id = bm.key_id
WHERE
bm.bin_id = tmp.binary_id
AND key != ALL (:metadata_skip)
) AS metadata
)
|| COALESCE(E'\n' || (SELECT
STRING_AGG(key || E'\: ' || value, E'\n' ORDER BY key)
FROM external_overrides eo
WHERE
eo.package = tmp.package
AND eo.suite = :overridesuite AND eo.component = :component
), '')
|| E'\nSection\: ' || sec.section
|| E'\nPriority\: ' || pri.priority
|| E'\nFilename\: pool/' || :component_name || '/' || tmp.filename
|| E'\nSize\: ' || tmp.size
|| E'\nMD5sum\: ' || tmp.md5sum
|| E'\nSHA1\: ' || tmp.sha1sum
|| E'\nSHA256\: ' || tmp.sha256sum
FROM
tmp
JOIN override o ON o.package = tmp.package
JOIN section sec ON sec.id = o.section
JOIN priority pri ON pri.id = o.priority
WHERE
(
architecture <> :arch_all
OR
(architecture = :arch_all AND source_id IN (SELECT source_id FROM tmp WHERE architecture <> :arch_all))
OR
(architecture = :arch_all AND source NOT IN (SELECT DISTINCT source FROM tmp WHERE architecture <> :arch_all))
)
AND
o.type = :type_id AND o.suite = :overridesuite AND o.component = :component
ORDER BY tmp.source, tmp.package, tmp.version
"""
def generate_packages(suite_id, component_id, architecture_id, type_name):
    """
    Generate the Packages index for one (suite, component, architecture,
    package type) combination.

    ``type_name`` selects the binary type ('deb' or 'udeb').  Runs
    ``_packages_query`` and streams each stanza into a (possibly compressed)
    Packages file via PackagesFileWriter.  Designed to run inside a worker
    process.

    Returns a ``(PROC_STATUS_SUCCESS, message)`` tuple for the pool callback.
    """
    global _packages_query
    # Imports are local so each multiprocessing worker sets up its own state.
    from daklib.filewriter import PackagesFileWriter
    from daklib.dbconn import Architecture, Component, DBConn, OverrideType, Suite
    from daklib.dakmultiprocessing import PROC_STATUS_SUCCESS
    session = DBConn().session()
    arch_all_id = session.query(Architecture).filter_by(arch_string='all').one().arch_id
    type_id = session.query(OverrideType).filter_by(overridetype=type_name).one().overridetype_id
    suite = session.query(Suite).get(suite_id)
    component = session.query(Component).get(component_id)
    architecture = session.query(Architecture).get(architecture_id)
    overridesuite_id = suite.get_overridesuite().suite_id
    include_long_description = suite.include_long_description
    # We currently filter out the "Tag" line. They are set by external
    # overrides and NOT by the maintainer. And actually having it set by
    # maintainer means we output it twice at the moment -> which breaks
    # dselect.
    metadata_skip = ["Section", "Priority", "Tag"]
    if include_long_description:
        # Description-md5 only makes sense when the long description is
        # stripped from the stanza (it then lives in the Translation files).
        metadata_skip.append("Description-md5")
    writer_args = {
        'archive': suite.archive.path,
        'suite': suite.suite_name,
        'component': component.component_name,
        'architecture': architecture.arch_string,
        'debtype': type_name
    }
    if suite.indices_compression is not None:
        writer_args['compression'] = suite.indices_compression
    writer = PackagesFileWriter(**writer_args)
    output = writer.open()
    r = session.execute(_packages_query, {"archive_id": suite.archive.archive_id,
        "suite": suite_id, "component": component_id, 'component_name': component.component_name,
        "arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id,
        "overridesuite": overridesuite_id, "metadata_skip": metadata_skip,
        "include_long_description": 'true' if include_long_description else 'false'})
    for (stanza,) in r:
        print >>output, stanza  # Python 2 file-redirect print
        print >>output, ""      # blank line separates stanzas
    writer.close()
    message = ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
    # Read-only work: discard the implicit transaction.
    session.rollback()
    return (PROC_STATUS_SUCCESS, message)
#############################################################################
# SQL that renders each Translation-en stanza (Package, Description-md5,
# Description-en) for a suite/component, deduplicated via GROUP BY and
# ordered by source name.  Query text reproduced verbatim.
_translations_query = """
WITH
override_suite AS
(SELECT
s.id AS id,
COALESCE(os.id, s.id) AS overridesuite_id
FROM suite AS s LEFT JOIN suite AS os ON s.overridesuite = os.suite_name)
SELECT
E'Package\: ' || b.package
|| E'\nDescription-md5\: ' || bm_description_md5.value
|| E'\nDescription-en\: ' || bm_description.value
|| E'\n'
FROM binaries b
-- join tables for suite and component
JOIN bin_associations ba ON b.id = ba.bin
JOIN override_suite os ON os.id = ba.suite
JOIN override o ON b.package = o.package AND o.suite = os.overridesuite_id AND o.type = (SELECT id FROM override_type WHERE type = 'deb')
-- join tables for Description and Description-md5
JOIN binaries_metadata bm_description ON b.id = bm_description.bin_id AND bm_description.key_id = (SELECT key_id FROM metadata_keys WHERE key = 'Description')
JOIN binaries_metadata bm_description_md5 ON b.id = bm_description_md5.bin_id AND bm_description_md5.key_id = (SELECT key_id FROM metadata_keys WHERE key = 'Description-md5')
-- we want to sort by source name
JOIN source s ON b.source = s.id
WHERE ba.suite = :suite AND o.component = :component
GROUP BY b.package, bm_description_md5.value, bm_description.value
ORDER BY MIN(s.source), b.package, bm_description_md5.value
"""
def generate_translations(suite_id, component_id):
    """
    Generate the Translation-en file for one (suite, component) pair.

    Runs ``_translations_query`` and streams each stanza into a (possibly
    compressed) Translation file via TranslationFileWriter.  Designed to run
    inside a worker process.

    Returns a ``(PROC_STATUS_SUCCESS, message)`` tuple for the pool callback.
    """
    global _translations_query
    # Imports are local so each multiprocessing worker sets up its own state.
    from daklib.filewriter import TranslationFileWriter
    from daklib.dbconn import DBConn, Suite, Component
    from daklib.dakmultiprocessing import PROC_STATUS_SUCCESS
    session = DBConn().session()
    suite = session.query(Suite).get(suite_id)
    component = session.query(Component).get(component_id)
    writer_args = {
        'archive': suite.archive.path,
        'suite': suite.suite_name,
        'component': component.component_name,
        'language': 'en',
    }
    if suite.i18n_compression is not None:
        writer_args['compression'] = suite.i18n_compression
    writer = TranslationFileWriter(**writer_args)
    output = writer.open()
    r = session.execute(_translations_query, {"suite": suite_id, "component": component_id})
    for (stanza,) in r:
        print >>output, stanza  # Python 2 file-redirect print
    writer.close()
    message = ["generate-translations", suite.suite_name, component.component_name]
    # Read-only work: discard the implicit transaction.
    session.rollback()
    return (PROC_STATUS_SUCCESS, message)
#############################################################################
def main():
    """
    Entry point: parse command-line options, select the suites to process and
    fan Sources/Packages/Translation generation out over a process pool.

    Exits with the pool's overall status code.
    """
    from daklib.config import Config
    from daklib import daklog
    cnf = Config()
    Arguments = [('h',"help","Generate-Packages-Sources::Options::Help"),
                 ('a','archive','Generate-Packages-Sources::Options::Archive','HasArg'),
                 ('s',"suite","Generate-Packages-Sources::Options::Suite"),
                 ('f',"force","Generate-Packages-Sources::Options::Force"),
                 ('o','option','','ArbItem')]
    suite_names = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
    try:
        Options = cnf.subtree("Generate-Packages-Sources::Options")
    except KeyError:
        # No options stanza configured at all; fall back to an empty mapping.
        Options = {}
    if Options.has_key("Help"):
        usage()
    from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS, PROC_STATUS_SIGNALRAISED
    pool = DakProcessPool()
    logger = daklog.Logger('generate-packages-sources2')
    from daklib.dbconn import Component, DBConn, get_suite, Suite, Archive
    session = DBConn().session()
    # Make sure every binary has a Description-md5 before generating indices.
    session.execute("SELECT add_missing_description_md5()")
    session.commit()
    if Options.has_key("Suite"):
        # Explicit suite list given on the command line.
        suites = []
        for s in suite_names:
            suite = get_suite(s.lower(), session)
            if suite:
                suites.append(suite)
            else:
                print "I: Cannot find suite %s" % s
                logger.log(['Cannot find suite %s' % s])
    else:
        # Default: every touchable suite, optionally limited to one archive.
        query = session.query(Suite).filter(Suite.untouchable == False)
        if 'Archive' in Options:
            query = query.join(Suite.archive).filter(Archive.archive_name==Options['Archive'])
        suites = query.all()
    force = Options.has_key("Force") and Options["Force"]
    def parse_results(message):
        # Split out into (code, msg)
        code, msg = message
        if code == PROC_STATUS_SUCCESS:
            logger.log([msg])
        elif code == PROC_STATUS_SIGNALRAISED:
            logger.log(['E: Subprocess recieved signal ', msg])
        else:
            logger.log(['E: ', msg])
    # Lock tables so that nobody can change things underneath us
    session.execute("LOCK TABLE src_associations IN SHARE MODE")
    session.execute("LOCK TABLE bin_associations IN SHARE MODE")
    for s in suites:
        component_ids = [ c.component_id for c in s.components ]
        if s.untouchable and not force:
            import daklib.utils
            daklib.utils.fubar("Refusing to touch %s (untouchable and not forced)" % s.suite_name)
        for c in component_ids:
            pool.apply_async(generate_sources, [s.suite_id, c], callback=parse_results)
            if not s.include_long_description:
                # Long descriptions stripped from Packages -> need Translation files.
                pool.apply_async(generate_translations, [s.suite_id, c], callback=parse_results)
            for a in s.architectures:
                if a == 'source':
                    continue
                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=parse_results)
                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=parse_results)
    pool.close()
    pool.join()
    # this script doesn't change the database
    session.close()
    logger.close()
    sys.exit(pool.overall_status())
if __name__ == '__main__':
    main()
| gpl-2.0 |
yetilinux/yetiweb | main/migrations/0044_auto__chg_field_todolist_date_added.py | 4 | 12236 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: widen Todolist.date_added from a date-only
    # column to a full timestamp.

    def forwards(self, orm):
        # auto_now_add is preserved; existing rows gain a zero time part.
        db.alter_column('todolists', 'date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))

    def backwards(self, orm):
        # Reverting drops the time-of-day information from existing rows.
        db.alter_column('todolists', 'date_added', self.gf('django.db.models.fields.DateField')(auto_now_add=True))

    # South's frozen ORM snapshot of the app's models as of this migration.
    # Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.arch': {
            'Meta': {'ordering': "['name']", 'object_name': 'Arch', 'db_table': "'arches'"},
            'agnostic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'main.donor': {
            'Meta': {'ordering': "['name']", 'object_name': 'Donor', 'db_table': "'donors'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'main.package': {
            'Meta': {'ordering': "('pkgname',)", 'object_name': 'Package', 'db_table': "'packages'"},
            'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
            'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'compressed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'epoch': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'files_last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'flag_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'installed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'packager_str': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        'main.packagedepend': {
            'Meta': {'object_name': 'PackageDepend', 'db_table': "'package_depends'"},
            'depname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'depvcmp': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.packagefile': {
            'Meta': {'object_name': 'PackageFile', 'db_table': "'package_files'"},
            'directory': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_directory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.repo': {
            'Meta': {'ordering': "['name']", 'object_name': 'Repo', 'db_table': "'repos'"},
            'bugs_project': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'svn_root': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.signoff': {
            'Meta': {'object_name': 'Signoff'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"}),
            'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.todolist': {
            'Meta': {'object_name': 'Todolist', 'db_table': "'todolists'"},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.todolistpkg': {
            'Meta': {'unique_together': "(('list', 'pkg'),)", 'object_name': 'TodolistPkg', 'db_table': "'todolist_pkgs'"},
            'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Todolist']"}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'user_profiles'"},
            'alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'allowed_repos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Repo']", 'symmetrical': 'False', 'blank': 'True'}),
            'favorite_distros': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interests': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'notify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'other_contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'picture': ('django.db.models.fields.files.FileField', [], {'default': "'devs/silhouette.png'", 'max_length': '100'}),
            'public_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'roles': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'time_zone': ('django.db.models.fields.CharField', [], {'default': "'UTC'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'yob': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['main']
| gpl-2.0 |
MoonshineSG/OctoPrint | src/octoprint/plugin/__init__.py | 3 | 23582 | # coding=utf-8
"""
This module represents OctoPrint's plugin subsystem. This includes management and helper methods as well as the
registered plugin types.
.. autofunction:: plugin_manager
.. autofunction:: plugin_settings
.. autofunction:: call_plugin
.. autoclass:: PluginSettings
:members:
"""
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import os
import logging
from octoprint.settings import settings as s
from octoprint.plugin.core import (PluginInfo, PluginManager, Plugin)
from octoprint.plugin.types import *
from octoprint.util import deprecated
# Process-wide PluginManager singleton; populated by plugin_manager(init=True)
# and returned by subsequent plugin_manager() calls.
_instance = None
def _validate_plugin(phase, plugin_info):
    """
    Plugin validator run by the PluginManager during plugin lifecycle phases.

    In the ``after_load`` phase it transparently converts implementations of
    the deprecated :class:`AppPlugin` mixin into the equivalent
    ``octoprint.accesscontrol.appkey`` hook (emitting a DeprecationWarning),
    so legacy plugins keep working.

    Always returns True -- this validator never rejects a plugin.
    """
    if phase == "after_load":
        if plugin_info.implementation is not None and isinstance(plugin_info.implementation, AppPlugin):
            # transform app plugin into hook
            import warnings
            warnings.warn("{name} uses deprecated plugin mixin AppPlugin, use octoprint.accesscontrol.appkey hook instead".format(name=plugin_info.key), DeprecationWarning)
            hooks = plugin_info.hooks
            # Only register the hook if the plugin didn't declare it itself.
            if not "octoprint.accesscontrol.appkey" in hooks:
                hooks["octoprint.accesscontrol.appkey"] = plugin_info.implementation.get_additional_apps
            # Persist the augmented hook dict back onto the plugin module.
            setattr(plugin_info.instance, PluginInfo.attr_hooks, hooks)
    return True
def plugin_manager(init=False, plugin_folders=None, plugin_types=None, plugin_entry_points=None, plugin_disabled_list=None,
                   plugin_restart_needing_hooks=None, plugin_obsolete_hooks=None, plugin_validators=None):
    """
    Factory method for initially constructing and consecutively retrieving the :class:`~octoprint.plugin.core.PluginManager`
    singleton.

    Arguments:
        init (boolean): A flag indicating whether this is the initial call to construct the singleton (True) or not
            (False, default). If this is set to True and the plugin manager has already been initialized, a :class:`ValueError`
            will be raised. The same will happen if the plugin manager has not yet been initialized and this is set to
            False.
        plugin_folders (list): A list of folders (as strings containing the absolute path to them) in which to look for
            potential plugin modules. If not provided this defaults to the configured ``plugins`` base folder and
            ``src/plugins`` within OctoPrint's code base.
        plugin_types (list): A list of recognized plugin types for which to look for provided implementations. If not
            provided this defaults to the plugin types found in :mod:`octoprint.plugin.types` without
            :class:`~octoprint.plugin.OctoPrintPlugin`.
        plugin_entry_points (list): A list of entry points pointing to modules which to load as plugins. If not provided
            this defaults to the entry point ``octoprint.plugin``.
        plugin_disabled_list (list): A list of plugin identifiers that are currently disabled. If not provided this
            defaults to all plugins for which ``enabled`` is set to ``False`` in the settings.
        plugin_restart_needing_hooks (list): A list of hook namespaces which cause a plugin to need a restart in order
            be enabled/disabled. Does not have to contain full hook identifiers, will be matched with startswith similar
            to logging handlers
        plugin_obsolete_hooks (list): A list of hooks that have been declared obsolete. Plugins implementing them will
            not be enabled since they might depend on functionality that is no longer available.
        plugin_validators (list): A list of additional plugin validators through which to process each plugin.

    Returns:
        PluginManager: A fully initialized :class:`~octoprint.plugin.core.PluginManager` instance to be used for plugin
            management tasks.

    Raises:
        ValueError: ``init`` was True although the plugin manager was already initialized, or it was False although
            the plugin manager was not yet initialized.
    """
    global _instance
    if _instance is not None:
        if init:
            raise ValueError("Plugin Manager already initialized")
    else:
        if init:
            # Fill in defaults for anything the caller didn't customize.
            if plugin_types is None:
                plugin_types = [StartupPlugin,
                                ShutdownPlugin,
                                TemplatePlugin,
                                SettingsPlugin,
                                SimpleApiPlugin,
                                AssetPlugin,
                                BlueprintPlugin,
                                EventHandlerPlugin,
                                SlicerPlugin,
                                AppPlugin,
                                ProgressPlugin,
                                WizardPlugin,
                                UiPlugin]
            if plugin_restart_needing_hooks is None:
                # Plugins hooking into the HTTP server can't be toggled live.
                plugin_restart_needing_hooks = ["octoprint.server.http"]
            if plugin_obsolete_hooks is None:
                plugin_obsolete_hooks = ["octoprint.comm.protocol.gcode"]
            # _validate_plugin always runs, in addition to custom validators.
            if plugin_validators is None:
                plugin_validators = [_validate_plugin]
            else:
                plugin_validators.append(_validate_plugin)
            _instance = PluginManager(plugin_folders,
                                      plugin_types,
                                      plugin_entry_points,
                                      logging_prefix="octoprint.plugins.",
                                      plugin_disabled_list=plugin_disabled_list,
                                      plugin_restart_needing_hooks=plugin_restart_needing_hooks,
                                      plugin_obsolete_hooks=plugin_obsolete_hooks,
                                      plugin_validators=plugin_validators)
        else:
            raise ValueError("Plugin Manager not initialized yet")
    return _instance
def plugin_settings(plugin_key, defaults=None, get_preprocessors=None, set_preprocessors=None, settings=None):
    """
    Factory method for creating a :class:`PluginSettings` instance.

    Arguments:
        plugin_key (string): The plugin identifier for which to create the settings instance.
        defaults (dict): The default settings for the plugin, if different from get_settings_defaults.
        get_preprocessors (dict): The getter preprocessors for the plugin.
        set_preprocessors (dict): The setter preprocessors for the plugin.
        settings (octoprint.settings.Settings): The settings instance to use; the global
            OctoPrint settings are used when not provided.

    Returns:
        PluginSettings: A fully initialized :class:`PluginSettings` instance to be used to access the plugin's
            settings
    """
    # Fall back to the global settings singleton when no backing store is given.
    backing = settings if settings is not None else s()
    return PluginSettings(backing,
                          plugin_key,
                          defaults=defaults,
                          get_preprocessors=get_preprocessors,
                          set_preprocessors=set_preprocessors)
def plugin_settings_for_settings_plugin(plugin_key, instance, settings=None):
    """
    Factory method for creating a :class:`PluginSettings` instance for a given :class:`SettingsPlugin` instance.

    Will return `None` if the provided `instance` is not a :class:`SettingsPlugin` instance, or if retrieving
    the plugin's settings preprocessors raises an exception.

    Arguments:
        plugin_key (string): The plugin identifier for which to create the settings instance.
        instance (octoprint.plugin.SettingsPlugin): The :class:`SettingsPlugin` instance.
        settings (octoprint.settings.Settings): The settings instance to use. Defaults to the global OctoPrint settings.

    Returns:
        PluginSettings or None: A fully initialized :class:`PluginSettings` instance to be used to access the plugin's
            settings, or `None` if the provided `instance` was not a class:`SettingsPlugin`
    """
    if not isinstance(instance, SettingsPlugin):
        return None
    try:
        get_preprocessors, set_preprocessors = instance.get_settings_preprocessors()
    except Exception:
        # Was a bare "except:" before, which would also have swallowed
        # SystemExit/KeyboardInterrupt - limit to Exception so such signals
        # still propagate; plugin errors are logged and treated as "no settings".
        logging.getLogger(__name__).exception("Error while retrieving preprocessors for plugin {}".format(plugin_key))
        return None
    return plugin_settings(plugin_key, get_preprocessors=get_preprocessors, set_preprocessors=set_preprocessors, settings=settings)
def call_plugin(types, method, args=None, kwargs=None, callback=None, error_callback=None, sorting_context=None):
    """
    Helper method to invoke the indicated ``method`` on all registered plugin implementations implementing the
    indicated ``types``. Allows providing method arguments and registering callbacks to call in case of success
    and/or failure of each call which can be used to return individual results to the calling code.

    Example:

    .. sourcecode:: python

       def my_success_callback(name, plugin, result):
           print("{name} was called successfully and returned {result!r}".format(**locals()))

       def my_error_callback(name, plugin, exc):
           print("{name} raised an exception: {exc!s}".format(**locals()))

       octoprint.plugin.call_plugin(
           [octoprint.plugin.StartupPlugin],
           "on_startup",
           args=(my_host, my_port),
           callback=my_success_callback,
           error_callback=my_error_callback
       )

    Arguments:
        types (list): A list of plugin implementation types to match against.
        method (string): Name of the method to call on all matching implementations.
        args (tuple): A tuple containing the arguments to supply to the called ``method``. Optional.
        kwargs (dict): A dictionary containing the keyword arguments to supply to the called ``method``. Optional.
        callback (function): A callback to invoke after an implementation has been called successfully. Will be called
            with the three arguments ``name``, ``plugin`` and ``result``. ``name`` will be the plugin identifier,
            ``plugin`` the plugin implementation instance itself and ``result`` the result returned from the
            ``method`` invocation.
        error_callback (function): A callback to invoke after the call of an implementation resulted in an exception.
            Will be called with the three arguments ``name``, ``plugin`` and ``exc``. ``name`` will be the plugin
            identifier, ``plugin`` the plugin implementation instance itself and ``exc`` the caught exception.
        sorting_context (string): The sorting context to provide to the plugin manager when looking up matching
            implementations. Optional.
    """
    # normalize the arguments so the loop below can rely on their types
    if not isinstance(types, (list, tuple)):
        types = [types]
    if args is None:
        args = []
    if kwargs is None:
        kwargs = dict()

    matching = plugin_manager().get_implementations(*types, sorting_context=sorting_context)
    for plugin in matching:
        # skip implementations that don't offer the requested method at all
        if not hasattr(plugin, method):
            continue
        try:
            result = getattr(plugin, method)(*args, **kwargs)
            if callback:
                callback(plugin._identifier, plugin, result)
        except Exception as exc:
            # one failing plugin must not keep the others from being called
            logging.getLogger(__name__).exception("Error while calling plugin %s" % plugin._identifier)
            if error_callback:
                error_callback(plugin._identifier, plugin, exc)
class PluginSettings(object):
    """
    The :class:`PluginSettings` class is the interface for plugins to their own or globally defined settings.

    It provides a couple of convenience methods for directly accessing plugin settings via the regular
    :class:`octoprint.settings.Settings` interfaces as well as means to access plugin specific folder locations.

    All getter and setter methods will ensure that plugin settings are stored in their correct location within the
    settings structure by modifying the supplied paths accordingly.

    Arguments:
        settings (Settings): The :class:`~octoprint.settings.Settings` instance on which to operate.
        plugin_key (str): The plugin identifier of the plugin for which to create this instance.
        defaults (dict): The plugin's defaults settings, will be used to determine valid paths within the plugin's
            settings structure

    .. method:: get(path, merged=False, asdict=False)

       Retrieves a raw value from the settings for ``path``, optionally merging the raw value with the default settings
       if ``merged`` is set to True.

       :param path: The path for which to retrieve the value.
       :type path: list, tuple
       :param boolean merged: Whether to merge the returned result with the default settings (True) or not (False,
           default).
       :returns: The retrieved settings value.
       :rtype: object

    .. method:: get_int(path)

       Like :func:`get` but tries to convert the retrieved value to ``int``.

    .. method:: get_float(path)

       Like :func:`get` but tries to convert the retrieved value to ``float``.

    .. method:: get_boolean(path)

       Like :func:`get` but tries to convert the retrieved value to ``boolean``.

    .. method:: set(path, value, force=False)

       Sets the raw value on the settings for ``path``.

       :param path: The path for which to retrieve the value.
       :type path: list, tuple
       :param object value: The value to set.
       :param boolean force: If set to True, the modified configuration will even be written back to disk if
           the value didn't change.

    .. method:: set_int(path, value, force=False)

       Like :func:`set` but ensures the value is an ``int`` through attempted conversion before setting it.

    .. method:: set_float(path, value, force=False)

       Like :func:`set` but ensures the value is an ``float`` through attempted conversion before setting it.

    .. method:: set_boolean(path, value, force=False)

       Like :func:`set` but ensures the value is an ``boolean`` through attempted conversion before setting it.
    """

    def __init__(self, settings, plugin_key, defaults=None, get_preprocessors=None, set_preprocessors=None):
        self.settings = settings
        self.plugin_key = plugin_key

        # Wrap the plugin's defaults under plugins.<plugin_key> so lookups via
        # the regular Settings API land in the plugin's own settings subtree.
        if defaults is not None:
            self.defaults = dict(plugins=dict())
            self.defaults["plugins"][plugin_key] = defaults
            self.defaults["plugins"][plugin_key]["_config_version"] = None
        else:
            self.defaults = None

        # Preprocessors are likewise nested under the plugin's settings path.
        if get_preprocessors is None:
            get_preprocessors = dict()
        self.get_preprocessors = dict(plugins=dict())
        self.get_preprocessors["plugins"][plugin_key] = get_preprocessors

        if set_preprocessors is None:
            set_preprocessors = dict()
        self.set_preprocessors = dict(plugins=dict())
        self.set_preprocessors["plugins"][plugin_key] = set_preprocessors

        def prefix_path_in_args(args, index=0):
            # Return a copy of ``args`` with the path argument at position
            # ``index`` prefixed with the plugin's settings path.
            result = []
            if index == 0:
                result.append(self._prefix_path(args[0]))
                result.extend(args[1:])
            else:
                # NOTE(review): for index > 0 this drops args[index - 1]
                # (``args[:index - 1]`` looks like it should be
                # ``args[:index]``). Latent in practice: every entry in
                # access_methods below uses the default index of 0 - confirm
                # before relying on a non-zero index.
                args_before = args[:index - 1]
                args_after = args[index + 1:]
                result.extend(args_before)
                result.append(self._prefix_path(args[index]))
                result.extend(args_after)
            return result

        def add_getter_kwargs(kwargs):
            # Inject the plugin's defaults and getter preprocessors unless the
            # caller explicitly provided their own.
            if not "defaults" in kwargs and self.defaults is not None:
                kwargs.update(defaults=self.defaults)
            if not "preprocessors" in kwargs:
                kwargs.update(preprocessors=self.get_preprocessors)
            return kwargs

        def add_setter_kwargs(kwargs):
            # Same as add_getter_kwargs, but injecting the setter preprocessors.
            if not "defaults" in kwargs and self.defaults is not None:
                kwargs.update(defaults=self.defaults)
            if not "preprocessors" in kwargs:
                kwargs.update(preprocessors=self.set_preprocessors)
            return kwargs

        # Maps the snake_case accessor names exposed on this instance (via
        # __getattr__ below) to a tuple of (name of the method on the wrapped
        # Settings object, args mapper, kwargs mapper).
        self.access_methods = dict(
            has        =("has",        prefix_path_in_args, add_getter_kwargs),
            get        =("get",        prefix_path_in_args, add_getter_kwargs),
            get_int    =("getInt",     prefix_path_in_args, add_getter_kwargs),
            get_float  =("getFloat",   prefix_path_in_args, add_getter_kwargs),
            get_boolean=("getBoolean", prefix_path_in_args, add_getter_kwargs),
            set        =("set",        prefix_path_in_args, add_setter_kwargs),
            set_int    =("setInt",     prefix_path_in_args, add_setter_kwargs),
            set_float  =("setFloat",   prefix_path_in_args, add_setter_kwargs),
            set_boolean=("setBoolean", prefix_path_in_args, add_setter_kwargs),
            remove     =("remove",     prefix_path_in_args, lambda x: x)
        )
        # Old camelCase accessor names, mapped to their snake_case replacements;
        # __getattr__ wraps these with a deprecation warning.
        self.deprecated_access_methods = dict(
            getInt    ="get_int",
            getFloat  ="get_float",
            getBoolean="get_boolean",
            setInt    ="set_int",
            setFloat  ="set_float",
            setBoolean="set_boolean"
        )

    def _prefix_path(self, path=None):
        # Prepend the plugin's settings subtree to ``path``.
        if path is None:
            path = list()
        return ['plugins', self.plugin_key] + path

    def global_has(self, path, **kwargs):
        # Directly forwards to the wrapped Settings instance's ``has``.
        return self.settings.has(path, **kwargs)

    def global_remove(self, path, **kwargs):
        # Directly forwards to the wrapped Settings instance's ``remove``.
        return self.settings.remove(path, **kwargs)

    def global_get(self, path, **kwargs):
        """
        Getter for retrieving settings not managed by the plugin itself from the core settings structure. Use this
        to access global settings outside of your plugin.

        Directly forwards to :func:`octoprint.settings.Settings.get`.
        """
        return self.settings.get(path, **kwargs)

    def global_get_int(self, path, **kwargs):
        """
        Like :func:`global_get` but directly forwards to :func:`octoprint.settings.Settings.getInt`.
        """
        return self.settings.getInt(path, **kwargs)

    def global_get_float(self, path, **kwargs):
        """
        Like :func:`global_get` but directly forwards to :func:`octoprint.settings.Settings.getFloat`.
        """
        return self.settings.getFloat(path, **kwargs)

    def global_get_boolean(self, path, **kwargs):
        """
        Like :func:`global_get` but directly forwards to :func:`octoprint.settings.Settings.getBoolean`.
        """
        return self.settings.getBoolean(path, **kwargs)

    def global_set(self, path, value, **kwargs):
        """
        Setter for modifying settings not managed by the plugin itself on the core settings structure. Use this
        to modify global settings outside of your plugin.

        Directly forwards to :func:`octoprint.settings.Settings.set`.
        """
        self.settings.set(path, value, **kwargs)

    def global_set_int(self, path, value, **kwargs):
        """
        Like :func:`global_set` but directly forwards to :func:`octoprint.settings.Settings.setInt`.
        """
        self.settings.setInt(path, value, **kwargs)

    def global_set_float(self, path, value, **kwargs):
        """
        Like :func:`global_set` but directly forwards to :func:`octoprint.settings.Settings.setFloat`.
        """
        self.settings.setFloat(path, value, **kwargs)

    def global_set_boolean(self, path, value, **kwargs):
        """
        Like :func:`global_set` but directly forwards to :func:`octoprint.settings.Settings.setBoolean`.
        """
        self.settings.setBoolean(path, value, **kwargs)

    def global_get_basefolder(self, folder_type, **kwargs):
        """
        Retrieves a globally defined basefolder of the given ``folder_type``. Directly forwards to
        :func:`octoprint.settings.Settings.getBaseFolder`.
        """
        return self.settings.getBaseFolder(folder_type, **kwargs)

    def get_plugin_logfile_path(self, postfix=None):
        """
        Retrieves the path to a logfile specifically for the plugin. If ``postfix`` is not supplied, the logfile
        will be named ``plugin_<plugin identifier>.log`` and located within the configured ``logs`` folder. If a
        postfix is supplied, the name will be ``plugin_<plugin identifier>_<postfix>.log`` at the same location.

        Plugins may use this for specific logging tasks. For example, a :class:`~octoprint.plugin.SlicingPlugin` might
        want to create a log file for logging the output of the slicing engine itself if some debug flag is set.

        Arguments:
            postfix (str): Postfix of the logfile for which to create the path. If set, the file name of the log file
                will be ``plugin_<plugin identifier>_<postfix>.log``, if not it will be
                ``plugin_<plugin identifier>.log``.

        Returns:
            str: Absolute path to the log file, directly usable by the plugin.
        """
        filename = "plugin_" + self.plugin_key
        if postfix is not None:
            filename += "_" + postfix
        filename += ".log"
        return os.path.join(self.settings.getBaseFolder("logs"), filename)

    @deprecated("PluginSettings.get_plugin_data_folder has been replaced by OctoPrintPlugin.get_plugin_data_folder",
                includedoc="Replaced by :func:`~octoprint.plugin.types.OctoPrintPlugin.get_plugin_data_folder`",
                since="1.2.0")
    def get_plugin_data_folder(self):
        # Creates the plugin's data folder on first access.
        path = os.path.join(self.settings.getBaseFolder("data"), self.plugin_key)
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def get_all_data(self, **kwargs):
        # Retrieve the plugin's complete settings subtree; defaults to a merged
        # dict representation unless the caller overrides the kwargs.
        merged = kwargs.get("merged", True)
        asdict = kwargs.get("asdict", True)
        defaults = kwargs.get("defaults", self.defaults)
        preprocessors = kwargs.get("preprocessors", self.get_preprocessors)
        kwargs.update(dict(
            merged=merged,
            asdict=asdict,
            defaults=defaults,
            preprocessors=preprocessors
        ))
        return self.settings.get(self._prefix_path(), **kwargs)

    def clean_all_data(self):
        # Remove the plugin's complete settings subtree.
        self.settings.remove(self._prefix_path())

    def __getattr__(self, item):
        # Dynamically resolve the accessor methods declared in access_methods /
        # deprecated_access_methods; anything else falls through to the wrapped
        # Settings instance.
        # NOTE(review): ``keys() + keys()`` only works on Python 2 where keys()
        # returns lists; Python 3 dict views don't support "+" - confirm the
        # targeted interpreter before porting.
        all_access_methods = self.access_methods.keys() + self.deprecated_access_methods.keys()
        if item in all_access_methods:
            decorator = None
            if item in self.deprecated_access_methods:
                # wrap deprecated names so callers get a deprecation warning
                new = self.deprecated_access_methods[item]
                decorator = deprecated("{old} has been renamed to {new}".format(old=item, new=new), stacklevel=2)
                item = new
            settings_name, args_mapper, kwargs_mapper = self.access_methods[item]
            if hasattr(self.settings, settings_name) and callable(getattr(self.settings, settings_name)):
                orig_func = getattr(self.settings, settings_name)
                if decorator is not None:
                    orig_func = decorator(orig_func)
                def _func(*args, **kwargs):
                    # prefix the path argument and inject defaults/preprocessors
                    return orig_func(*args_mapper(args), **kwargs_mapper(kwargs))
                _func.__name__ = item
                _func.__doc__ = orig_func.__doc__ if "__doc__" in dir(orig_func) else None
                return _func
        return getattr(self.settings, item)

    ##~~ deprecated methods follow

    # TODO: Remove with release of 1.3.0

    globalGet            = deprecated("globalGet has been renamed to global_get",
                                      includedoc="Replaced by :func:`global_get`",
                                      since="1.2.0-dev-546")(global_get)
    globalGetInt         = deprecated("globalGetInt has been renamed to global_get_int",
                                      includedoc="Replaced by :func:`global_get_int`",
                                      since="1.2.0-dev-546")(global_get_int)
    globalGetFloat       = deprecated("globalGetFloat has been renamed to global_get_float",
                                      includedoc="Replaced by :func:`global_get_float`",
                                      since="1.2.0-dev-546")(global_get_float)
    globalGetBoolean     = deprecated("globalGetBoolean has been renamed to global_get_boolean",
                                      includedoc="Replaced by :func:`global_get_boolean`",
                                      since="1.2.0-dev-546")(global_get_boolean)
    globalSet            = deprecated("globalSet has been renamed to global_set",
                                      includedoc="Replaced by :func:`global_set`",
                                      since="1.2.0-dev-546")(global_set)
    globalSetInt         = deprecated("globalSetInt has been renamed to global_set_int",
                                      includedoc="Replaced by :func:`global_set_int`",
                                      since="1.2.0-dev-546")(global_set_int)
    globalSetFloat       = deprecated("globalSetFloat has been renamed to global_set_float",
                                      includedoc="Replaced by :func:`global_set_float`",
                                      since="1.2.0-dev-546")(global_set_float)
    globalSetBoolean     = deprecated("globalSetBoolean has been renamed to global_set_boolean",
                                      includedoc="Replaced by :func:`global_set_boolean`",
                                      since="1.2.0-dev-546")(global_set_boolean)
    globalGetBaseFolder  = deprecated("globalGetBaseFolder has been renamed to global_get_basefolder",
                                      includedoc="Replaced by :func:`global_get_basefolder`",
                                      since="1.2.0-dev-546")(global_get_basefolder)
    getPluginLogfilePath = deprecated("getPluginLogfilePath has been renamed to get_plugin_logfile_path",
                                      includedoc="Replaced by :func:`get_plugin_logfile_path`",
                                      since="1.2.0-dev-546")(get_plugin_logfile_path)
| agpl-3.0 |
halfcrazy/sqlalchemy | test/orm/test_composites.py | 24 | 29356 | from sqlalchemy.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey, \
select
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, \
CompositeProperty, aliased
from sqlalchemy.orm import composite, Session, configure_mappers
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
class PointTest(fixtures.MappedTest):
    """Exercises a classic two-column composite (``Point``) mapped onto the
    ``edges`` table: round trips, change detection, comparators, history,
    expiration and column-level querying."""

    @classmethod
    def define_tables(cls, metadata):
        # graphs 1..n edges; each edge stores two points as four int columns
        Table('graphs', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30)))

        Table('edges', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('graph_id', Integer,
                     ForeignKey('graphs.id')),
              Column('x1', Integer),
              Column('y1', Integer),
              Column('x2', Integer),
              Column('y2', Integer),
              )

    @classmethod
    def setup_mappers(cls):
        graphs, edges = cls.tables.graphs, cls.tables.edges

        class Point(cls.Comparable):
            # value object composed from two int columns
            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __composite_values__(self):
                # order matches the column order given to composite()
                return [self.x, self.y]

            __hash__ = None

            def __eq__(self, other):
                return isinstance(other, Point) and \
                    other.x == self.x and \
                    other.y == self.y

            def __ne__(self, other):
                return not isinstance(other, Point) or \
                    not self.__eq__(other)

        class Graph(cls.Comparable):
            pass

        class Edge(cls.Comparable):
            def __init__(self, *args):
                if args:
                    self.start, self.end = args

        mapper(Graph, graphs, properties={
            'edges': relationship(Edge)
        })
        mapper(Edge, edges, properties={
            # (x1, y1) -> start point, (x2, y2) -> end point
            'start': sa.orm.composite(Point, edges.c.x1, edges.c.y1),
            'end': sa.orm.composite(Point, edges.c.x2, edges.c.y2)
        })

    def _fixture(self):
        # one graph with two persisted edges
        Graph, Edge, Point = (self.classes.Graph,
                              self.classes.Edge,
                              self.classes.Point)
        sess = Session()
        g = Graph(id=1, edges=[
            Edge(Point(3, 4), Point(5, 6)),
            Edge(Point(14, 5), Point(2, 7))
        ])
        sess.add(g)
        sess.commit()
        return sess

    def test_early_configure(self):
        # test [ticket:2935], that we can call a composite
        # expression before configure_mappers()
        Edge = self.classes.Edge
        Edge.start.__clause_element__()

    def test_round_trip(self):
        Graph, Point = self.classes.Graph, self.classes.Point

        sess = self._fixture()

        g1 = sess.query(Graph).first()

        sess.close()

        g = sess.query(Graph).get(g1.id)
        eq_(
            [(e.start, e.end) for e in g.edges],
            [
                (Point(3, 4), Point(5, 6)),
                (Point(14, 5), Point(2, 7)),
            ]
        )

    def test_detect_change(self):
        # assigning a new composite value must flag the row dirty
        Graph, Edge, Point = (self.classes.Graph,
                              self.classes.Edge,
                              self.classes.Point)

        sess = self._fixture()

        g = sess.query(Graph).first()
        g.edges[1].end = Point(18, 4)
        sess.commit()

        e = sess.query(Edge).get(g.edges[1].id)
        eq_(e.end, Point(18, 4))

    def test_not_none(self):
        Graph, Edge, Point = (self.classes.Graph,
                              self.classes.Edge,
                              self.classes.Point)

        # current contract.  the composite is None
        # when hasn't been populated etc. on a
        # pending/transient object.
        e1 = Edge()
        assert e1.end is None
        sess = Session()
        sess.add(e1)

        # however, once it's persistent, the code as of 0.7.3
        # would unconditionally populate it, even though it's
        # all None.  I think this usage contract is inconsistent,
        # and it would be better that the composite is just
        # created unconditionally in all cases.
        # but as we are just trying to fix [ticket:2308] and
        # [ticket:2309] without changing behavior we maintain
        # that only "persistent" gets the composite with the
        # Nones

        sess.flush()
        assert e1.end is not None

    def test_eager_load(self):
        Graph, Point = self.classes.Graph, self.classes.Point

        sess = self._fixture()

        g = sess.query(Graph).first()
        sess.close()

        def go():
            # joinedload should fetch graph + edges in a single statement
            g2 = sess.query(Graph).\
                options(sa.orm.joinedload('edges')).\
                get(g.id)

            eq_(
                [(e.start, e.end) for e in g2.edges],
                [
                    (Point(3, 4), Point(5, 6)),
                    (Point(14, 5), Point(2, 7)),
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_comparator(self):
        # ==/!= against a composite expand to per-column comparisons
        Graph, Edge, Point = (self.classes.Graph,
                              self.classes.Edge,
                              self.classes.Point)

        sess = self._fixture()

        g = sess.query(Graph).first()

        assert sess.query(Edge).\
            filter(Edge.start == Point(3, 4)).one() is \
            g.edges[0]

        assert sess.query(Edge).\
            filter(Edge.start != Point(3, 4)).first() is \
            g.edges[1]

        eq_(
            sess.query(Edge).filter(Edge.start == None).all(),
            []
        )

    def test_comparator_aliased(self):
        Graph, Edge, Point = (self.classes.Graph,
                              self.classes.Edge,
                              self.classes.Point)

        sess = self._fixture()

        g = sess.query(Graph).first()

        ea = aliased(Edge)
        assert sess.query(ea).\
            filter(ea.start != Point(3, 4)).first() is \
            g.edges[1]

    def test_get_history(self):
        Edge = self.classes.Edge
        Point = self.classes.Point
        from sqlalchemy.orm.attributes import get_history

        e1 = Edge()
        e1.start = Point(1, 2)
        eq_(
            get_history(e1, 'start'),
            ([Point(x=1, y=2)], (), [Point(x=None, y=None)])
        )

        eq_(
            get_history(e1, 'end'),
            ((), [Point(x=None, y=None)], ())
        )

    def test_query_cols_legacy(self):
        Edge = self.classes.Edge

        sess = self._fixture()

        # .clauses yields the raw underlying columns
        eq_(
            sess.query(Edge.start.clauses, Edge.end.clauses).all(),
            [(3, 4, 5, 6), (14, 5, 2, 7)]
        )

    def test_query_cols(self):
        Edge = self.classes.Edge
        Point = self.classes.Point

        sess = self._fixture()

        start, end = Edge.start, Edge.end

        eq_(
            sess.query(start, end).filter(start == Point(3, 4)).all(),
            [(Point(3, 4), Point(5, 6))]
        )

    def test_query_cols_labeled(self):
        Edge = self.classes.Edge
        Point = self.classes.Point

        sess = self._fixture()

        start, end = Edge.start, Edge.end

        row = sess.query(start.label('s1'), end).filter(start == Point(3, 4)).first()
        eq_(row.s1.x, 3)
        eq_(row.s1.y, 4)
        eq_(row.end.x, 5)
        eq_(row.end.y, 6)

    def test_delete(self):
        Point = self.classes.Point
        Graph, Edge = self.classes.Graph, self.classes.Edge

        sess = self._fixture()
        g = sess.query(Graph).first()

        e = g.edges[1]
        del e.end
        sess.flush()
        # deleting the composite NULLs out its columns
        eq_(
            sess.query(Edge.start, Edge.end).all(),
            [
                (Point(x=3, y=4), Point(x=5, y=6)),
                (Point(x=14, y=5), Point(x=None, y=None))
            ]
        )

    def test_save_null(self):
        """test saving a null composite value

        See google groups thread for more context:
        http://groups.google.com/group/sqlalchemy/browse_thread/thread/0c6580a1761b2c29

        """

        Graph, Edge = self.classes.Graph, self.classes.Edge

        sess = Session()
        g = Graph(id=1)
        e = Edge(None, None)
        g.edges.append(e)

        sess.add(g)
        sess.commit()

        g2 = sess.query(Graph).get(1)
        assert g2.edges[-1].start.x is None
        assert g2.edges[-1].start.y is None

    def test_expire(self):
        Graph, Point = self.classes.Graph, self.classes.Point

        sess = self._fixture()
        g = sess.query(Graph).first()
        e = g.edges[0]
        sess.expire(e)
        # expiring removes the composite from __dict__; access reloads it
        assert 'start' not in e.__dict__
        assert e.start == Point(3, 4)

    def test_default_value(self):
        Edge = self.classes.Edge

        e = Edge()
        eq_(e.start, None)
class PrimaryKeyTest(fixtures.MappedTest):
    """Tests a composite (``Version``) that spans the two primary key
    columns of ``graphs``, including lookup by composite PK, mutation of
    the PK, and a partially-NULL PK (SQLite only)."""

    @classmethod
    def define_tables(cls, metadata):
        # composite primary key: (id, version_id); version_id nullable on purpose
        Table('graphs', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('version_id', Integer, primary_key=True,
                     nullable=True),
              Column('name', String(30)))

    @classmethod
    def setup_mappers(cls):
        graphs = cls.tables.graphs

        class Version(cls.Comparable):
            def __init__(self, id, version):
                self.id = id
                self.version = version

            def __composite_values__(self):
                return (self.id, self.version)

            __hash__ = None

            def __eq__(self, other):
                return isinstance(other, Version) and \
                    other.id == self.id and \
                    other.version == self.version

            def __ne__(self, other):
                return not self.__eq__(other)

        class Graph(cls.Comparable):
            def __init__(self, version):
                self.version = version

        mapper(Graph, graphs, properties={
            'version': sa.orm.composite(Version, graphs.c.id,
                                        graphs.c.version_id)})

    def _fixture(self):
        # one graph with version (1, 1) persisted
        Graph, Version = self.classes.Graph, self.classes.Version

        sess = Session()
        g = Graph(Version(1, 1))
        sess.add(g)
        sess.commit()
        return sess

    def test_get_by_col(self):
        Graph = self.classes.Graph

        sess = self._fixture()
        g = sess.query(Graph).first()

        # Query.get accepts the raw PK column values as a list
        g2 = sess.query(Graph).get([g.id, g.version_id])
        eq_(g.version, g2.version)

    def test_get_by_composite(self):
        Graph, Version = self.classes.Graph, self.classes.Version

        sess = self._fixture()
        g = sess.query(Graph).first()

        # ...and also the composite object itself
        g2 = sess.query(Graph).get(Version(g.id, g.version_id))
        eq_(g.version, g2.version)

    @testing.fails_on('mssql', 'Cannot update identity columns.')
    def test_pk_mutation(self):
        Graph, Version = self.classes.Graph, self.classes.Version

        sess = self._fixture()

        g = sess.query(Graph).first()
        g.version = Version(2, 1)
        sess.commit()
        g2 = sess.query(Graph).get(Version(2, 1))
        eq_(g.version, g2.version)

    @testing.fails_on_everything_except("sqlite")
    def test_null_pk(self):
        Graph, Version = self.classes.Graph, self.classes.Version

        sess = Session()

        # test pk with one column NULL
        # only sqlite can really handle this

        g = Graph(Version(2, None))
        sess.add(g)
        sess.commit()
        g2 = sess.query(Graph).filter_by(version=Version(2, None)).one()
        eq_(g.version, g2.version)
class DefaultsTest(fixtures.MappedTest):
    """Tests composite columns that carry Python-side and server-side
    column defaults: after a flush, the composite must reflect the
    default values applied by the database/ORM."""

    @classmethod
    def define_tables(cls, metadata):
        # x1 has a Python-side default, x3 a server default, x2/x4 none
        Table('foobars', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('x1', Integer, default=2),
              Column('x2', Integer),
              Column('x3', Integer, server_default="15"),
              Column('x4', Integer)
              )

    @classmethod
    def setup_mappers(cls):
        foobars = cls.tables.foobars

        class Foobar(cls.Comparable):
            pass

        class FBComposite(cls.Comparable):
            def __init__(self, x1, x2, x3, x4):
                # deliberately stores x1 under a different attribute name
                # to exercise __composite_values__ ordering
                self.goofy_x1 = x1
                self.x2 = x2
                self.x3 = x3
                self.x4 = x4

            def __composite_values__(self):
                return self.goofy_x1, self.x2, self.x3, self.x4

            __hash__ = None

            def __eq__(self, other):
                return other.goofy_x1 == self.goofy_x1 and \
                    other.x2 == self.x2 and \
                    other.x3 == self.x3 and \
                    other.x4 == self.x4

            def __ne__(self, other):
                return not self.__eq__(other)

            def __repr__(self):
                return "FBComposite(%r, %r, %r, %r)" % (
                    self.goofy_x1, self.x2, self.x3, self.x4
                )

        mapper(Foobar, foobars, properties=dict(
            foob=sa.orm.composite(FBComposite,
                                  foobars.c.x1,
                                  foobars.c.x2,
                                  foobars.c.x3,
                                  foobars.c.x4)
        ))

    def test_attributes_with_defaults(self):
        Foobar, FBComposite = self.classes.Foobar, self.classes.FBComposite

        sess = Session()
        f1 = Foobar()
        f1.foob = FBComposite(None, 5, None, None)
        sess.add(f1)
        sess.flush()

        # None values were replaced by the column defaults after the flush
        eq_(f1.foob, FBComposite(2, 5, 15, None))

        f2 = Foobar()
        sess.add(f2)
        sess.flush()
        eq_(f2.foob, FBComposite(2, None, 15, None))

    def test_set_composite_values(self):
        Foobar, FBComposite = self.classes.Foobar, self.classes.FBComposite

        sess = Session()
        f1 = Foobar()
        f1.foob = FBComposite(None, 5, None, None)
        sess.add(f1)
        sess.flush()

        eq_(f1.foob, FBComposite(2, 5, 15, None))
class MappedSelectTest(fixtures.MappedTest):
    """Tests composites mapped against a selectable (a joined SELECT
    aliased as ``descriptions_values``) rather than a plain table, using a
    list subclass as the composite type."""

    @classmethod
    def define_tables(cls, metadata):
        Table('descriptions', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('d1', String(20)),
              Column('d2', String(20)),
              )

        Table('values', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('description_id', Integer,
                     ForeignKey('descriptions.id'),
                     nullable=False),
              Column('v1', String(20)),
              Column('v2', String(20)),
              )

    @classmethod
    def setup_mappers(cls):
        values, descriptions = cls.tables.values, cls.tables.descriptions

        class Descriptions(cls.Comparable):
            pass

        class Values(cls.Comparable):
            pass

        class CustomValues(cls.Comparable, list):
            # composite implemented as a list subclass; the list itself
            # supplies the column values in order
            def __init__(self, *args):
                self.extend(args)

            def __composite_values__(self):
                return self

        # map Values against a joined SELECT instead of a plain table
        desc_values = select(
            [values, descriptions.c.d1, descriptions.c.d2],
            descriptions.c.id == values.c.description_id
        ).alias('descriptions_values')

        mapper(Descriptions, descriptions, properties={
            'values': relationship(Values, lazy='dynamic'),
            'custom_descriptions': composite(
                CustomValues,
                descriptions.c.d1,
                descriptions.c.d2),
        })

        mapper(Values, desc_values, properties={
            'custom_values': composite(CustomValues,
                                       desc_values.c.v1,
                                       desc_values.c.v2),
        })

    def test_set_composite_attrs_via_selectable(self):
        Values, CustomValues, values, Descriptions, descriptions = (self.classes.Values,
                                                                    self.classes.CustomValues,
                                                                    self.tables.values,
                                                                    self.classes.Descriptions,
                                                                    self.tables.descriptions)

        session = Session()
        d = Descriptions(
            custom_descriptions=CustomValues('Color', 'Number'),
            values=[
                Values(custom_values=CustomValues('Red', '5')),
                Values(custom_values=CustomValues('Blue', '1'))
            ]
        )

        session.add(d)
        session.commit()

        # verify the composite values landed in the underlying base tables
        eq_(
            testing.db.execute(descriptions.select()).fetchall(),
            [(1, 'Color', 'Number')]
        )
        eq_(
            testing.db.execute(values.select()).fetchall(),
            [(1, 1, 'Red', '5'), (2, 1, 'Blue', '1')]
        )
class ManyToOneTest(fixtures.MappedTest):
    """Tests a composite (``C``) whose second member is itself a
    relationship-mapped object (``B``), i.e. a composite combining a plain
    column with a many-to-one attribute."""

    @classmethod
    def define_tables(cls, metadata):
        Table('a',
              metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('b1', String(20)),
              Column('b2_id', Integer, ForeignKey('b.id'))
              )

        Table('b', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(20))
              )

    @classmethod
    def setup_mappers(cls):
        a, b = cls.tables.a, cls.tables.b

        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        class C(cls.Comparable):
            # composite of a string column and a related B instance
            def __init__(self, b1, b2):
                self.b1, self.b2 = b1, b2

            def __composite_values__(self):
                return self.b1, self.b2

            def __eq__(self, other):
                return isinstance(other, C) and \
                    other.b1 == self.b1 and \
                    other.b2 == self.b2

        mapper(A, a, properties={
            'b2': relationship(B),
            # composite referencing mapped attributes by name, one of which
            # is the relationship above
            'c': composite(C, 'b1', 'b2')
        })
        mapper(B, b)

    def test_early_configure(self):
        # test [ticket:2935], that we can call a composite
        # expression before configure_mappers()
        A = self.classes.A
        A.c.__clause_element__()

    def test_persist(self):
        A, C, B = (self.classes.A,
                   self.classes.C,
                   self.classes.B)

        sess = Session()
        sess.add(A(c=C('b1', B(data='b2'))))
        sess.commit()

        a1 = sess.query(A).one()
        eq_(a1.c, C('b1', B(data='b2')))

    def test_query(self):
        A, C, B = (self.classes.A,
                   self.classes.C,
                   self.classes.B)

        sess = Session()
        b1, b2 = B(data='b1'), B(data='b2')
        a1 = A(c=C('a1b1', b1))
        a2 = A(c=C('a2b1', b2))
        sess.add_all([a1, a2])
        sess.commit()

        eq_(
            sess.query(A).filter(A.c == C('a2b1', b2)).one(),
            a2
        )

    def test_query_aliased(self):
        A, C, B = (self.classes.A,
                   self.classes.C,
                   self.classes.B)

        sess = Session()
        b1, b2 = B(data='b1'), B(data='b2')
        a1 = A(c=C('a1b1', b1))
        a2 = A(c=C('a2b1', b2))
        sess.add_all([a1, a2])
        sess.commit()

        ae = aliased(A)
        eq_(
            sess.query(ae).filter(ae.c == C('a2b1', b2)).one(),
            a2
        )
class ConfigurationTest(fixtures.MappedTest):
    """Tests the various ways a composite can be configured on a mapper:
    via Column objects, mapped attributes, string names, deferred loading,
    and validation of invalid argument types."""

    @classmethod
    def define_tables(cls, metadata):
        Table('edge', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('x1', Integer),
              Column('y1', Integer),
              Column('x2', Integer),
              Column('y2', Integer),
              )

    @classmethod
    def setup_mappers(cls):
        # note: no mappers configured here; each test sets up its own
        class Point(cls.Comparable):
            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __composite_values__(self):
                return [self.x, self.y]

            def __eq__(self, other):
                return isinstance(other, Point) and \
                    other.x == self.x and \
                    other.y == self.y

            def __ne__(self, other):
                return not isinstance(other, Point) or \
                    not self.__eq__(other)

        class Edge(cls.Comparable):
            pass

    def _test_roundtrip(self):
        # shared assertion: an Edge with two Points survives a commit/reload
        Edge, Point = self.classes.Edge, self.classes.Point

        e1 = Edge(start=Point(3, 4), end=Point(5, 6))
        sess = Session()
        sess.add(e1)
        sess.commit()

        eq_(
            sess.query(Edge).one(),
            Edge(start=Point(3, 4), end=Point(5, 6))
        )

    def test_columns(self):
        # configure using Column objects directly
        edge, Edge, Point = (self.tables.edge,
                             self.classes.Edge,
                             self.classes.Point)

        mapper(Edge, edge, properties={
            'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1),
            'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2)
        })

        self._test_roundtrip()

    def test_attributes(self):
        # configure using already-mapped class attributes
        edge, Edge, Point = (self.tables.edge,
                             self.classes.Edge,
                             self.classes.Point)

        m = mapper(Edge, edge)
        m.add_property('start', sa.orm.composite(Point, Edge.x1, Edge.y1))
        m.add_property('end', sa.orm.composite(Point, Edge.x2, Edge.y2))

        self._test_roundtrip()

    def test_strings(self):
        # configure using attribute names as strings
        edge, Edge, Point = (self.tables.edge,
                             self.classes.Edge,
                             self.classes.Point)

        m = mapper(Edge, edge)
        m.add_property('start', sa.orm.composite(Point, 'x1', 'y1'))
        m.add_property('end', sa.orm.composite(Point, 'x2', 'y2'))

        self._test_roundtrip()

    def test_deferred(self):
        # composites support deferred loading, with and without a group
        edge, Edge, Point = (self.tables.edge,
                             self.classes.Edge,
                             self.classes.Point)

        mapper(Edge, edge, properties={
            'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1,
                                      deferred=True, group='s'),
            'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2,
                                    deferred=True)
        })
        self._test_roundtrip()

    def test_check_prop_type(self):
        # passing a tuple instead of a Column must raise ArgumentError
        edge, Edge, Point = (self.tables.edge,
                             self.classes.Edge,
                             self.classes.Point)
        mapper(Edge, edge, properties={
            'start': sa.orm.composite(Point, (edge.c.x1,), edge.c.y1),
        })
        assert_raises_message(
            sa.exc.ArgumentError,
            # note that we also are checking that the tuple
            # renders here, so the "%" operator in the string needs to
            # apply the tuple also
            r"Composite expects Column objects or mapped "
            "attributes/attribute names as "
            "arguments, got: \(Column",
            configure_mappers
        )
class ComparatorTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Tests for composite attribute comparison, ordering, and the
    comparator_factory extension point."""
    __dialect__ = 'default'
    @classmethod
    def define_tables(cls, metadata):
        # An edge is stored as two coordinate pairs; composites sit on top.
        Table('edge', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('x1', Integer),
            Column('y1', Integer),
            Column('x2', Integer),
            Column('y2', Integer),
        )
    @classmethod
    def setup_mappers(cls):
        class Point(cls.Comparable):
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __composite_values__(self):
                # Order must match the column order given to composite().
                return [self.x, self.y]
            def __eq__(self, other):
                return isinstance(other, Point) and \
                        other.x == self.x and \
                        other.y == self.y
            def __ne__(self, other):
                return not isinstance(other, Point) or \
                        not self.__eq__(other)
        class Edge(cls.Comparable):
            def __init__(self, start, end):
                self.start = start
                self.end = end
            def __eq__(self, other):
                # Edge identity is by primary key only, not endpoints.
                return isinstance(other, Edge) and \
                        other.id == self.id
    def _fixture(self, custom):
        """Map Edge with composite start/end.  When ``custom`` is True,
        install a comparator_factory that adds a near() operator."""
        edge, Edge, Point = (self.tables.edge,
                                self.classes.Edge,
                                self.classes.Point)
        if custom:
            class CustomComparator(sa.orm.CompositeProperty.Comparator):
                def near(self, other, d):
                    # Squared euclidean distance test against `other`.
                    clauses = self.__clause_element__().clauses
                    diff_x = clauses[0] - other.x
                    diff_y = clauses[1] - other.y
                    return diff_x * diff_x + diff_y * diff_y <= d * d
            mapper(Edge, edge, properties={
                'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1,
                                comparator_factory=CustomComparator),
                'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2)
            })
        else:
            mapper(Edge, edge, properties={
                'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1),
                'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2)
            })
    def test_comparator_behavior_default(self):
        self._fixture(False)
        self._test_comparator_behavior()
    def test_comparator_behavior_custom(self):
        self._fixture(True)
        self._test_comparator_behavior()
    def _test_comparator_behavior(self):
        """== / != on a composite compare all underlying columns;
        comparing to None yields IS NULL tests (matching no rows here)."""
        Edge, Point = (self.classes.Edge,
                        self.classes.Point)
        sess = Session()
        e1 = Edge(Point(3, 4), Point(5, 6))
        e2 = Edge(Point(14, 5), Point(2, 7))
        sess.add_all([e1, e2])
        sess.commit()
        assert sess.query(Edge).\
                    filter(Edge.start==Point(3, 4)).one() is \
                    e1
        assert sess.query(Edge).\
                    filter(Edge.start!=Point(3, 4)).first() is \
                    e2
        eq_(
            sess.query(Edge).filter(Edge.start==None).all(),
            []
        )
    def test_default_comparator_factory(self):
        self._fixture(False)
        Edge = self.classes.Edge
        start_prop = Edge.start.property
        assert start_prop.comparator_factory is CompositeProperty.Comparator
    def test_custom_comparator_factory(self):
        self._fixture(True)
        Edge, Point = (self.classes.Edge,
                        self.classes.Point)
        edge_1, edge_2 = Edge(Point(0, 0), Point(3, 5)), \
                            Edge(Point(0, 1), Point(3, 5))
        sess = Session()
        sess.add_all([edge_1, edge_2])
        sess.commit()
        near_edges = sess.query(Edge).filter(
            Edge.start.near(Point(1, 1), 1)
        ).all()
        assert edge_1 not in near_edges
        assert edge_2 in near_edges
        near_edges = sess.query(Edge).filter(
            Edge.start.near(Point(0, 1), 1)
        ).all()
        assert edge_1 in near_edges and edge_2 in near_edges
    def test_order_by(self):
        """ORDER BY on a composite expands to its component columns."""
        self._fixture(False)
        Edge = self.classes.Edge
        s = Session()
        self.assert_compile(
            s.query(Edge).order_by(Edge.start, Edge.end),
            "SELECT edge.id AS edge_id, edge.x1 AS edge_x1, "
            "edge.y1 AS edge_y1, edge.x2 AS edge_x2, edge.y2 AS edge_y2 "
            "FROM edge ORDER BY edge.x1, edge.y1, edge.x2, edge.y2"
        )
    def test_order_by_aliased(self):
        """Composite ORDER BY follows the alias, not the base table."""
        self._fixture(False)
        Edge = self.classes.Edge
        s = Session()
        ea = aliased(Edge)
        self.assert_compile(
            s.query(ea).order_by(ea.start, ea.end),
            "SELECT edge_1.id AS edge_1_id, edge_1.x1 AS edge_1_x1, "
            "edge_1.y1 AS edge_1_y1, edge_1.x2 AS edge_1_x2, "
            "edge_1.y2 AS edge_1_y2 "
            "FROM edge AS edge_1 ORDER BY edge_1.x1, edge_1.y1, "
            "edge_1.x2, edge_1.y2"
        )
    def test_clause_expansion(self):
        """Composite attributes expand inside Core select() too."""
        self._fixture(False)
        Edge = self.classes.Edge
        from sqlalchemy.orm import configure_mappers
        configure_mappers()
        self.assert_compile(
            select([Edge]).order_by(Edge.start),
            "SELECT edge.id, edge.x1, edge.y1, edge.x2, edge.y2 FROM edge "
            "ORDER BY edge.x1, edge.y1"
        )
| mit |
zengchunyun/s12 | day9/temp/salt_server.py | 1 | 2506 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
# import salt
#
# <Salt ID>: # The id to reference the target system with
# host: # The IP address or DNS name of the remote host
# user: # The user to log in as
# passwd: # The password to log in with
#
# # Optional parameters
# port: # The target system's ssh port number
# sudo: # Boolean to run command via sudo
# tty: # Boolean: Set this option to True if sudo is also set to
# # True and requiretty is also set on the target system
# priv: # File path to ssh private key, defaults to salt-ssh.rsa
# # The priv can also be set to agent-forwarding to not specify
# # a key, but use ssh agent forwarding
# timeout: # Number of seconds to wait for response when establishing
# # an SSH connection
# minion_opts: # Dictionary of minion opts
# thin_dir: # The target system's storage directory for Salt
# # components. Defaults to /tmp/salt-<hash>.
# cmd_umask: # umask to enforce for the salt-call command. Should be in
# # octal (so for 0o077 in YAML you would do 0077, or 63)
# ext_pillar:
# - example_a: some argument
# - example_b:
# - argumentA
# - argumentB
# - example_c:
# keyA: valueA
# keyB: valueB
import logging
log = logging.getLogger(__name__)
try:
import weird_thing
EXAMPLE_A_LOADED = True
except ImportError:
EXAMPLE_A_LOADED = False
__opts__ = { 'example_a.someconfig': 137 }
def __init__( __opts__ ):
# Do init work here
# This external pillar will be known as `example_a`
def __virtual__():
if EXAMPLE_A_LOADED:
return True
return False
# This external pillar will be known as `something_else`
__virtualname__ = 'something_else'
def __virtual__():
if EXAMPLE_A_LOADED:
return __virtualname__
return False
ext_pillar( id, pillar, 'some argument' ) # example_a
ext_pillar( id, pillar, 'argumentA', 'argumentB' ) # example_b
ext_pillar( id, pillar, keyA='valueA', keyB='valueB' } ) # example_c
def ext_pillar( minion_id, pillar, *args, **kwargs ):
my_pillar = {'external_pillar': {}}
my_pillar['external_pillar'] = get_external_pillar_dictionary()
return my_pillar
salt-call '*' pillar.get external_pillar
ext_pillar:
- cmd_json: 'echo {\"arg\":\"value\"}' | gpl-2.0 |
numerigraphe/odoo | addons/website_customer/controllers/main.py | 251 | 4306 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.website.models.website import unslug
from openerp.tools.translate import _
from openerp.addons.web.http import request
import werkzeug.urls
class WebsiteCustomer(http.Controller):
    """Public website controller listing published customer references."""
    # Page size for the customer index pager.
    _references_per_page = 20
    @http.route([
        '/customers',
        '/customers/page/<int:page>',
        '/customers/country/<int:country_id>',
        '/customers/country/<country_name>-<int:country_id>',
        '/customers/country/<int:country_id>/page/<int:page>',
        '/customers/country/<country_name>-<int:country_id>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def customers(self, country_id=0, page=0, country_name='', **post):
        """Render the paged customer index, optionally filtered by country
        and/or a free-text search over name and website description."""
        cr, uid, context = request.cr, request.uid, request.context
        country_obj = request.registry['res.country']
        partner_obj = request.registry['res.partner']
        partner_name = post.get('search', '')
        # Only published partners that have an assigned partner set.
        domain = [('website_published', '=', True), ('assigned_partner_id', '!=', False)]
        if partner_name:
            domain += [
                '|',
                ('name', 'ilike', post.get("search")),
                ('website_description', 'ilike', post.get("search"))
            ]
        # group by country, based on customers found with the search(domain)
        countries = partner_obj.read_group(
            cr, openerp.SUPERUSER_ID, domain, ["id", "country_id"],
            groupby="country_id", orderby="country_id", context=request.context)
        country_count = partner_obj.search(
            cr, openerp.SUPERUSER_ID, domain, count=True, context=request.context)
        if country_id:
            domain += [('country_id', '=', country_id)]
            # Keep the selected country in the sidebar even when it has no
            # matching customers for the current search.
            if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
                country = country_obj.read(cr, uid, country_id, ['name'], context)
                if country:
                    countries.append({
                        'country_id_count': 0,
                        'country_id': (country_id, country['name'])
                    })
        countries.sort(key=lambda d: d['country_id'] and d['country_id'][1])
        # Synthetic "All Countries" entry always heads the list.
        countries.insert(0, {
            'country_id_count': country_count,
            'country_id': (0, _("All Countries"))
        })
        # search customers to display
        partner_count = partner_obj.search_count(cr, openerp.SUPERUSER_ID, domain, context=request.context)
        # pager
        url = '/customers'
        if country_id:
            url += '/country/%s' % country_id
        pager = request.website.pager(
            url=url, total=partner_count, page=page, step=self._references_per_page,
            scope=7, url_args=post
        )
        partner_ids = partner_obj.search(request.cr, openerp.SUPERUSER_ID, domain,
                                         offset=pager['offset'], limit=self._references_per_page,
                                         context=request.context)
        google_map_partner_ids = ','.join(map(str, partner_ids))
        partners = partner_obj.browse(request.cr, openerp.SUPERUSER_ID, partner_ids, request.context)
        values = {
            'countries': countries,
            'current_country_id': country_id or 0,
            'partners': partners,
            'google_map_partner_ids': google_map_partner_ids,
            'pager': pager,
            'post': post,
            # NOTE(review): relies on werkzeug exposing url_encode at top
            # level (old lazy-module API) although only werkzeug.urls is
            # imported here -- confirm against the pinned werkzeug version.
            'search_path': "?%s" % werkzeug.url_encode(post),
        }
        return request.website.render("website_customer.index", values)
    # Do not use semantic controller due to SUPERUSER_ID
    @http.route(['/customers/<partner_id>'], type='http', auth="public", website=True)
    def partners_detail(self, partner_id, **post):
        """Render one published partner's detail page; fall back to the
        index when the slug is invalid or the partner is unpublished."""
        _, partner_id = unslug(partner_id)
        if partner_id:
            partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
            if partner.exists() and partner.website_published:
                values = {}
                values['main_object'] = values['partner'] = partner
                return request.website.render("website_customer.details", values)
        return self.customers(**post)
| agpl-3.0 |
wojenny/THash | p2pool/bitcoin/networks/polcoin.py | 1 | 1145 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
# Magic bytes prefixed to every p2p message (Python 2 hex-codec decode).
P2P_PREFIX = 'a5725982'.decode('hex') #pchmessagestart
P2P_PORT = 9338
ADDRESS_VERSION = 0 #pubkey_address
RPC_PORT = 9337
# Sanity-check the backing daemon: its RPC help must mention the
# polcoin-specific call and it must not be running on testnet.
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            'polcoinaddress' in (yield bitcoind.rpc_help()) and
            not (yield bitcoind.rpc_getinfo())['testnet']
        ))
# Block subsidy in base units: 50 coins, halving every 210000 blocks.
SUBSIDY_FUNC = lambda height: 50*100000000 >> (height + 1)//210000
POW_FUNC = data.hash256
BLOCK_PERIOD = 60 # s
SYMBOL = 'PLC'
# Default polcoin.conf location per platform (Windows / OS X / other Unix).
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'polcoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/polcoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.polcoin'), 'polcoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://plcexplorer.com/block/' #dummy
ADDRESS_EXPLORER_URL_PREFIX = 'http://plcexplorer.com/address/'
TX_EXPLORER_URL_PREFIX = 'http://plcexplorer.com/tx/'
SANE_TARGET_RANGE = (2**256//2**32//1000 - 1, 2**256//2**32 - 1)
DUMB_SCRYPT_DIFF = 1
DUST_THRESHOLD = 0.001e8
| gpl-3.0 |
wtsi-ssg/pcp | pcplib/lustreapi.py | 4 | 7128 | # Copyright (c) Genome Research Ltd 2012
# Author Guy Coates <gmpc@sanger.ac.uk>
# This program is released under the GNU Public License V2 (GPLv2)
"""
Python bindings to minimal subset of lustre api.
This module requires a dynamically linked version of the lustre
client library (liblustreapi.so).
Older version of the lustre client only ships a static library (liblustreapi.a).
setup.py should have generated a dynamic version during installation.
You can generate the dynamic library by hand by doing the following:
ar -x liblustreapi.a
gcc -shared -o liblustreapi.so *.o
"""
import ctypes
import ctypes.util
import os
import select
import sys
import pkg_resources
try:
__version__ = pkg_resources.require("pcp")[0].version
except pkg_resources.DistributionNotFound:
__version__ = "UNRELEASED"
LUSTREMAGIC = 0xbd00bd0
liblocation = ctypes.util.find_library("lustreapi")
# See if liblustreapi.so is in the same directory as the module
if not liblocation:
modlocation, module = os.path.split(__file__)
liblocation = os.path.join(modlocation, "liblustreapi.so")
lustre = ctypes.CDLL(liblocation, use_errno=True)
# ctype boilerplate for C data structures and functions
class lov_user_ost_data_v1(ctypes.Structure):
    # Per-OST object entry; mirrors struct lov_user_ost_data_v1 from the
    # lustre user headers -- TODO confirm layout against installed headers.
    _fields_ = [
        ("l_object_id", ctypes.c_ulonglong),
        ("l_object_gr", ctypes.c_ulonglong),
        ("l_ost_gen", ctypes.c_uint),
        ("l_ost_idx", ctypes.c_uint)
        ]
class lov_user_md_v1(ctypes.Structure):
    # Striping metadata; mirrors struct lov_user_md_v1 from the lustre user
    # headers.  The trailing object array is fixed at 2000 entries here --
    # presumably an upper bound on stripe count; verify against headers.
    _fields_ = [
        ("lmm_magic", ctypes.c_uint),
        ("lmm_pattern", ctypes.c_uint),
        ("lmm_object_id", ctypes.c_ulonglong),
        ("lmm_object_gr", ctypes.c_ulonglong),
        ("lmm_stripe_size", ctypes.c_uint),
        ("lmm_stripe_count", ctypes.c_short),
        ("lmm_stripe_offset", ctypes.c_short),
        ("lmm_objects", lov_user_ost_data_v1 * 2000 ),
        ]
lov_user_md_v1_p = ctypes.POINTER(lov_user_md_v1)
# Declare C argument prototypes so ctypes marshals calls correctly.
lustre.llapi_file_get_stripe.argtypes = [ctypes.c_char_p, lov_user_md_v1_p]
lustre.llapi_file_open.argtypes = [ctypes.c_char_p, ctypes.c_int,
                                   ctypes.c_int, ctypes.c_ulong, ctypes.c_int,
                                   ctypes.c_int, ctypes.c_int]
class stripeObj:
    """Container describing how a file is striped across lustre OSTs.

    Attributes:
        lovdata:      raw lov_user_md_v1 structure from the lustre C API.
        stripecount:  stripe count.
        stripesize:   stripe size in bytes.
        stripeoffset: stripe offset (index of first OST).
        ostobjects:   list of lov_user_ost_data_v1 entries from the C API.
    """
    def __init__(self):
        self.lovdata = lov_user_md_v1()
        self.stripecount = -1
        self.stripesize = 0
        self.stripeoffset = -1
        self.ostobjects = []

    def __str__(self):
        # Header line followed by one line per OST object.
        parts = ["Stripe Count: %i Stripe Size: %i Stripe Offset: %i\n"
                 % (self.stripecount, self.stripesize, self.stripeoffset)]
        for obj in self.ostobjects:
            parts.append("Objidx:\t %i \tObjid:\t %i\n"
                         % (obj.l_ost_idx, obj.l_object_id))
        return "".join(parts)

    def isstriped(self):
        # A count of -1 means "stripe over every OST", which counts too.
        return self.stripecount > 1 or self.stripecount == -1
def getstripe(filename):
    """Returns a stripeObj containing the stripe information of filename.
    Arguments:
        filename: The name of the file to query.
    Returns:
        A stripeObj containing the stripe information.
    Raises:
        IOError: with the (positive) errno returned by liblustreapi.
    """
    stripeobj = stripeObj()
    lovdata = lov_user_md_v1()
    stripeobj.lovdata = lovdata
    # llapi returns 0 on success or a negative errno on failure.
    err = lustre.llapi_file_get_stripe(filename, ctypes.byref(lovdata))
    # err 61 is due to LU-541 (see below)
    if err < 0 and err != -61:
        err = 0 - err
        raise IOError(err, os.strerror(err))
    # workaround for Whamcloud LU-541
    # use the filesystem defaults if no properties set
    if err == -61 :
        stripeobj.stripecount = 0
        stripeobj.stripesize = 0
        stripeobj.stripeoffset = -1
    else:
        for i in range(0, lovdata.lmm_stripe_count):
            stripeobj.ostobjects.append(lovdata.lmm_objects[i])
        stripeobj.stripecount = lovdata.lmm_stripe_count
        stripeobj.stripesize = lovdata.lmm_stripe_size
        # lmm_stripe_offset seems to be reported as 0, which is wrong
        if len(stripeobj.ostobjects) > 0:
            stripeobj.stripeoffset = stripeobj.ostobjects[0].l_ost_idx
        else:
            stripeobj.stripeoffset = -1
    return(stripeobj)
def setstripe(filename, stripeobj=None, stripesize=0, stripeoffset=-1,
              stripecount=1):
    """Sets the striping on an existing directory, or create a new empty file
    with the specified striping. Stripe parameters can be set explicity, or
    you can pass in an existing stripeobj to copy the attributes from an
    existing file.
    Note you can set the striping on an existing directory, but you cannot set
    the striping on an existing file.
    Arguments:
        stripeobj: copy the parameters from stripeobj.
        stripesize: size of stripe in bytes
        stripeoffset: stripe offset
        stripecount: stripe count
    Raises:
        IOError: with the (positive) errno returned by llapi_file_open.
    Examples:
        #Set the filesystem defaults
        setstripe("/lustre/testfile")
        # Stripe across all OSTs.
        setstripe("/lustre/testfile", stripecount=-1)
        #copy the attributes from foo
        stripeobj = getstripe("/lustre/foo")
        setstripe("/lustre/testfile", stripeobj)
    """
    flags = os.O_CREAT
    # Python 2 octal literal (0o700): rwx for the owner only.
    mode = 0700
    # only stripe_pattern 0 is supported by lustre.
    stripe_pattern = 0
    if stripeobj:
        stripesize = stripeobj.stripesize
        stripeoffset = stripeobj.stripeoffset
        stripecount = stripeobj.stripecount
    # Capture the lustre error messages, These get printed to stderr via
    # liblusteapi, and so we need to intercept them.
    message = captureStderr()
    fd = lustre.llapi_file_open(filename, flags, mode, stripesize,
                                stripeoffset, stripecount, stripe_pattern)
    # Drain anything liblustreapi wrote, then put stderr back.
    message.readData()
    message.stopCapture()
    if fd < 0:
        err = 0 - fd
        raise IOError(err, os.strerror(err))
    else:
        os.close(fd)
    return(0)
class captureStderr():
    """This class intercepts stderr and stores any output"""
    def __init__(self):
        # Point fd 2 at the write end of a pipe; keep a dup of the real
        # stderr so stopCapture() can restore it.
        self.pipeout, self.pipein = os.pipe()
        self.oldstderr = os.dup(2)
        os.dup2(self.pipein, 2)
        self.contents=""
    def __str__(self):
        return (self.contents)
    def readData(self):
        """Read data from stderr until there is no more."""
        # NOTE(review): Python 2 semantics -- os.read returns str there;
        # under Python 3 this would mix bytes into a str.
        while self.checkData():
            self.contents += os.read(self.pipeout, 1024)
    def checkData(self):
        """Check to see if there is any data to be read."""
        # Non-blocking poll (zero timeout) of the pipe's read end.
        r, _, _ = select.select([self.pipeout], [], [], 0)
        return bool(r)
    def stopCapture(self):
        """Restore the original stderr"""
        os.dup2(self.oldstderr, 2)
        os.close(self.pipeout)
        os.close(self.pipein)
| gpl-2.0 |
esbullington/pynode-asyncio | bitcoinlib/script.py | 7 | 10368 |
#
# script.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
# push value
OP_0 = 0x00
OP_FALSE = OP_0
OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d
OP_PUSHDATA4 = 0x4e
OP_1NEGATE = 0x4f
OP_RESERVED = 0x50
OP_1 = 0x51
OP_TRUE=OP_1
OP_2 = 0x52
OP_3 = 0x53
OP_4 = 0x54
OP_5 = 0x55
OP_6 = 0x56
OP_7 = 0x57
OP_8 = 0x58
OP_9 = 0x59
OP_10 = 0x5a
OP_11 = 0x5b
OP_12 = 0x5c
OP_13 = 0x5d
OP_14 = 0x5e
OP_15 = 0x5f
OP_16 = 0x60
# control
OP_NOP = 0x61
OP_VER = 0x62
OP_IF = 0x63
OP_NOTIF = 0x64
OP_VERIF = 0x65
OP_VERNOTIF = 0x66
OP_ELSE = 0x67
OP_ENDIF = 0x68
OP_VERIFY = 0x69
OP_RETURN = 0x6a
# stack ops
OP_TOALTSTACK = 0x6b
OP_FROMALTSTACK = 0x6c
OP_2DROP = 0x6d
OP_2DUP = 0x6e
OP_3DUP = 0x6f
OP_2OVER = 0x70
OP_2ROT = 0x71
OP_2SWAP = 0x72
OP_IFDUP = 0x73
OP_DEPTH = 0x74
OP_DROP = 0x75
OP_DUP = 0x76
OP_NIP = 0x77
OP_OVER = 0x78
OP_PICK = 0x79
OP_ROLL = 0x7a
OP_ROT = 0x7b
OP_SWAP = 0x7c
OP_TUCK = 0x7d
# splice ops
OP_CAT = 0x7e
OP_SUBSTR = 0x7f
OP_LEFT = 0x80
OP_RIGHT = 0x81
OP_SIZE = 0x82
# bit logic
OP_INVERT = 0x83
OP_AND = 0x84
OP_OR = 0x85
OP_XOR = 0x86
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_RESERVED1 = 0x89
OP_RESERVED2 = 0x8a
# numeric
OP_1ADD = 0x8b
OP_1SUB = 0x8c
OP_2MUL = 0x8d
OP_2DIV = 0x8e
OP_NEGATE = 0x8f
OP_ABS = 0x90
OP_NOT = 0x91
OP_0NOTEQUAL = 0x92
OP_ADD = 0x93
OP_SUB = 0x94
OP_MUL = 0x95
OP_DIV = 0x96
OP_MOD = 0x97
OP_LSHIFT = 0x98
OP_RSHIFT = 0x99
OP_BOOLAND = 0x9a
OP_BOOLOR = 0x9b
OP_NUMEQUAL = 0x9c
OP_NUMEQUALVERIFY = 0x9d
OP_NUMNOTEQUAL = 0x9e
OP_LESSTHAN = 0x9f
OP_GREATERTHAN = 0xa0
OP_LESSTHANOREQUAL = 0xa1
OP_GREATERTHANOREQUAL = 0xa2
OP_MIN = 0xa3
OP_MAX = 0xa4
OP_WITHIN = 0xa5
# crypto
OP_RIPEMD160 = 0xa6
OP_SHA1 = 0xa7
OP_SHA256 = 0xa8
OP_HASH160 = 0xa9
OP_HASH256 = 0xaa
OP_CODESEPARATOR = 0xab
OP_CHECKSIG = 0xac
OP_CHECKSIGVERIFY = 0xad
OP_CHECKMULTISIG = 0xae
OP_CHECKMULTISIGVERIFY = 0xaf
# expansion
OP_NOP1 = 0xb0
OP_NOP2 = 0xb1
OP_NOP3 = 0xb2
OP_NOP4 = 0xb3
OP_NOP5 = 0xb4
OP_NOP6 = 0xb5
OP_NOP7 = 0xb6
OP_NOP8 = 0xb7
OP_NOP9 = 0xb8
OP_NOP10 = 0xb9
# template matching params
OP_SMALLINTEGER = 0xfa
OP_PUBKEYS = 0xfb
OP_PUBKEYHASH = 0xfd
OP_PUBKEY = 0xfe
OP_INVALIDOPCODE = 0xff
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_NOP2,
OP_NOP3,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES = {
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_NOP2 : 'OP_NOP2',
OP_NOP3 : 'OP_NOP3',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
}
TEMPLATES = [
[ OP_PUBKEY, OP_CHECKSIG ],
[ OP_DUP, OP_HASH160, OP_PUBKEYHASH, OP_EQUALVERIFY, OP_CHECKSIG ],
]
class CScriptOp(object):
    """A single parsed script token: opcode plus any pushed data."""
    def __init__(self):
        self.op = OP_INVALIDOPCODE  # opcode byte; invalid until parsed
        self.data = ''              # pushdata payload, if any
        self.ser_len = 0            # serialized length incl. opcode/lengths
class CScript(object):
    """Stateful tokenizer and template matcher for serialized scripts.

    The raw script lives in ``vch``; ``pc`` is the parse cursor and
    ``pend`` the end offset.  ``getop()`` advances one opcode at a time,
    leaving the parsed token in ``self.sop``.
    """
    def __init__(self, vch=None):
        self.vch = vch
        self.reset()
    def reset(self):
        """Rewind the parse cursor to the start of the script."""
        self.pc = 0
        if self.vch is None:
            self.pend = 0
        else:
            self.pend = len(self.vch)
        self.pbegincodehash = 0
        self.sop = None  # last token parsed by getop()
    def getchars(self, n):
        """Consume and return the next n characters, or None if fewer remain."""
        if (self.pc + n) > self.pend:
            return None
        s = self.vch[self.pc:self.pc+n]
        self.pc += n
        return s
    def getop(self):
        """Parse one opcode (and any pushdata payload) into self.sop.

        Returns False on truncated input or an unknown non-push opcode.
        """
        s = self.getchars(1)
        if s is None:
            return False
        opcode = ord(s)
        sop = CScriptOp()
        sop.op = opcode
        sop.ser_len = 1
        if opcode > OP_PUSHDATA4:
            # Ordinary (non-push) opcode; must be in the known set.
            if opcode not in VALID_OPCODES:
                return False
            self.sop = sop
            return True
        # Push opcode: work out the payload length.
        if opcode < OP_PUSHDATA1:
            # 0x01..0x4b: the opcode byte itself is the data length.
            datasize = opcode
        elif opcode == OP_PUSHDATA1:
            # Next byte holds the data length.
            sop.ser_len += 1
            s = self.getchars(1)
            if s is None:
                return False
            datasize = ord(s)
        elif opcode == OP_PUSHDATA2:
            # Next 2 bytes, little-endian, hold the data length.
            sop.ser_len += 2
            s = self.getchars(2)
            if s is None:
                return False
            datasize = struct.unpack(b"<H", s)[0]
        elif opcode == OP_PUSHDATA4:
            # Next 4 bytes, little-endian, hold the data length.
            sop.ser_len += 4
            s = self.getchars(4)
            if s is None:
                return False
            datasize = struct.unpack(b"<I", s)[0]
        sop.ser_len += datasize
        sop.data = self.getchars(datasize)
        if sop.data is None:
            return False
        self.sop = sop
        return True
    def tokenize(self, vch_in=None):
        """Walk the whole script; True iff every opcode parses cleanly."""
        if vch_in is not None:
            self.vch = vch_in
        self.reset()
        while self.pc < self.pend:
            if not self.getop():
                return False
        return True
    def match_temp(self, template, vch_in=None):
        """Match the script against one opcode template.

        OP_PUBKEY / OP_PUBKEYHASH slots match any push opcode and collect
        its payload.  Returns the list of collected payloads, or None if
        the script does not fit the template.

        NOTE(review): a script *shorter* than the template still returns a
        (possibly empty) list -- trailing template ops are never checked.
        Confirm whether callers rely on that.
        """
        l = []
        i = 0
        if vch_in is not None:
            self.vch = vch_in
        self.reset()
        while self.pc < self.pend:
            if i >= len(template):
                return None
            if not self.getop():
                return None
            expected_op = template[i]
            if expected_op == OP_PUBKEYHASH or expected_op == OP_PUBKEY:
                if self.sop.op > OP_PUSHDATA4:
                    return None
                l.append(self.sop.data)
            elif self.sop.op != expected_op:
                return None
            i += 1
        return l
    def match_alltemp(self, vch_in=None):
        """Try each standard template; return the first match's payloads."""
        for temp in TEMPLATES:
            l = self.match_temp(temp, vch_in)
            if l is not None:
                return l
        return None
    def __repr__(self):
        # NOTE(review): raises TypeError when self.vch is None (len(None)).
        return "CScript(vchsz %d)" % (len(self.vch),)
| mit |
scalingdata/Impala | thirdparty/hive-1.2.1.2.3.0.0-2557/lib/py/thrift/server/TServer.py | 71 | 8153 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import sys
import os
import traceback
import threading
import Queue
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class TServer:
  """Base interface for a server, which must have a serve method."""
  """ 3 constructors for all servers:
  1) (processor, serverTransport)
  2) (processor, serverTransport, transportFactory, protocolFactory)
  3) (processor, serverTransport,
      inputTransportFactory, outputTransportFactory,
      inputProtocolFactory, outputProtocolFactory)"""
  def __init__(self, *args):
    # Dispatch on argument count to support the three signatures above;
    # the 2-arg form defaults to base transports and binary protocol.
    if (len(args) == 2):
      self.__initArgs__(args[0], args[1],
                        TTransport.TTransportFactoryBase(),
                        TTransport.TTransportFactoryBase(),
                        TBinaryProtocol.TBinaryProtocolFactory(),
                        TBinaryProtocol.TBinaryProtocolFactory())
    elif (len(args) == 4):
      self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
    elif (len(args) == 6):
      self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
  def __initArgs__(self, processor, serverTransport,
                   inputTransportFactory, outputTransportFactory,
                   inputProtocolFactory, outputProtocolFactory):
    # Stash the collaborators; subclasses drive them from serve().
    self.processor = processor
    self.serverTransport = serverTransport
    self.inputTransportFactory = inputTransportFactory
    self.outputTransportFactory = outputTransportFactory
    self.inputProtocolFactory = inputProtocolFactory
    self.outputProtocolFactory = outputProtocolFactory
  def serve(self):
    # Subclasses override with their accept/dispatch loop.
    pass
class TSimpleServer(TServer):
  """Simple single-threaded server that just pumps around one transport."""
  def __init__(self, *args):
    TServer.__init__(self, *args)
  def serve(self):
    # Accept one connection at a time; service it until it disconnects.
    self.serverTransport.listen()
    while True:
      client = self.serverTransport.accept()
      itrans = self.inputTransportFactory.getTransport(client)
      otrans = self.outputTransportFactory.getTransport(client)
      iprot = self.inputProtocolFactory.getProtocol(itrans)
      oprot = self.outputProtocolFactory.getProtocol(otrans)
      try:
        while True:
          self.processor.process(iprot, oprot)
      except TTransport.TTransportException, tx:
        # Normal end of a client session (peer closed the transport).
        pass
      except Exception, x:
        logging.exception(x)
      itrans.close()
      otrans.close()
class TThreadedServer(TServer):
  """Threaded server that spawns a new thread per each connection."""
  def __init__(self, *args):
    TServer.__init__(self, *args)
  def serve(self):
    self.serverTransport.listen()
    while True:
      try:
        client = self.serverTransport.accept()
        # One (non-daemon) thread per accepted connection.
        t = threading.Thread(target = self.handle, args=(client,))
        t.start()
      except KeyboardInterrupt:
        raise
      except Exception, x:
        logging.exception(x)
  def handle(self, client):
    """Service a single client connection until it disconnects."""
    itrans = self.inputTransportFactory.getTransport(client)
    otrans = self.outputTransportFactory.getTransport(client)
    iprot = self.inputProtocolFactory.getProtocol(itrans)
    oprot = self.outputProtocolFactory.getProtocol(otrans)
    try:
      while True:
        self.processor.process(iprot, oprot)
    except TTransport.TTransportException, tx:
      # Normal end of a client session.
      pass
    except Exception, x:
      logging.exception(x)
    itrans.close()
    otrans.close()
class TThreadPoolServer(TServer):
  """Server with a fixed size pool of threads which service requests."""
  def __init__(self, *args):
    TServer.__init__(self, *args)
    # Accepted connections are handed to workers through this queue.
    self.clients = Queue.Queue()
    self.threads = 10
  def setNumThreads(self, num):
    """Set the number of worker threads that should be created"""
    self.threads = num
  def serveThread(self):
    """Loop around getting clients from the shared queue and process them."""
    while True:
      try:
        client = self.clients.get()
        self.serveClient(client)
      except Exception, x:
        logging.exception(x)
  def serveClient(self, client):
    """Process input/output from a client for as long as possible"""
    itrans = self.inputTransportFactory.getTransport(client)
    otrans = self.outputTransportFactory.getTransport(client)
    iprot = self.inputProtocolFactory.getProtocol(itrans)
    oprot = self.outputProtocolFactory.getProtocol(otrans)
    try:
      while True:
        self.processor.process(iprot, oprot)
    except TTransport.TTransportException, tx:
      # Normal end of a client session.
      pass
    except Exception, x:
      logging.exception(x)
    itrans.close()
    otrans.close()
  def serve(self):
    """Start a fixed number of worker threads and put client into a queue"""
    # NOTE(review): workers are not daemonized, so the process will not
    # exit while they run -- confirm this is intended.
    for i in range(self.threads):
      try:
        t = threading.Thread(target = self.serveThread)
        t.start()
      except Exception, x:
        logging.exception(x)
    # Pump the socket for clients
    self.serverTransport.listen()
    while True:
      try:
        client = self.serverTransport.accept()
        self.clients.put(client)
      except Exception, x:
        logging.exception(x)
class TForkingServer(TServer):
  """A Thrift server that forks a new process for each request"""
  """
  This is more scalable than the threaded server as it does not cause
  GIL contention.
  Note that this has different semantics from the threading server.
  Specifically, updates to shared variables will no longer be shared.
  It will also not work on windows.
  This code is heavily inspired by SocketServer.ForkingMixIn in the
  Python stdlib.
  """
  def __init__(self, *args):
    TServer.__init__(self, *args)
    # PIDs of forked children that have not been reaped yet.
    self.children = []
  def serve(self):
    def try_close(file):
      # Best-effort close; a failed close must not kill the accept loop.
      try:
        file.close()
      except IOError, e:
        logging.warning(e, exc_info=True)
    self.serverTransport.listen()
    while True:
      client = self.serverTransport.accept()
      try:
        pid = os.fork()
        if pid:  # parent
          # add before collect, otherwise you race w/ waitpid
          self.children.append(pid)
          self.collect_children()
          # Parent must close socket or the connection may not get
          # closed promptly
          itrans = self.inputTransportFactory.getTransport(client)
          otrans = self.outputTransportFactory.getTransport(client)
          try_close(itrans)
          try_close(otrans)
        else:
          # Child: service this one connection, then _exit so we never
          # fall back into the parent's accept loop.
          itrans = self.inputTransportFactory.getTransport(client)
          otrans = self.outputTransportFactory.getTransport(client)
          iprot = self.inputProtocolFactory.getProtocol(itrans)
          oprot = self.outputProtocolFactory.getProtocol(otrans)
          ecode = 0
          try:
            try:
              while True:
                self.processor.process(iprot, oprot)
            except TTransport.TTransportException, tx:
              pass
            except Exception, e:
              logging.exception(e)
              ecode = 1
          finally:
            try_close(itrans)
            try_close(otrans)
          os._exit(ecode)
      except TTransport.TTransportException, tx:
        pass
      except Exception, x:
        logging.exception(x)
  def collect_children(self):
    """Reap any finished child processes without blocking."""
    while self.children:
      try:
        pid, status = os.waitpid(0, os.WNOHANG)
      except os.error:
        pid = None
      if pid:
        self.children.remove(pid)
      else:
        break
| apache-2.0 |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/wheel/metadata.py | 93 | 11676 | """
Tools for converting old- to new-style metadata.
"""
from collections import namedtuple
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import re
import os.path
import textwrap
import pkg_resources
import email.parser
from . import __version__ as wheel_version
# Version string written into the generated metadata dict.
METADATA_VERSION = "2.0"

# PKG-INFO fields whose repeated values are collected under one plural key.
PLURAL_FIELDS = { "classifier" : "classifiers",
                  "provides_dist" : "provides",
                  "provides_extra" : "extras" }

# Lower-cased PKG-INFO field names to drop entirely (currently none).
SKIP_FIELDS = set()

# (field-name mapping, role) pairs folded into the 'contacts' extension by
# pkginfo_to_dict().
CONTACT_FIELDS = (({"email":"author_email", "name": "author"},
                   "author"),
                  ({"email":"maintainer_email", "name": "maintainer"},
                   "maintainer"))

# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page",
                      "license"))

# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
# Keyword separators: any run of characters from NUL through ',' (covers
# whitespace and commas).
KEYWORDS_RE = re.compile("[\0-,]+")

# Grouping key for requirements that share an environment marker / extra.
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
def unique(iterable):
    """
    Yield unique values in iterable, preserving order.

    iterable: any iterable of hashable values.
    """
    seen = set()
    for value in iterable:
        # PEP 8 membership test (`value not in seen`, was `not value in seen`).
        if value not in seen:
            seen.add(value)
            yield value
def handle_requires(metadata, pkg_info, key):
    """
    Place the runtime requirements from pkg_info into metadata.

    metadata: dict under construction by pkginfo_to_dict(); mutated in
        place ('run_requires' and 'extras' keys may be added/extended).
    pkg_info: parsed PKG-INFO message object supporting get_all().
    key: name of the PKG-INFO field holding requirement strings
        (e.g. 'Requires-Dist').
    """
    # Group requirement strings by their (environment condition, extra)
    # pair, as extracted by EXTRA_RE.
    # NOTE(review): the `key` parameter is re-bound as a loop variable
    # below; after the first loop it no longer names the PKG-INFO field.
    may_requires = OrderedDefaultDict(list)
    for value in sorted(pkg_info.get_all(key)):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            # Strip a trailing " and " left over when the extra clause was
            # split off the end of the marker.
            if condition.endswith(' and '):
                condition = condition[:-5]
        else:
            # Plain requirement with no marker at all.
            condition, extra = None, None
            package = value
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)

    if may_requires:
        metadata['run_requires'] = []
        def sort_key(item):
            # Both condition and extra could be None, which can't be compared
            # against strings in Python 3.
            key, value = item
            if key.condition is None:
                return ''
            return key.condition
        for key, value in sorted(may_requires.items(), key=sort_key):
            may_requirement = OrderedDict((('requires', value),))
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)

        # Every extra referenced by a marker must also be declared.
        if not 'extras' in metadata:
            metadata['extras'] = []
        metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
    """
    Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.

    The description is included under the key ['description'] rather than
    being written to a separate file.

    path: path to PKG-INFO file
    distribution: optional distutils Distribution()
    """
    # Nested auto-vivifying dicts so e.g. metadata['extensions'][...] can be
    # assigned without creating intermediate levels explicitly.
    metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
    metadata["generator"] = "bdist_wheel (" + wheel_version + ")"
    try:
        # `unicode` only exists on Python 2; NameError means Python 3, where
        # the file is read and decoded explicitly instead.
        unicode
        pkg_info = read_pkg_info(path)
    except NameError:
        with open(path, 'rb') as pkg_info_file:
            pkg_info = email.parser.Parser().parsestr(pkg_info_file.read().decode('utf-8'))
    description = None

    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']

    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        # Newer tools put the long description in the message payload
        # rather than a Description header.
        payload = pkg_info.get_payload()

        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')

        if payload:
            description = payload

    if description:
        pkg_info['description'] = description

    # Copy all remaining headers, normalising names and applying the
    # special-case transforms below.
    for key in sorted(unique(k.lower() for k in pkg_info.keys())):
        low_key = key.replace('-', '_')

        if low_key in SKIP_FIELDS:
            continue

        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue

        if low_key in sorted(PLURAL_FIELDS):
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)

        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)

        elif low_key == 'provides_extra':
            if not 'extras' in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))

        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}

        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])

        else:
            metadata[low_key] = pkg_info[key]

    metadata['metadata_version'] = METADATA_VERSION

    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))

    # include more information if distribution is available
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = sorted(convert_requirements(requirements))
                    metadata[requires] = [{'requires':new_requirements}]
            except AttributeError:
                pass

    # handle contacts: move author/maintainer fields into the
    # python.details 'contacts' extension.
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = OrderedDict()
        for key in sorted(contact_type):
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts

    # convert entry points to exports
    try:
        with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = OrderedDict()
        for group, items in sorted(ep_map.items()):
            exports[group] = OrderedDict()
            for item in sorted(map(str, items.values())):
                name, export = item.split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        # No entry_points.txt next to PKG-INFO: nothing to export.
        pass

    # copy console_scripts entry points to commands
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]

    return metadata
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion.

    requirement: a parsed requirement exposing ``.specs`` as a sequence of
        (operator, version) pairs.
    Returns '' when there are no version predicates, otherwise a string of
    the form " (>=1.0,<2.0)".
    """
    predicates = [op + version for op, version in requirement.specs]
    if not predicates:
        return ''
    return " (%s)" % ','.join(predicates)
def convert_requirements(requirements):
    """Yield Requires-Dist: strings for parsed requirements strings.

    requirements: iterable of requirement strings as passed to setup().
    """
    for raw in requirements:
        parsed = pkg_resources.Requirement.parse(raw)
        extras_part = ",".join(parsed.extras)
        if extras_part:
            extras_part = "[%s]" % extras_part
        yield parsed.project_name + extras_part + requires_to_requires_dist(parsed)
def generate_requirements(extras_require):
    """
    Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
    and ('Provides-Extra', 'extra') tuples.

    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
    using the empty extra {'': [requirements]} to hold install_requires.
    """
    for extra, depends in extras_require.items():
        condition = ''
        if extra and ':' in extra:  # setuptools extra:condition syntax
            extra, condition = extra.split(':', 1)

        extra = pkg_resources.safe_extra(extra)
        if extra:
            yield ('Provides-Extra', extra)
            # Requirements for a named extra always carry an
            # "extra == '<name>'" marker, joined to any existing condition.
            if condition:
                condition += " and "
            condition += "extra == '%s'" % extra

        if condition:
            condition = '; ' + condition

        for new_req in convert_requirements(depends):
            yield ('Requires-Dist', new_req + condition)
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
    old-draft Metadata 2.0 format.

    egg_info_path: directory containing the optional requires.txt.
    pkginfo_path: path to the PKG-INFO file to convert.
    Returns the modified PKG-INFO message object.
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')
    # Fold requires.txt (install_requires plus extras sections) into
    # Requires-Dist / Provides-Extra headers.
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        # Sort sections; the unnamed (install_requires) section sorts first.
        for extra, reqs in sorted(pkg_resources.split_sections(requires),
                                  key=lambda x: x[0] or ''):
            for item in generate_requirements({extra: reqs}):
                pkg_info[item[0]] = item[1]

    # Move the long description from a header into the message payload.
    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+.

    Returns pkg_info[field] as text; non-str values are re-decoded from the
    raw header bytes via the surrogateescape round-trip.
    """
    value = pkg_info[field]
    if isinstance(value, str):
        return value
    if not hasattr(pkg_info, 'raw_items'):  # Python 3.2
        return str(value)
    wanted = field.lower()
    for name, raw in pkg_info.raw_items():
        if name.lower() == wanted:
            value = raw.encode('ascii', 'surrogateescape').decode('utf-8')
            break
    return value
def dedent_description(pkg_info):
    """
    Dedent and convert pkg_info['Description'] to Unicode.
    """
    raw = pkg_info['Description']

    # Python 3 Unicode handling, sorta: non-str values are re-decoded via
    # pkginfo_unicode and round-tripped through surrogateescape at the end.
    had_surrogates = not isinstance(raw, str)
    if had_surrogates:
        raw = pkginfo_unicode(pkg_info, 'Description')

    lines = raw.splitlines()
    # if the first line of long_description is blank,
    # the first line here will be indented.
    first_line = lines[0].lstrip()
    remainder = textwrap.dedent('\n'.join(lines[1:]))
    dedented = '\n'.join((first_line, remainder, '\n'))

    if had_surrogates:
        dedented = dedented.encode("utf8").decode("ascii", "surrogateescape")

    return dedented
if __name__ == "__main__":
    # CLI helper: pretty-print the Metadata 2.0 dict for the PKG-INFO file
    # given as the first command-line argument.
    import sys, pprint
    pprint.pprint(pkginfo_to_dict(sys.argv[1]))
| mit |
jasonzzz/ansible | lib/ansible/module_utils/shell.py | 2 | 8009 | #
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import socket
import time
# py2 vs py3; replace with six via ansiballz
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import paramiko
from paramiko.ssh_exception import AuthenticationException
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkError
# Terminal escape noise stripped from device output before prompt matching:
# the ESC[?1h ESC= sequence (keypad/cursor mode announcement) and any
# backspace-plus-erased-character pair.
ANSI_RE = [
    re.compile(r'(\x1b\[\?1h\x1b=)'),
    re.compile(r'\x08.')
]
def to_list(val):
    """Normalise *val* to a list: None -> [], sequence -> list, scalar -> [val]."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class ShellError(Exception):
    """Raised for shell-transport failures; optionally carries the failing command."""

    def __init__(self, msg, command=None):
        super(ShellError, self).__init__(msg)
        self.message = msg      # keep the text accessible as .message
        self.command = command  # command in flight when the error occurred, if any
class Shell(object):
    """Interactive SSH shell session (paramiko) with prompt/error matching."""

    def __init__(self, prompts_re=None, errors_re=None, kickstart=True):
        self.ssh = None            # paramiko.SSHClient once open() succeeds
        self.shell = None          # invoked shell channel
        self.kickstart = kickstart # send a newline after connect to elicit a prompt
        self._matched_prompt = None

        self.prompts = prompts_re or list()  # regexes that mark end of output
        self.errors = errors_re or list()    # regexes that mark a CLI error

    def open(self, host, port=22, username=None, password=None, timeout=10,
             key_filename=None, pkey=None, look_for_keys=None,
             allow_agent=False, key_policy="loose"):
        """Connect to *host* and start an interactive shell.

        Raises ShellError on resolution, authentication or timeout failures.
        """
        self.ssh = paramiko.SSHClient()

        if key_policy != "ignore":
            self.ssh.load_system_host_keys()
            try:
                self.ssh.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
            except IOError:
                # No known_hosts file; proceed with system keys only.
                pass

        if key_policy == "strict":
            self.ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
        else:
            self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # unless explicitly set, disable look for keys if a password is
        # present. this changes the default search order paramiko implements
        if not look_for_keys:
            look_for_keys = password is None

        try:
            self.ssh.connect(
                host, port=port, username=username, password=password,
                timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
                key_filename=key_filename, allow_agent=allow_agent,
            )

            self.shell = self.ssh.invoke_shell()
            self.shell.settimeout(timeout)

        except socket.gaierror:
            raise ShellError("unable to resolve host name")
        except AuthenticationException:
            raise ShellError('Unable to authenticate to remote device')
        except socket.error:
            exc = get_exception()
            # NOTE(review): errno 60 is ETIMEDOUT on BSD/macOS only;
            # other platforms fall through to the bare re-raise.
            if exc.errno == 60:
                raise ShellError('timeout trying to connect to host')
            raise

        if self.kickstart:
            self.shell.sendall("\n")

        # Consume the login banner/prompt before returning.
        self.receive()

    def strip(self, data):
        """Remove terminal escape noise (ANSI_RE) from *data*."""
        for regex in ANSI_RE:
            data = regex.sub('', data)
        return data

    def receive(self, cmd=None):
        """Read from the channel until a prompt regex matches; return sanitized output."""
        recv = StringIO()
        handled = False

        while True:
            data = self.shell.recv(200)

            recv.write(data)
            # Re-examine only the trailing window of the buffer for prompts.
            # NOTE(review): if fewer than 200 bytes have ever been received,
            # tell() - 200 is negative — confirm StringIO tolerates this.
            recv.seek(recv.tell() - 200)

            window = self.strip(recv.read())

            # Answer an interactive question (e.g. confirmation) at most once.
            if hasattr(cmd, 'prompt') and not handled:
                handled = self.handle_prompt(window, cmd)

            try:
                if self.find_prompt(window):
                    resp = self.strip(recv.getvalue())
                    return self.sanitize(cmd, resp)
            except ShellError:
                # Attach the offending command before propagating.
                exc = get_exception()
                exc.command = cmd
                raise

    def send(self, commands):
        """Send each command and collect its output; raises ShellError on failure."""
        responses = list()
        try:
            for command in to_list(commands):
                cmd = '%s\r' % str(command)
                self.shell.sendall(cmd)
                responses.append(self.receive(command))
        except socket.timeout:
            raise ShellError("timeout trying to send command: %s" % cmd)
        except socket.error:
            exc = get_exception()
            raise ShellError("problem sending command to host: %s" % exc.message)
        return responses

    def close(self):
        # Closes the channel only; the underlying SSHClient is left as-is.
        self.shell.close()

    def handle_prompt(self, resp, cmd):
        """Send cmd.response for the first cmd.prompt regex found in *resp*.

        Returns True when an answer was sent, None otherwise.
        """
        prompt = to_list(cmd.prompt)
        response = to_list(cmd.response)

        for pr, ans in zip(prompt, response):
            match = pr.search(resp)
            if match:
                answer = '%s\r' % ans
                self.shell.sendall(answer)
                return True

    def sanitize(self, cmd, resp):
        """Drop the echoed command and prompt lines from *resp*."""
        cleaned = []
        for line in resp.splitlines():
            if line.startswith(str(cmd)) or self.find_prompt(line):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)

    def find_prompt(self, response):
        """True if *response* matches a prompt regex; raises ShellError on an error regex."""
        for regex in self.errors:
            if regex.search(response):
                raise ShellError('matched error in response: %s' % response)

        for regex in self.prompts:
            match = regex.search(response)
            if match:
                self._matched_prompt = match.group()
                return True
class CliBase(object):
    """Basic paramiko-based ssh transport any NetworkModule can use."""
    # Subclasses are expected to provide CLI_PROMPTS_RE and CLI_ERRORS_RE
    # (regex lists handed to Shell below).

    def __init__(self):
        if not HAS_PARAMIKO:
            raise NetworkError(
                msg='paramiko is required but does not appear to be installed. '
                'It can be installed using `pip install paramiko`'
            )

        self.shell = None
        self._connected = False
        self.default_output = 'text'

    def connect(self, params, kickstart=True):
        """Open the SSH shell using connection *params*; raises NetworkError on failure."""
        host = params['host']
        port = params.get('port') or 22

        username = params['username']
        password = params.get('password')
        key_file = params.get('ssh_keyfile')
        timeout = params['timeout']

        try:
            self.shell = Shell(
                kickstart=kickstart,
                prompts_re=self.CLI_PROMPTS_RE,
                errors_re=self.CLI_ERRORS_RE,
            )
            self.shell.open(
                host, port=port, username=username, password=password,
                key_filename=key_file, timeout=timeout,
            )
        except ShellError:
            # Re-wrap transport errors in the module-level exception type.
            exc = get_exception()
            raise NetworkError(
                msg='failed to connect to %s:%s' % (host, port), exc=str(exc)
            )

        self._connected = True

    def disconnect(self):
        self.shell.close()
        self._connected = False

    def authorize(self, params, **kwargs):
        # No privilege escalation by default; subclasses may override.
        pass

    ### Command methods ###

    def execute(self, commands):
        """Run *commands* on the device; raises NetworkError on shell failure."""
        try:
            return self.shell.send(commands)
        except ShellError:
            exc = get_exception()
            raise NetworkError(exc.message, commands=commands)

    def run_commands(self, commands):
        return self.execute(to_list(commands))

    ### Config methods ###
    # Configuration handling is device-specific; subclasses must implement.

    def configure(self, commands):
        raise NotImplementedError

    def get_config(self, commands):
        raise NotImplementedError

    def load_config(self, commands):
        raise NotImplementedError

    def save_config(self):
        raise NotImplementedError
| gpl-3.0 |
ASAZING/android_kernel_lanix_l900 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin, 4 bytes at a time, and
# emit them as a single space-separated "index(hex)=value" line suitable for
# the sysfs adsl_config attribute.
i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: report and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
KodiColdkeys/coldkeys-addons | repository/plugin.video.white.devil/resources/lib/sources/wrzcraft_wp_jh.py | 6 | 6526 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Wrzcraft RSS-feed scraper provider for the addon's source resolver."""

    def __init__(self):
        self.language = ['en']
        self.domains = ['wrzcraft.net']
        self.base_link = 'http://wrzcraft.net'
        self.search_link = '/search/%s/feed/rss2/'

    def movie(self, imdb, title, year):
        # Encode the movie identity as a query string; later decoded by sources().
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        # Encode the show identity as a query string; episode() adds episode info.
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the tvshow() query string with episode-level fields.
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the site's RSS feed and return candidate hoster links.

        Returns a list of dicts with source/quality/provider/url/info keys;
        bare excepts are used throughout as skip-this-item control flow.
        """
        try:
            sources = []

            if url == None: return sources

            # Provider only serves debrid-capable links.
            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            # Matching token: SxxExx for episodes, release year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            # First pass: collect (release-name, link) pairs from feed items.
            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    u = client.parseDOM(post, 'enclosure', ret='url')
                    u = [i for i in u if not 'openload' in i]

                    if 'tvshowtitle' in data:
                        # Append the link's filename so episode tags survive.
                        u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u]
                    else:
                        u = [(t, i) for i in u]

                    items += u
                except:
                    pass

            # Second pass: filter by title/year-or-episode and classify quality.
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    # Tokenise everything after the year/episode tag.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        # NOTE(review): item has only 2 elements as built
                        # above, so item[2] raises and size info is always
                        # skipped — confirm intent.
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Wrzcraft', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            # Prefer non-CAM releases when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources

    def resolve(self, url):
        # Links are already direct hoster URLs; nothing to resolve.
        return url
| gpl-2.0 |
c-a/jhbuild | jhbuild/cut_n_paste/subprocess.py | 8 | 40883 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of the
# author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*args, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
if mswindows:
import threading
import msvcrt
if 0: # <-- change this to use pywin32 instead of the _subprocess driver
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
from _subprocess import *
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
class pywintypes:
error = IOError
else:
import select
import errno
import fcntl
import pickle
# Public API of this (pre-2.4 backport) subprocess module.
__all__ = ["Popen", "PIPE", "STDOUT", "call"]

# Highest file descriptor that may need closing in the child when
# close_fds is requested; fall back to a conservative guess when the
# platform does not expose SC_OPEN_MAX.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    MAXFD = 256

# True/False does not exist on 2.2.0
try:
    False
except NameError:
    False = 0
    True = 1

# Popen instances whose exit status has not been collected yet; see
# _cleanup() and Popen.__init__/poll().
_active = []

def _cleanup():
    # Poll every outstanding child so that finished ones are reaped
    # (poll() removes them from _active).  Iterate over a copy because
    # poll() mutates the list while we walk it.
    for inst in _active[:]:
        inst.poll()

# Sentinel values for the stdin/stdout/stderr arguments of Popen:
# PIPE requests a fresh pipe to the child; STDOUT redirects the
# child's stderr into its stdout stream.
PIPE = -1
STDOUT = -2
def call(*args, **kwargs):
    """Run a command and block until it finishes.

    All positional and keyword arguments are forwarded unchanged to the
    Popen constructor; the child's exit status (the returncode
    attribute) is returned.

    Example:
        retcode = call(["ls", "-l"])
    """
    process = Popen(*args, **kwargs)
    return process.wait()
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # BUGFIX: an empty argument must be passed as "" -- previously
        # `not arg` was missing, so empty strings silently vanished
        # from the resulting command line.
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes, then emit an escaped quote.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # Trailing backslashes before the closing quote must be
            # doubled so the quote is not escaped (rule 5).
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)
class Popen(object):
    """Execute a child program in a new process.

    On POSIX the child is started via fork()/exec*(); on Windows via
    CreateProcess().  The PIPE and STDOUT sentinels select pipe
    redirection for stdin/stdout/stderr; the parent ends of any pipes
    are exposed as the .stdin/.stdout/.stderr file-object attributes.
    Each instance registers itself in the module-level _active list
    until its exit status has been collected by poll() or wait().

    NOTE(review): this is Python-2-only code (``long``,
    ``except pywintypes.error, e``, ``apply``, ``0x80000000L``).
    """

    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance."""
        # Reap any previously started children whose status is pending.
        _cleanup()

        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")

        if mswindows:
            # These POSIX-only features cannot be emulated on Windows.
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms")
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)

        # Wrap the parent pipe ends in file objects.
        # NOTE(review): the plain truthiness tests would misbehave if a
        # pipe end ever got fd 0 -- presumably that cannot happen here
        # because fds 0-2 are already open; confirm.
        if p2cwrite:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)

        # Remember this instance until its exit status is collected.
        _active.append(self)

    def _translate_newlines(self, data):
        # Normalize Windows (\r\n) and old-Mac (\r) line endings to \n;
        # used by communicate() for universal_newlines support.
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin == None and stdout == None and stderr == None:
                # Nothing redirected: the child simply inherits ours.
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin == None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                p2cwrite = p2cwrite.Detach()
                p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
            elif type(stdin) == types.IntType:
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout == None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                c2pread = c2pread.Detach()
                c2pread = msvcrt.open_osfhandle(c2pread, 0)
            elif type(stdout) == types.IntType:
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr == None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                errread = errread.Detach()
                errread = msvcrt.open_osfhandle(errread, 0)
            elif stderr == STDOUT:
                # Share the child's stdout handle.
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            default_startupinfo = STARTUPINFO()
            if startupinfo == None:
                startupinfo = default_startupinfo
            if not None in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                default_startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                default_startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + args
                if (GetVersion() >= 0x80000000L or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 # must inherit handles to pass std
                                                 # handles
                                                 1,
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or simliar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)

            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()

            # Child is launched. Close the parent's copy of those pipe
            # handles that only the child should have open.  You need
            # to make sure that no handles to the write end of the
            # output pipe are maintained in this process or else the
            # pipe will not close when the child process exits and the
            # ReadFile will hang.
            if p2cread != None:
                p2cread.Close()
            if c2pwrite != None:
                c2pwrite.Close()
            if errwrite != None:
                errwrite.Close()

        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                # Non-blocking: timeout of 0 just samples the state.
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    _active.remove(self)
            return self.returncode

        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                obj = WaitForSingleObject(self._handle, INFINITE)
                self.returncode = GetExitCodeProcess(self._handle)
                _active.remove(self)
            return self.returncode

        def _readerthread(self, fh, buffer):
            # Helper for communicate(): drain one stream on a worker
            # thread, delivering the whole content as buffer[0].
            buffer.append(fh.read())

        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.

            communicate() returns a tuple (stdout, stderr)."""
            stdout = None # Return
            stderr = None # Return

            # Reader threads avoid deadlock when both pipes fill up.
            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()

            if self.stdin:
                if input != None:
                    self.stdin.write(input)
                self.stdin.close()

            if self.stdout:
                stdout_thread.join()
            if self.stderr:
                stderr_thread.join()

            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = stdout[0]
            if stderr != None:
                stderr = stderr[0]

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)

    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin == None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = os.pipe()
            elif type(stdin) == types.IntType:
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout == None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = os.pipe()
            elif type(stdout) == types.IntType:
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr == None:
                pass
            elif stderr == PIPE:
                errread, errwrite = os.pipe()
            elif stderr == STDOUT:
                # Share the child's stdout descriptor.
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _set_cloexec_flag(self, fd):
            # Mark fd close-on-exec so it disappears in the child after
            # a successful exec (used for the error-reporting pipe).
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)

        def _close_fds(self, but):
            # Close every fd above stderr except `but` (the error pipe).
            for i in range(3, MAXFD):
                if i == but:
                    continue
                try:
                    os.close(i)
                except:
                    pass

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if isinstance(args, types.StringTypes):
                args = [args]

            if shell:
                args = ["/bin/sh", "-c"] + args

            if executable == None:
                executable = args[0]

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = os.pipe()
            self._set_cloexec_flag(errpipe_write)

            self.pid = os.fork()
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite:
                        os.close(p2cwrite)
                    if c2pread:
                        os.close(c2pread)
                    if errread:
                        os.close(errread)
                    os.close(errpipe_read)

                    # Dup fds for child
                    if p2cread:
                        os.dup2(p2cread, 0)
                    if c2pwrite:
                        os.dup2(c2pwrite, 1)
                    if errwrite:
                        os.dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we don't close the same
                    # fd more than once.
                    if p2cread:
                        os.close(p2cread)
                    if c2pwrite and c2pwrite not in (p2cread,):
                        os.close(c2pwrite)
                    if errwrite and errwrite not in (p2cread, c2pwrite):
                        os.close(errwrite)

                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if cwd != None:
                        os.chdir(cwd)

                    if preexec_fn:
                        apply(preexec_fn)

                    if env == None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)

                except:
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))

                # This exitcode won't be reported to applications, so it
                # really doesn't matter what we return.
                os._exit(255)

            # Parent
            os.close(errpipe_write)
            if p2cread and p2cwrite:
                os.close(p2cread)
            if c2pwrite and c2pread:
                os.close(c2pwrite)
            if errwrite and errread:
                os.close(errwrite)

            # Wait for exec to fail or succeed; possibly raising exception
            data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
            os.close(errpipe_read)
            if data != "":
                # exec failed: reap the dead child, then re-raise its
                # pickled exception in the parent.
                os.waitpid(self.pid, 0)
                child_exception = pickle.loads(data)
                raise child_exception

        def _handle_exitstatus(self, sts):
            # Decode the raw waitpid() status into a returncode:
            # negative signal number, or the plain exit status.
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

            _active.remove(self)

        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                try:
                    # WNOHANG makes this non-blocking; pid == 0 means
                    # the child is still running.
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    pass
            return self.returncode

        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                pid, sts = os.waitpid(self.pid, 0)
                self._handle_exitstatus(sts)
            return self.returncode

        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.

            communicate() returns a tuple (stdout, stderr)."""
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return

            if self.stdin:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []

            # Multiplex over the pipes with select() to avoid deadlock
            # when more than one of them fills up at the same time.
            while read_set or write_set:
                rlist, wlist, xlist = select.select(read_set, write_set, [])

                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking.  POSIX defines PIPE_BUF >= 512
                    bytes_written = os.write(self.stdin.fileno(), input[:512])
                    input = input[bytes_written:]
                    if not input:
                        self.stdin.close()
                        write_set.remove(self.stdin)

                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)

                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)

            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = ''.join(stdout)
            if stderr != None:
                stderr = ''.join(stderr)

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)
def _demo_posix():
    """Interactive demonstration of the POSIX features of this module."""
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist

    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()

    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist.  I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        print >>sys.stderr, "Gosh.  No error."
def _demo_windows():
    """Interactive demonstration of the Windows features of this module."""
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()

if __name__ == "__main__":
    # Run the demo matching the current platform.
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
| gpl-2.0 |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/contrib/auth/migrations/0001_initial.py | 143 | 4370 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial migration for django.contrib.auth.

    Creates the three core authentication models -- Permission, Group
    and the swappable default User -- and wires up their custom
    managers.  Depends on contenttypes because Permission carries a
    ForeignKey to ContentType.  Historical migration: must not be
    edited once applied.
    """

    dependencies = [
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', to_field='id')),
                ('codename', models.CharField(max_length=100, verbose_name='codename')),
            ],
            options={
                'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
                'unique_together': set([('content_type', 'codename')]),
                'verbose_name': 'permission',
                'verbose_name_plural': 'permissions',
            },
            managers=[
                ('objects', django.contrib.auth.models.PermissionManager()),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
                ('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
            ],
            options={
                'verbose_name': 'group',
                'verbose_name_plural': 'groups',
            },
            managers=[
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user')),
                ('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user')),
            ],
            options={
                'swappable': 'AUTH_USER_MODEL',
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| mit |
SerCeMan/intellij-community | python/lib/Lib/StringIO.py | 150 | 10593 | r"""File-like objects that read from or write to a string buffer.
This implements (nearly) all stdio methods.
f = StringIO() # ready for writing
f = StringIO(buf) # ready for reading
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
buf = f.readline() # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size]) # truncate file to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
f.getvalue() # return whole file's contents as a string
Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- Seeking far beyond EOF and then writing will insert real null
bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
# errno.EINVAL is needed by truncate(); fall back to the well-known
# POSIX value when the errno module is unavailable.
try:
    from errno import EINVAL
except ImportError:
    EINVAL = 22

__all__ = ["StringIO"]

def _complain_ifclosed(closed):
    # Shared guard: every file operation must fail with ValueError once
    # close() has been called (mirrors real file-object behaviour).
    if closed:
        raise ValueError, "I/O operation on closed file"
class StringIO:
    """class StringIO([buffer])

    When a StringIO object is created, it can be initialized to an existing
    string by passing the string to the constructor. If no string is given,
    the StringIO will start empty.

    The StringIO object can accept either Unicode or 8-bit strings, but
    mixing the two may take some care. If both are used, 8-bit strings that
    cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
    a UnicodeError to be raised when getvalue() is called.

    Implementation note: appended data is accumulated in self.buflist and
    only joined into self.buf lazily (on seek/read/getvalue), so repeated
    write() calls at the end of the buffer stay cheap.
    """
    def __init__(self, buf = ''):
        # Force self.buf to be a string or unicode
        if not isinstance(buf, basestring):
            buf = str(buf)
        self.buf = buf          # consolidated contents
        self.len = len(buf)     # logical file length
        self.buflist = []       # pending, not-yet-joined writes
        self.pos = 0            # current file position
        self.closed = False
        self.softspace = 0      # required by the print statement protocol

    def __iter__(self):
        return self

    def next(self):
        """A file object is its own iterator, for example iter(f) returns f
        (unless f is closed). When a file is used as an iterator, typically
        in a for loop (for example, for line in f: print line), the next()
        method is called repeatedly. This method returns the next input line,
        or raises StopIteration when EOF is hit.
        """
        _complain_ifclosed(self.closed)
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def close(self):
        """Free the memory buffer.
        """
        if not self.closed:
            self.closed = True
            del self.buf, self.pos

    def isatty(self):
        """Returns False because StringIO objects are not connected to a
        tty-like device.
        """
        _complain_ifclosed(self.closed)
        return False

    def seek(self, pos, mode = 0):
        """Set the file's current position.

        The mode argument is optional and defaults to 0 (absolute file
        positioning); other values are 1 (seek relative to the current
        position) and 2 (seek relative to the file's end).

        There is no return value.
        """
        _complain_ifclosed(self.closed)
        # Consolidate pending writes before repositioning.
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            pos += self.len
        self.pos = max(0, pos)

    def tell(self):
        """Return the file's current position."""
        _complain_ifclosed(self.closed)
        return self.pos

    def read(self, n = -1):
        """Read at most size bytes from the file
        (less if the read hits EOF before obtaining size bytes).

        If the size argument is negative or omitted, read all data until EOF
        is reached. The bytes are returned as a string object. An empty
        string is returned when EOF is encountered immediately.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos+n, self.len)
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def readline(self, length=None):
        r"""Read one entire line from the file.

        A trailing newline character is kept in the string (but may be absent
        when a file ends with an incomplete line). If the size argument is
        present and non-negative, it is a maximum byte count (including the
        trailing newline) and an incomplete line may be returned.

        An empty string is returned only when EOF is encountered immediately.

        Note: Unlike stdio's fgets(), the returned string contains null
        characters ('\0') if they occurred in the input.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        i = self.buf.find('\n', self.pos)
        if i < 0:
            newpos = self.len
        else:
            newpos = i+1
        if length is not None:
            if self.pos + length < newpos:
                newpos = self.pos + length
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def readlines(self, sizehint = 0):
        """Read until EOF using readline() and return a list containing the
        lines thus read.

        If the optional sizehint argument is present, instead of reading up
        to EOF, whole lines totalling approximately sizehint bytes (possibly
        more, to accommodate a final whole line) are read.
        """
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def truncate(self, size=None):
        """Truncate the file's size.

        If the optional size argument is present, the file is truncated to
        (at most) that size. The size defaults to the current position.
        The current file position is not changed unless the position
        is beyond the new file size.

        If the specified size exceeds the file's current size, the
        file remains unchanged.
        """
        _complain_ifclosed(self.closed)
        if size is None:
            size = self.pos
        elif size < 0:
            raise IOError(EINVAL, "Negative size not allowed")
        elif size < self.pos:
            self.pos = size
        # getvalue() consolidates buflist before slicing.
        self.buf = self.getvalue()[:size]
        self.len = size

    def write(self, s):
        """Write a string to the file.

        There is no return value.
        """
        _complain_ifclosed(self.closed)
        if not s: return
        # Force s to be a string or unicode
        if not isinstance(s, basestring):
            s = str(s)
        spos = self.pos
        slen = self.len
        if spos == slen:
            # Fast path: appending at EOF just queues the chunk.
            self.buflist.append(s)
            self.len = self.pos = spos + len(s)
            return
        if spos > slen:
            # Writing past EOF pads the gap with NUL bytes, like a
            # sparse file would appear when read back.
            self.buflist.append('\0'*(spos - slen))
            slen = spos
        newpos = spos + len(s)
        if spos < slen:
            # Overwrite in the middle: consolidate and splice.
            if self.buflist:
                self.buf += ''.join(self.buflist)
            self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
            self.buf = ''
            if newpos > slen:
                slen = newpos
        else:
            self.buflist.append(s)
            slen = newpos
        self.len = slen
        self.pos = newpos

    def writelines(self, iterable):
        """Write a sequence of strings to the file. The sequence can be any
        iterable object producing strings, typically a list of strings. There
        is no return value.

        (The name is intended to match readlines(); writelines() does not add
        line separators.)
        """
        write = self.write
        for line in iterable:
            write(line)

    def flush(self):
        """Flush the internal buffer
        """
        _complain_ifclosed(self.closed)

    def getvalue(self):
        """
        Retrieve the entire contents of the "file" at any time before
        the StringIO object's close() method is called.

        The StringIO object can accept either Unicode or 8-bit strings,
        but mixing the two may take some care. If both are used, 8-bit
        strings that cannot be interpreted as 7-bit ASCII (that use the
        8th bit) will cause a UnicodeError to be raised when getvalue()
        is called.
        """
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        return self.buf
# A little test suite

def test():
    """Smoke-test StringIO against a real text file (default /etc/passwd):
    round-trips the contents through write()/writelines(), then exercises
    seek/tell/readline/readlines/read/truncate and checks consistency.
    """
    import sys
    if sys.argv[1:]:
        file = sys.argv[1]
    else:
        file = '/etc/passwd'
    lines = open(file, 'r').readlines()
    text = open(file, 'r').read()
    f = StringIO()
    for line in lines[:-2]:
        f.write(line)
    f.writelines(lines[-2:])
    if f.getvalue() != text:
        raise RuntimeError, 'write failed'
    length = f.tell()
    print 'File length =', length
    f.seek(len(lines[0]))
    f.write(lines[1])
    f.seek(0)
    print 'First line =', repr(f.readline())
    print 'Position =', f.tell()
    line = f.readline()
    print 'Second line =', repr(line)
    f.seek(-len(line), 1)
    line2 = f.read(len(line))
    if line != line2:
        raise RuntimeError, 'bad result after seek back'
    f.seek(len(line2), 1)
    list = f.readlines()
    line = list[-1]
    f.seek(f.tell() - len(line))
    line2 = f.read()
    if line != line2:
        raise RuntimeError, 'bad result after seek back from EOF'
    print 'Read', len(list), 'more lines'
    print 'File length =', f.tell()
    if f.tell() != length:
        raise RuntimeError, 'bad length'
    f.truncate(length/2)
    f.seek(0, 2)
    print 'Truncated length =', f.tell()
    if f.tell() != length/2:
        raise RuntimeError, 'truncate did not adjust length'
    f.close()

if __name__ == '__main__':
    test()
| apache-2.0 |
kayhayen/Nuitka | nuitka/importing/IgnoreListing.py | 1 | 10934 | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Ignore listing of modules that are not found, but probably that's acceptable.
"""
import sys
from nuitka.Errors import NuitkaOptimizationError
def getModuleIgnoreList():
    """Return the tuple of module names that may be reported as not found
    without that being an error.

    These are platform-specific modules, optional standard-library pieces,
    modules probed speculatively by third-party packages, and module names
    used only inside CPython's own test suite.

    Note: the previously duplicated "sets" entry has been removed; the list
    now contains each name exactly once.
    """
    return (
        "mac",
        "nt",
        "os2",
        "posix",
        "_emx_link",
        "riscos",
        "ce",
        "riscospath",
        "riscosenviron",
        "Carbon.File",
        "org.python.core",
        "_sha",
        "_sha256",
        "array",
        "_sha512",
        "_md5",
        "_subprocess",
        "msvcrt",
        "cPickle",
        "marshal",
        "imp",
        "sys",
        "itertools",
        "cStringIO",
        "time",
        "zlib",
        "thread",
        "math",
        "errno",
        "operator",
        "signal",
        "gc",
        "exceptions",
        "win32process",
        "unicodedata",
        "__builtin__",
        "fcntl",
        "_socket",
        "_ssl",
        "pwd",
        "spwd",
        "_random",
        "grp",
        "_io",
        "_string",
        "select",
        "__main__",
        "_winreg",
        "_warnings",
        "_sre",
        "_functools",
        "_hashlib",
        "_collections",
        "_locale",
        "_codecs",
        "_weakref",
        "_struct",
        "_dummy_threading",
        "binascii",
        "datetime",
        "_ast",
        "xxsubtype",
        "_bytesio",
        "cmath",
        "_fileio",
        "aetypes",
        "aepack",
        "MacOS",
        "cd",
        "cl",
        "gdbm",
        "gl",
        "GL",
        "aetools",
        "_bisect",
        "_heapq",
        "_symtable",
        "syslog",
        "_datetime",
        "_elementtree",
        "_pickle",
        "_posixsubprocess",
        "_thread",
        "atexit",
        "pyexpat",
        "_imp",
        "_sha1",
        "faulthandler",
        "_osx_support",
        "sysconfig",
        "copyreg",
        "ipaddress",
        "reprlib",
        "win32event",
        "win32file",
        # Python-Qt4 does these if missing python3 parts:
        "PyQt4.uic.port_v3.string_io",
        "PyQt4.uic.port_v3.load_plugin",
        "PyQt4.uic.port_v3.ascii_upper",
        "PyQt4.uic.port_v3.proxy_base",
        "PyQt4.uic.port_v3.as_string",
        # CPython3 does these:
        "builtins",
        "UserDict",
        "os.path",
        "StringIO",
        # "test_array",
        "_testcapi",
        # test_applesingle.py
        "applesingle",
        # test_buffer.py
        "_testbuffer",
        # test_bsddb.py
        "bsddb.test",
        # test_collections.py
        "collections.abc",
        # test_compile.py
        "__package__.module",
        "__mangled_mod",
        "__package__",
        # test_ctypes
        "ctypes.test",
        # test_dbm.py
        "dbm.dumb",
        # test_dbm_ndbm.py
        "dbm.ndbm",
        # test_distutils.py
        "distutils.tests",
        "distutils.mwerkscompiler",
        # test_docxmlrpc.py
        "xmlrpc",
        # test_emails.py
        "email.test.test_email",
        "email.test.test_email_renamed",
        "email.test.test_email_codecs",
        # test_email_codecs.py
        "email.test",
        # test_enum.py
        "enum",
        # test_file.py
        "_pyio",
        # test_frozen.py
        "__hello__",
        "__phello__",
        "__phello__.spam",
        "__phello__.foo",
        # test_fork1.py
        "fake test module",
        # test_html.py
        "html",
        "html.entities",
        # test_http_cookiejar.py
        "urllib.request",
        "http",
        # test_imp.py
        "importlib.test.import_",
        "pep3147.foo",
        "pep3147",
        # test_import.py
        "RAnDoM",
        "infinite_reload",
        "test_trailing_slash",
        "nonexistent_xyzzy",
        "_parent_foo.bar",
        "_parent_foo",
        "test_unc_path",
        # test_importhooks.py
        "hooktestmodule",
        "hooktestpackage",
        "hooktestpackage.sub",
        "reloadmodule",
        "hooktestpackage.sub.subber",
        "hooktestpackage.oldabs",
        "hooktestpackage.newrel",
        "hooktestpackage.sub.subber.subest",
        "hooktestpackage.futrel",
        "sub",
        "hooktestpackage.newabs",
        # test_importlib.py
        "importlib.test.__main__",
        "importlib",
        # test_inspect.py
        "inspect_fodder3",
        "test.test_import",
        # test_imageop.py
        "imgfile",
        # test_json.py
        "json.tests",
        # test_lib2to3.py
        "lib2to3.tests",
        # test_logging.py
        "win32evtlog",
        "win32evtlogutil",
        "pywintypes",
        # test_lzma.py
        "lzma",
        # test_macostools.py
        "macostools",
        # test_msilib.py
        "msilib",
        # test_namespace_pkgs.py
        "foo.one",
        "foo.two",
        "parent.child.one",
        "parent.child.two",
        "parent.child.three",
        "bar.two",
        "a_test",
        "parent.child",
        "parent",
        "bar",
        # test_new.py
        "Spam",
        # test_ossaudiodev.py
        "ossaudiodev",
        # test_pathlib.py
        "pathlib",
        # test_platform.py
        "gestalt",
        # test_pickleable.py
        "email.headerregistry",
        # test_pkg.py
        "t1",
        "t2",
        "t2.sub",
        "t2.sub.subsub",
        "t3.sub.subsub",
        "t5",
        "t6",
        "t7",
        "t7.sub",
        "t7.sub.subsub",
        "t8",
        "t3.sub",
        "t3",
        # test_pkgutil.py
        "foo",
        "foo.bar",
        "foo.baz",
        "zipimport",
        "pkg",
        "pkg.subpkg",
        "pkg.subpkg.c",
        "pkg.subpkg.d",
        # test_policy.py
        "email.policy",
        # test_urllib.py
        "urllib.parse",
        # test_urllib_response.py
        "urllib.response",
        # test_repr.py
        """areallylongpackageandmodulenametotestreprtruncation.\
areallylongpackageandmodulenametotestreprtruncation""",
        "areallylongpackageandmodulenametotestreprtruncation",
        # test_robotparser.py
        "urllib.error",
        "urllib.robotparser",
        # test_runpy.py
        "test.script_helper",
        # test_secrets.py
        "secrets",
        # test_selectors.py
        "selectors",
        # test_statistics.py
        "statistics",
        # test_shelve.py
        "test.test_dbm",
        # test_strftime.py
        "java",
        # test_strop.py
        "strop",
        # test_sqlite3.py
        "sqlite3.test",
        # test_sundry.py
        "distutils.emxccompiler",
        "os2emxpath",
        # test_tcl.py
        "tkinter",
        # test_tk.py
        "runtktests",
        "tkinter.test",
        "tkinter.test.support",
        # test_tools.py
        "analyze_dxp",
        "test_unparse",
        "importlib.machinery",
        # test_traceback.py
        "test_bug737473",
        # test_tracemalloc
        "tracemalloc",
        # test_typing.py
        "mock",
        "typing.io",
        "typing.re",
        # test_unittest.py
        "unittest.test",
        # test_wsgiref.py
        "test.test_httpservers",
        # test_xml_etree.py
        "xml.parsers.expat.errors",
        # test_xmlrpc.py
        "xmlrpc.client",
        # test_zipimport_support.py
        "test_zipped_doctest",
        "zip_pkg",
        # test/test_zipimport_support.py
        "test.test_cmd_line_script",
        # test_winconsoleio.py
        "_testconsole",
        # Python3: modules that no longer exist
        "commands",
        "dummy_thread",
        "_dummy_thread",
        "httplib",
        "Queue",
        "sets",
        # Python2: modules that don't yet exist
        "http.client",
        "queue",
        "winreg",
        # Very old modules with older names
        # ("sets" was listed here too; it already appears above.)
        "simplejson",
        # Standalone mode "site" import flexibilities
        "sitecustomize",
        "usercustomize",
        "apport_python_hook",
        "_frozen_importlib",
        # Standard library stuff that is optional
        "comtypes.server.inprocserver",
        "_tkinter",
        "_scproxy",
        "EasyDialogs",
        "SOCKS",
        "rourl2path",
        "_winapi",
        "win32api",
        "win32con",
        "_gestalt",
        "java.lang",
        "vms_lib",
        "ic",
        "readline",
        "termios",
        "_sysconfigdata",
        "al",
        "AL",
        "sunaudiodev",
        "SUNAUDIODEV",
        "Audio_mac",
        "nis",
        "test.test_MimeWriter",
        "dos",
        "win32pipe",
        "Carbon",
        "Carbon.Files",
        "sgi",
        "ctypes.macholib.dyld",
        "bsddb3",
        "_pybsddb",
        "_xmlrpclib",
        "netbios",
        "win32wnet",
        "email.Parser",
        "elementree.cElementTree",
        "elementree.ElementTree",
        "_gbdm",
        "resource",
        "crypt",
        "bz2",
        "dbm",
        "mmap",
        "Mailman",
        # Mercurial test
        "statprof",
        "email.Generator",
        "email.Utils",
        # setuptools does a lot of speculative stuff
        "wincertstore",
        "setuptools_svn",
        # reportlab does use this if present only and warns about itself.
        "pyfribidi2",
        "macfs",
        # psutils
        "_psutil_windows",
        # nose
        "unittest2",
        "IronPython",
        "clr",
        "compiler.consts",
        "new",
        # pkg_resources
        "pkg_resources.extern",
        "ordereddict",
        # appdirs
        "com",
        "win32com",
        # gtk
        "gdk",
        # six
        "six.moves",
        # Python3 namespace packages.
        "_frozen_importlib_external",
        # Garbage from PyWin32
        "pywin32_bootstrap",
    )
def isIgnoreListedNotExistingModule(module_name):
    """Check whether a module that could not be found may safely be ignored.

    Built-in modules must never be reported as non-existing, so hitting one
    here indicates a bug in the ignore list and raises
    NuitkaOptimizationError.
    """
    is_builtin = module_name in sys.builtin_module_names
    if is_builtin:
        message = (
            """
Your CPython version has a built-in module '%s', that is not ignore listed
please report this as a bug."""
            % module_name
        )
        raise NuitkaOptimizationError(message)

    return module_name.hasOneOfNamespaces(getModuleIgnoreList())
| apache-2.0 |
gauravbose/digital-menu | digimenu2/tests/template_tests/syntax_tests/test_if_changed.py | 16 | 9575 | from django.template import Context, Template
from django.test import SimpleTestCase
from ..utils import setup
class IfChangedTagTests(SimpleTestCase):
    """Rendering tests for the {% ifchanged %} template tag: bare usage,
    nested loops, explicit parameters, and the {% else %} clause."""

    # --- bare {% ifchanged %}: content rendered only when its output changes ---

    @setup({'ifchanged01': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
    def test_ifchanged01(self):
        output = self.engine.render_to_string('ifchanged01', {'num': (1, 2, 3)})
        self.assertEqual(output, '123')

    @setup({'ifchanged02': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
    def test_ifchanged02(self):
        # The repeated leading 1 is emitted only once.
        output = self.engine.render_to_string('ifchanged02', {'num': (1, 1, 3)})
        self.assertEqual(output, '13')

    @setup({'ifchanged03': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
    def test_ifchanged03(self):
        output = self.engine.render_to_string('ifchanged03', {'num': (1, 1, 1)})
        self.assertEqual(output, '1')

    # --- nested loops: each ifchanged tracks its own state per loop level ---

    @setup({'ifchanged04': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
                           '{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
                           '{% endfor %}{% endfor %}'})
    def test_ifchanged04(self):
        output = self.engine.render_to_string('ifchanged04', {'num': (1, 2, 3), 'numx': (2, 2, 2)})
        self.assertEqual(output, '122232')

    @setup({'ifchanged05': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
                           '{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
                           '{% endfor %}{% endfor %}'})
    def test_ifchanged05(self):
        output = self.engine.render_to_string('ifchanged05', {'num': (1, 1, 1), 'numx': (1, 2, 3)})
        self.assertEqual(output, '1123123123')

    @setup({'ifchanged06': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
                           '{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
                           '{% endfor %}{% endfor %}'})
    def test_ifchanged06(self):
        output = self.engine.render_to_string('ifchanged06', {'num': (1, 1, 1), 'numx': (2, 2, 2)})
        self.assertEqual(output, '1222')

    @setup({'ifchanged07': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
                           '{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
                           '{% for y in numy %}{% ifchanged %}{{ y }}{% endifchanged %}'
                           '{% endfor %}{% endfor %}{% endfor %}'})
    def test_ifchanged07(self):
        output = self.engine.render_to_string('ifchanged07', {'num': (1, 1, 1), 'numx': (2, 2, 2), 'numy': (3, 3, 3)})
        self.assertEqual(output, '1233323332333')

    @setup({'ifchanged08': '{% for data in datalist %}{% for c,d in data %}'
                           '{% if c %}{% ifchanged %}{{ d }}{% endifchanged %}'
                           '{% endif %}{% endfor %}{% endfor %}'})
    def test_ifchanged08(self):
        # ifchanged wrapped in a condition: only the truthy rows take part.
        output = self.engine.render_to_string('ifchanged08', {'datalist': [
            [(1, 'a'), (1, 'a'), (0, 'b'), (1, 'c')],
            [(0, 'a'), (1, 'c'), (1, 'd'), (1, 'd'), (0, 'e')]
        ]})
        self.assertEqual(output, 'accd')

    # --- {% ifchanged expr ... %}: watch explicit expressions, not output ---

    @setup({'ifchanged-param01': '{% for n in num %}{% ifchanged n %}..{% endifchanged %}'
                                 '{{ n }}{% endfor %}'})
    def test_ifchanged_param01(self):
        """
        Test one parameter given to ifchanged.
        """
        output = self.engine.render_to_string('ifchanged-param01', {'num': (1, 2, 3)})
        self.assertEqual(output, '..1..2..3')

    @setup({'ifchanged-param02': '{% for n in num %}{% for x in numx %}{% ifchanged n %}..{% endifchanged %}'
                                 '{{ x }}{% endfor %}{% endfor %}'})
    def test_ifchanged_param02(self):
        # n only changes on the outer loop, so ".." appears once per outer pass.
        output = self.engine.render_to_string('ifchanged-param02', {'num': (1, 2, 3), 'numx': (5, 6, 7)})
        self.assertEqual(output, '..567..567..567')

    @setup({'ifchanged-param03': '{% for n in num %}{{ n }}{% for x in numx %}'
                                 '{% ifchanged x n %}{{ x }}{% endifchanged %}'
                                 '{% endfor %}{% endfor %}'})
    def test_ifchanged_param03(self):
        """
        Test multiple parameters to ifchanged.
        """
        output = self.engine.render_to_string('ifchanged-param03', {'num': (1, 1, 2), 'numx': (5, 6, 6)})
        self.assertEqual(output, '156156256')

    @setup({'ifchanged-param04': '{% for d in days %}{% ifchanged %}{{ d.day }}{% endifchanged %}'
                                 '{% for h in d.hours %}{% ifchanged d h %}{{ h }}{% endifchanged %}'
                                 '{% endfor %}{% endfor %}'})
    def test_ifchanged_param04(self):
        """
        Test a date+hour like construct, where the hour of the last day is
        the same but the date had changed, so print the hour anyway.
        """
        output = self.engine.render_to_string(
            'ifchanged-param04',
            {'days': [{'hours': [1, 2, 3], 'day': 1}, {'hours': [3], 'day': 2}]},
        )
        self.assertEqual(output, '112323')

    @setup({'ifchanged-param05': '{% for d in days %}{% ifchanged d.day %}{{ d.day }}{% endifchanged %}'
                                 '{% for h in d.hours %}{% ifchanged d.day h %}{{ h }}{% endifchanged %}'
                                 '{% endfor %}{% endfor %}'})
    def test_ifchanged_param05(self):
        """
        Logically the same as above, just written with explicit ifchanged
        for the day.
        """
        output = self.engine.render_to_string(
            'ifchanged-param05',
            {'days': [{'hours': [1, 2, 3], 'day': 1}, {'hours': [3], 'day': 2}]},
        )
        self.assertEqual(output, '112323')

    # --- {% else %} clause: rendered when the watched value did NOT change ---

    @setup({'ifchanged-else01': '{% for id in ids %}{{ id }}'
                                '{% ifchanged id %}-first{% else %}-other{% endifchanged %}'
                                ',{% endfor %}'})
    def test_ifchanged_else01(self):
        """
        Test the else clause of ifchanged.
        """
        output = self.engine.render_to_string('ifchanged-else01', {'ids': [1, 1, 2, 2, 2, 3]})
        self.assertEqual(output, '1-first,1-other,2-first,2-other,2-other,3-first,')

    @setup({'ifchanged-else02': '{% for id in ids %}{{ id }}-'
                                '{% ifchanged id %}{% cycle red,blue %}{% else %}grey{% endifchanged %}'
                                ',{% endfor %}'})
    def test_ifchanged_else02(self):
        # The cycle only advances when ifchanged actually renders its body.
        output = self.engine.render_to_string('ifchanged-else02', {'ids': [1, 1, 2, 2, 2, 3]})
        self.assertEqual(output, '1-red,1-grey,2-blue,2-grey,2-grey,3-red,')

    @setup({'ifchanged-else03': '{% for id in ids %}{{ id }}'
                                '{% ifchanged id %}-{% cycle red,blue %}{% else %}{% endifchanged %}'
                                ',{% endfor %}'})
    def test_ifchanged_else03(self):
        # Empty else clause is allowed.
        output = self.engine.render_to_string('ifchanged-else03', {'ids': [1, 1, 2, 2, 2, 3]})
        self.assertEqual(output, '1-red,1,2-blue,2,2,3-red,')

    @setup({'ifchanged-else04': '{% for id in ids %}'
                                '{% ifchanged %}***{{ id }}*{% else %}...{% endifchanged %}'
                                '{{ forloop.counter }}{% endfor %}'})
    def test_ifchanged_else04(self):
        output = self.engine.render_to_string('ifchanged-else04', {'ids': [1, 1, 2, 2, 2, 3, 4]})
        self.assertEqual(output, '***1*1...2***2*3...4...5***3*6***4*7')

    @setup({'ifchanged-filter-ws': '{% load custom %}{% for n in num %}'
                                   '{% ifchanged n|noop:"x y" %}..{% endifchanged %}{{ n }}'
                                   '{% endfor %}'})
    def test_ifchanged_filter_ws(self):
        """
        Test whitespace in filter arguments
        """
        output = self.engine.render_to_string('ifchanged-filter-ws', {'num': (1, 2, 3)})
        self.assertEqual(output, '..1..2..3')
class IfChangedTests(SimpleTestCase):
    """Regression tests for {% ifchanged %} state handling across renders."""

    def test_ifchanged_concurrency(self):
        """
        #15849 -- ifchanged should be thread-safe.
        """
        template = Template('[0{% for x in foo %},{% with var=get_value %}{% ifchanged %}{{ var }}{% endifchanged %}{% endwith %}{% endfor %}]')

        # Using generator to mimic concurrency.
        # The generator is not passed to the 'for' loop, because it does a list(values)
        # instead, call gen.next() in the template to control the generator.
        def gen():
            yield 1
            yield 2
            # Simulate that another thread is now rendering.
            # When the IfChangeNode stores state at 'self' it stays at '3' and skip the last yielded value below.
            # The nested render below must not disturb the outer render's
            # ifchanged state — that is exactly what #15849 was about.
            iter2 = iter([1, 2, 3])
            output2 = template.render(Context({'foo': range(3), 'get_value': lambda: next(iter2)}))
            self.assertEqual(output2, '[0,1,2,3]', 'Expected [0,1,2,3] in second parallel template, got {}'.format(output2))
            yield 3

        gen1 = gen()
        output1 = template.render(Context({'foo': range(3), 'get_value': lambda: next(gen1)}))
        self.assertEqual(output1, '[0,1,2,3]', 'Expected [0,1,2,3] in first template, got {}'.format(output1))

    def test_ifchanged_render_once(self):
        """
        #19890. The content of ifchanged template tag was rendered twice.
        """
        # If the body were rendered twice, the cycle would advance to '2nd time'.
        template = Template('{% ifchanged %}{% cycle "1st time" "2nd time" %}{% endifchanged %}')
        output = template.render(Context({}))
        self.assertEqual(output, '1st time')
| bsd-3-clause |
a113n/bcbio-nextgen | tests/integration/test_S3_pipelines.py | 3 | 1641 | import os
import subprocess
import pytest
from tests.conftest import make_workdir
from tests.conftest import get_post_process_yaml
@pytest.mark.S3
@pytest.mark.install_required
def test_fusion(install_test_files, data_dir):
    """Run an RNA-seq analysis and test fusion genes, with human-mouse
    disambiguation.

    Requires minikraken database.
    """
    with make_workdir() as workdir:
        config_yaml = get_post_process_yaml(data_dir, workdir)
        sample_dir = os.path.join(data_dir, os.pardir, "test_fusion")
        run_info = os.path.join(data_dir, "run_info-fusion_S3.yaml")
        subprocess.check_call(
            ["bcbio_nextgen.py", config_yaml, sample_dir, run_info])
@pytest.mark.S3
@pytest.mark.install_required
def test_variantcall_1(install_test_files, data_dir):
    """Test variant calling with disambiguation.

    Requires minikraken database.
    """
    with make_workdir() as workdir:
        config_yaml = get_post_process_yaml(data_dir, workdir)
        fastq_dir = os.path.join(data_dir, os.pardir, "100326_FC6107FAAXX")
        run_info = os.path.join(data_dir, "run_info-variantcall_S3_1.yaml")
        subprocess.check_call(
            ["bcbio_nextgen.py", config_yaml, fastq_dir, run_info])
@pytest.mark.S3
@pytest.mark.install_required
def test_variantcall_2(install_test_files, data_dir):
    """Test variant calling with disambiguation.

    Requires minikraken database.
    """
    with make_workdir() as workdir:
        config_yaml = get_post_process_yaml(data_dir, workdir)
        fastq_dir = os.path.join(data_dir, os.pardir, "100326_FC6107FAAXX")
        run_info = os.path.join(data_dir, "run_info-variantcall_S3_2.yaml")
        subprocess.check_call(
            ["bcbio_nextgen.py", config_yaml, fastq_dir, run_info])
| mit |
MackZxh/OCA-Choice | contract/contract_show_invoice/__openerp__.py | 3 | 1462 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Domatix (<www.domatix.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
    'name': 'Contract Show Invoice',
    'summary': 'Button in contracts to show their invoices',
    # Odoo version scheme: <series>.<major>.<minor>.<patch>.
    'version': '8.0.1.0.0',
    'author': 'Domatix, Odoo Community Association (OCA)',
    'website': 'http://www.domatix.com',
    # Modules that must be installed before this one.
    'depends': ['account_analytic_analysis'],
    'category': 'Sales Management',
    # XML data files loaded at install/update time.
    'data': [
        'views/contract_view.xml',
    ],
    # YAML tests run by the legacy Odoo test runner.
    'test': [
        'test/contract_show_invoice_test.yml'
    ],
    'installable': True,
    'auto_install': False,
}
| lgpl-3.0 |
manipopopo/tensorflow | tensorflow/contrib/autograph/converters/call_trees_test.py | 5 | 4240 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for call_trees module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.converters import call_trees
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CallTreesTest(converter_testing.TestCase):
  """Tests for the call_trees converter, which rewrites function calls."""

  def test_basic(self):

    def test_fn_1(_):
      raise ValueError('This should not be called in the compiled version.')

    def other_test_fn_1(a):
      return a + 1

    def test_fn_2(a):
      return test_fn_1(a) + 1

    ns = {'test_fn_1': test_fn_1}
    node, ctx = self.prepare(test_fn_2, ns)
    node = call_trees.transform(node, ctx)

    with self.compiled(node, ns) as result:
      # The converted code calls the renamed function; substitute a working
      # implementation under that generated name.
      new_name, _ = ctx.namer.compiled_function_name(('test_fn_1',))
      setattr(result, new_name, other_test_fn_1)
      self.assertEquals(result.test_fn_2(1), 3)

  def test_dynamic_function(self):

    def test_fn_1():
      raise ValueError('This should be masked by the mock in self.compiled.')

    def test_fn_2(f):
      return f() + 3

    with self.converted(test_fn_2, call_trees, {}) as result:
      # 10 = 7 (from the mock) + 3 (from test_fn_2)
      self.assertEquals(10, result.test_fn_2(test_fn_1))

  def test_basic_method(self):

    class TestClass(object):

      def test_fn_1(self, a):
        return a + 1

      def test_fn_2(self, a):
        return self.test_fn_1(a) + 1

    ns = {'TestClass': TestClass}
    node, ctx = self.prepare(
        TestClass.test_fn_2,
        ns,
        namer=converter_testing.FakeNoRenameNamer(),
        arg_types={'self': (TestClass.__name__, TestClass)})
    node = call_trees.transform(node, ctx)

    with self.compiled(node, ns) as result:
      tc = TestClass()
      self.assertEquals(3, result.test_fn_2(tc, 1))

  def test_py_func_no_retval(self):

    def test_fn(a):
      setattr(a, 'foo', 'bar')

    with self.converted(test_fn, call_trees, {'setattr': setattr}) as result:
      with self.test_session() as sess:

        class Dummy(object):
          pass

        a = Dummy()
        result.test_fn(a)
        py_func_op, = sess.graph.get_operations()
        # The side effect happens only once the wrapping py_func op runs.
        self.assertFalse(hasattr(a, 'foo'))
        sess.run(py_func_op)
        self.assertEquals('bar', a.foo)

  def test_py_func_known_function(self):

    def test_fn():
      return np.random.binomial(2, 0.5)

    with self.converted(test_fn, call_trees, {'np': np},
                        dtypes.int64) as result:
      with self.test_session() as sess:
        # The numpy call is wrapped into a graph op returning a Tensor.
        self.assertTrue(isinstance(result.test_fn(), ops.Tensor))
        self.assertIn(sess.run(result.test_fn()), (0, 1, 2))

  def test_uncompiled_modules(self):

    def test_fn(a):
      a = math_ops.multiply(a, constant_op.constant(2))
      a = math_ops.add(a, constant_op.constant(1))
      return a

    ns = {'math_ops': math_ops, 'constant_op': constant_op}
    node, ctx = self.prepare(
        test_fn,
        ns,
        arg_types=set(((math_ops.__name__,), (constant_op.__name__,))))
    node = call_trees.transform(node, ctx)

    with self.compiled(node, ns) as result:
      with self.test_session() as sess:
        result_tensor = result.test_fn(constant_op.constant(1))
        self.assertEquals(sess.run(result_tensor), 3)
# Allow running this test file directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
patrickm/chromium.src | printing/cups_config_helper.py | 2 | 1983 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cups-config wrapper.
cups-config, at least on Ubuntu Lucid and Natty, dumps all
cflags/ldflags/libs when passed the --libs argument. gyp would like
to keep these separate: cflags are only needed when compiling files
that use cups directly, while libs are only needed on the final link
line.
TODO(evan): remove me once
https://bugs.launchpad.net/ubuntu/+source/cupsys/+bug/163704
is fixed.
"""
import subprocess
import sys
def usage():
  """Print a one-line usage summary to stdout."""
  print 'usage: %s {--cflags|--ldflags|--libs}' % sys.argv[0]
def run_cups_config(mode):
  """Run cups-config with all --cflags etc modes, parse out the mode we want,
  and return those flags as a list."""
  proc = subprocess.Popen(['cups-config', '--cflags', '--ldflags', '--libs'],
                          stdout=subprocess.PIPE)
  all_flags = proc.communicate()[0].strip().split()

  selected = []
  for flag in all_flags:
    if flag.startswith('-l'):
      flag_mode = '--libs'
    elif flag.startswith(('-L', '-Wl,')):
      flag_mode = '--ldflags'
    elif flag.startswith(('-I', '-D')):
      flag_mode = '--cflags'
    else:
      # Be conservative: for flags where we don't know which mode they
      # belong in, always include them.
      flag_mode = None
    if flag_mode in (None, mode):
      selected.append(flag)

  # Note: cross build is confused by the option, and may trigger linker
  # warning causing build error.
  if '-lgnutls' in selected:
    selected.remove('-lgnutls')

  return selected
def main():
if len(sys.argv) != 2:
usage()
return 1
mode = sys.argv[1]
if mode not in ('--cflags', '--libs', '--ldflags'):
usage()
return 1
flags = run_cups_config(mode)
print ' '.join(flags)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
tynn/numpy | benchmarks/benchmarks/bench_function_base.py | 14 | 3841 | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
    """Benchmarks for 1-D histogramming of an evenly spaced ramp."""

    def setup(self):
        # 100k evenly spaced samples spanning [0, 100].
        self.samples = np.linspace(0, 100, 100000)

    def time_full_coverage(self):
        # Bins span the whole data range.
        np.histogram(self.samples, 200, (0, 100))

    def time_small_coverage(self):
        # Bins cover only a narrow slice of the data.
        np.histogram(self.samples, 200, (50, 51))

    def time_fine_binning(self):
        # Many narrow bins over the full range.
        np.histogram(self.samples, 10000, (0, 100))
class Histogram2D(Benchmark):
    """Benchmarks for 2-D histogramming via np.histogramdd."""

    def setup(self):
        # 100k 2-D points built from an evenly spaced ramp over [0, 100].
        self.points = np.linspace(0, 100, 200000).reshape((-1, 2))

    def time_full_coverage(self):
        np.histogramdd(self.points, (200, 200), ((0, 100), (0, 100)))

    def time_small_coverage(self):
        np.histogramdd(self.points, (200, 200), ((50, 51), (50, 51)))

    def time_fine_binning(self):
        np.histogramdd(self.points, (10000, 10000), ((0, 100), (0, 100)))
class Bincount(Benchmark):
    """Benchmarks for np.bincount, with and without weights."""

    def setup(self):
        self.values = np.arange(80000, dtype=np.intp)
        self.weights = self.values.astype(np.float64)

    def time_bincount(self):
        np.bincount(self.values)

    def time_weights(self):
        np.bincount(self.values, weights=self.weights)
class Median(Benchmark):
    """Benchmarks for np.median on even- and odd-length inputs."""

    def setup(self):
        self.even = np.arange(10000, dtype=np.float32)
        self.odd = np.arange(10001, dtype=np.float32)

    def time_even(self):
        np.median(self.even)

    def time_odd(self):
        np.median(self.odd)

    def time_even_inplace(self):
        np.median(self.even, overwrite_input=True)

    def time_odd_inplace(self):
        np.median(self.odd, overwrite_input=True)

    def time_even_small(self):
        np.median(self.even[:500], overwrite_input=True)

    def time_odd_small(self):
        np.median(self.odd[:500], overwrite_input=True)
class Percentile(Benchmark):
    """Benchmarks for np.percentile with a few percentile counts."""

    def setup(self):
        self.even = np.arange(10000, dtype=np.float32)
        self.odd = np.arange(10001, dtype=np.float32)

    def time_quartile(self):
        np.percentile(self.even, [25, 75])

    def time_percentile(self):
        np.percentile(self.even, [25, 35, 55, 65, 75])
class Select(Benchmark):
    """Benchmarks for np.select with small and larger condition lists."""

    def setup(self):
        self.base = np.arange(20000)
        self.alt = self.base.copy()
        self.conditions = [(self.base > 4), (self.base < 2)]
        self.conditions_large = [(self.base > 4), (self.base < 2)] * 10

    def time_select(self):
        np.select(self.conditions, [self.base, self.alt])

    def time_select_larger(self):
        np.select(self.conditions_large, ([self.base, self.alt] * 10))
class Sort(Benchmark):
    """Benchmarks for ndarray.sort/argsort on assorted input patterns."""

    def setup(self):
        # Even- and odd-length inputs; the odd one is shuffled with a fixed
        # seed so timings are reproducible across runs.
        self.e = np.arange(10000, dtype=np.float32)
        self.o = np.arange(10001, dtype=np.float32)
        np.random.seed(25)
        np.random.shuffle(self.o)
        # quicksort implementations can have issues with equal elements
        self.equal = np.ones(10000)
        self.many_equal = np.sort(np.arange(10000) % 10)

        # quicksort median of 3 worst case
        # Builds the adversarial permutation by repeatedly swapping the
        # middle element toward the tail on a shrinking view of the array.
        self.worst = np.arange(1000000)
        x = self.worst
        while x.size > 3:
            mid = x.size // 2
            x[mid], x[-2] = x[-2], x[mid]
            x = x[:-2]

    def time_sort(self):
        np.sort(self.e)

    def time_sort_random(self):
        np.sort(self.o)

    def time_sort_inplace(self):
        self.e.sort()

    def time_sort_equal(self):
        self.equal.sort()

    def time_sort_many_equal(self):
        self.many_equal.sort()

    def time_sort_worst(self):
        np.sort(self.worst)

    def time_argsort(self):
        self.e.argsort()

    def time_argsort_random(self):
        self.o.argsort()
class Where(Benchmark):
    """Benchmarks for np.where in its one- and three-argument forms."""

    def setup(self):
        self.values = np.arange(20000)
        self.alt = self.values.copy()
        self.mask = (self.values > 5000)

    def time_1(self):
        np.where(self.mask)

    def time_2(self):
        np.where(self.mask, self.values, self.alt)

    def time_2_broadcast(self):
        # Scalar third argument exercises broadcasting.
        np.where(self.mask, self.values, 0)
| bsd-3-clause |
draugiskisprendimai/odoo | addons/crm/res_partner.py | 159 | 5149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
    """ Inherits partner and adds CRM information in the partner form """
    _inherit = 'res.partner'

    def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: per partner, count open opportunities
        (probability strictly below 100) and linked meetings.  Companies
        aggregate over their child partners via the 'child_of' operator."""
        res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
        # the user may not have access rights for opportunities or meetings;
        # keep this best-effort and fall back to the zero counts prepared
        # above.  Narrowed from a bare "except:" so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        try:
            for partner in self.browse(cr, uid, ids, context):
                if partner.is_company:
                    operator = 'child_of'
                else:
                    operator = '='
                opp_ids = self.pool['crm.lead'].search(cr, uid, [('partner_id', operator, partner.id), ('type', '=', 'opportunity'), ('probability', '<', '100')], context=context)
                res[partner.id] = {
                    'opportunity_count': len(opp_ids),
                    'meeting_count': len(partner.meeting_ids),
                }
        except Exception:
            pass
        return res

    def _phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: number of phonecalls linked to each partner."""
        res = {}
        for partner in self.browse(cr, uid, ids, context):
            res[partner.id] = len(partner.phonecall_ids)
        return res

    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
            'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
        'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
            'Meetings'),
        'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
            'Phonecalls'),
        'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
        'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
        'phonecall_count': fields.function(_phonecall_count, string="Phonecalls", type="integer"),
    }

    def redirect_partner_form(self, cr, uid, partner_id, context=None):
        """Return an act_window dictionary opening the given partner's form."""
        search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
        value = {
            'domain': "[]",
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'res.partner',
            'res_id': int(partner_id),
            'view_id': False,
            'context': context,
            'type': 'ir.actions.act_window',
            'search_view_id': search_view and search_view[1] or False
        }
        return value

    def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
        """Create one opportunity per partner in ids and return a mapping of
        partner id -> created crm.lead id."""
        categ_obj = self.pool.get('crm.case.categ')
        categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
        lead_obj = self.pool.get('crm.lead')
        opportunity_ids = {}
        for partner in self.browse(cr, uid, ids, context=context):
            # NOTE(review): partner_id is only assigned when falsy, so with
            # multiple ids every iteration after the first reuses the first
            # partner's id (and overwrites its dict entry) — looks like a
            # latent bug, kept as-is for compatibility; confirm with callers.
            if not partner_id:
                partner_id = partner.id
            opportunity_id = lead_obj.create(cr, uid, {
                'name' : opportunity_summary,
                'planned_revenue' : planned_revenue,
                'probability' : probability,
                'partner_id' : partner_id,
                'categ_ids' : categ_ids and categ_ids[0:1] or [],
                'type': 'opportunity'
            }, context=context)
            opportunity_ids[partner_id] = opportunity_id
        return opportunity_ids

    def schedule_meeting(self, cr, uid, ids, context=None):
        """Open the calendar action pre-filtered on these partners, with the
        current user added as an attendee."""
        partner_ids = list(ids)
        partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'search_default_partner_ids': list(ids),
            'default_partner_ids': partner_ids,
        }
        return res
| agpl-3.0 |
fritsvanveen/QGIS | python/plugins/processing/algs/qgis/CheckValidity.py | 5 | 6424 | # -*- coding: utf-8 -*-
"""
***************************************************************************
CheckValidity.py
---------------------
Date : May 2015
Copyright : (C) 2015 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'May 2015'
__copyright__ = '(C) 2015, Arnaud Morvan'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QSettings, QVariant
from qgis.core import Qgis, QgsGeometry, QgsFeature, QgsField, QgsWkbTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
settings_method_key = "/qgis/digitizing/validate_geometries"
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class CheckValidity(GeoAlgorithm):
    """Check geometry validity of a vector layer.

    Splits the input layer into three outputs: features whose geometry
    is valid, features whose geometry is invalid (with an extra
    ``_errors`` attribute describing the failure reasons), and a point
    layer locating every individual geometry error reported by the
    validator.
    """

    INPUT_LAYER = 'INPUT_LAYER'
    METHOD = 'METHOD'
    VALID_OUTPUT = 'VALID_OUTPUT'
    INVALID_OUTPUT = 'INVALID_OUTPUT'
    ERROR_OUTPUT = 'ERROR_OUTPUT'

    def getIcon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'check_geometry.png'))

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('Check validity')
        self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')

        # Index 0 defers to the user's digitizing settings; indexes 1 and 2
        # force the QGIS and GEOS validators respectively (see doCheck).
        self.methods = [self.tr('The one selected in digitizing settings'),
                        'QGIS',
                        'GEOS']

        self.addParameter(ParameterVector(
            self.INPUT_LAYER,
            self.tr('Input layer')))
        self.addParameter(ParameterSelection(
            self.METHOD,
            self.tr('Method'),
            self.methods))
        self.addOutput(OutputVector(
            self.VALID_OUTPUT,
            self.tr('Valid output')))
        self.addOutput(OutputVector(
            self.INVALID_OUTPUT,
            self.tr('Invalid output')))
        self.addOutput(OutputVector(
            self.ERROR_OUTPUT,
            self.tr('Error output')))

    def processAlgorithm(self, progress):
        """Run the check with the requested validation method.

        The validation method is a global QGIS setting, so it is
        temporarily overridden and always restored afterwards.
        """
        settings = QSettings()
        initial_method_setting = settings.value(settings_method_key, 1)

        method = self.getParameterValue(self.METHOD)
        if method != 0:
            settings.setValue(settings_method_key, method)
        try:
            self.doCheck(progress)
        finally:
            # Always restore the user's setting, even if the check failed.
            settings.setValue(settings_method_key, initial_method_setting)

    def doCheck(self, progress):
        """Classify every feature of the input layer and write the three
        output layers (valid, invalid, per-error points)."""
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT_LAYER))

        settings = QSettings()
        method = int(settings.value(settings_method_key, 1))

        valid_output = self.getOutputFromName(self.VALID_OUTPUT)
        valid_fields = layer.fields()
        valid_writer = valid_output.getVectorWriter(
            valid_fields,
            layer.wkbType(),
            layer.crs())
        valid_count = 0

        # Invalid features carry an extra '_errors' attribute with the
        # concatenated failure reasons.
        invalid_output = self.getOutputFromName(self.INVALID_OUTPUT)
        invalid_fields = layer.fields().toList() + [
            QgsField(name='_errors',
                     type=QVariant.String,
                     len=255)]
        invalid_writer = invalid_output.getVectorWriter(
            invalid_fields,
            layer.wkbType(),
            layer.crs())
        invalid_count = 0

        error_output = self.getOutputFromName(self.ERROR_OUTPUT)
        error_fields = [
            QgsField(name='message',
                     type=QVariant.String,
                     len=255)]
        error_writer = error_output.getVectorWriter(
            error_fields,
            QgsWkbTypes.Point,
            layer.crs())
        error_count = 0

        features = vector.features(layer)
        # Guard against empty layers: avoid a ZeroDivisionError when
        # computing the progress step.
        total = 100.0 / len(features) if len(features) else 0
        for current, inFeat in enumerate(features):
            geom = inFeat.geometry()
            attrs = inFeat.attributes()

            valid = True
            if not geom.isEmpty() and not geom.isGeosEmpty():
                errors = list(geom.validateGeometry())
                if errors:
                    # The QGIS method appends a summary entry at the end;
                    # drop it so only individual errors remain.
                    if method == 1:
                        errors.pop()
                    valid = False
                    reasons = []
                    for error in errors:
                        # One point feature per individual error location.
                        errFeat = QgsFeature()
                        error_geom = QgsGeometry.fromPoint(error.where())
                        errFeat.setGeometry(error_geom)
                        errFeat.setAttributes([error.what()])
                        error_writer.addFeature(errFeat)
                        error_count += 1

                        reasons.append(error.what())

                    reason = "\n".join(reasons)
                    # Truncate to fit the 255-character '_errors' field.
                    if len(reason) > 255:
                        reason = reason[:252] + '...'
                    attrs.append(reason)

            outFeat = QgsFeature()
            outFeat.setGeometry(geom)
            outFeat.setAttributes(attrs)

            if valid:
                valid_writer.addFeature(outFeat)
                valid_count += 1
            else:
                invalid_writer.addFeature(outFeat)
                invalid_count += 1

            progress.setPercentage(int(current * total))

        # Flush/close the writers before touching the outputs.
        del valid_writer
        del invalid_writer
        del error_writer

        # Don't open empty outputs in the GUI after the run.
        if valid_count == 0:
            valid_output.open = False
        if invalid_count == 0:
            invalid_output.open = False
        if error_count == 0:
            error_output.open = False
| gpl-2.0 |
yatinkumbhare/openstack-nova | nova/tests/unit/image/test_s3.py | 69 | 11230 | # Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import os
import tempfile
import eventlet
import fixtures
from mox3 import mox
from nova.api.ec2 import ec2utils
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova import test
from nova.tests.unit.image import fake
ami_manifest_xml = """<?xml version="1.0" ?>
<manifest>
<version>2011-06-17</version>
<bundler>
<name>test-s3</name>
<version>0</version>
<release>0</release>
</bundler>
<machine_configuration>
<architecture>x86_64</architecture>
<block_device_mapping>
<mapping>
<virtual>ami</virtual>
<device>sda1</device>
</mapping>
<mapping>
<virtual>root</virtual>
<device>/dev/sda1</device>
</mapping>
<mapping>
<virtual>ephemeral0</virtual>
<device>sda2</device>
</mapping>
<mapping>
<virtual>swap</virtual>
<device>sda3</device>
</mapping>
</block_device_mapping>
<kernel_id>aki-00000001</kernel_id>
<ramdisk_id>ari-00000001</ramdisk_id>
</machine_configuration>
</manifest>
"""
file_manifest_xml = """<?xml version="1.0" ?>
<manifest>
<image>
<ec2_encrypted_key>foo</ec2_encrypted_key>
<user_encrypted_key>foo</user_encrypted_key>
<ec2_encrypted_iv>foo</ec2_encrypted_iv>
<parts count="1">
<part index="0">
<filename>foo</filename>
</part>
</parts>
</image>
</manifest>
"""
class TestS3ImageService(test.TestCase):
    """Tests for nova.image.s3.S3ImageService.

    The fake image service is stubbed in so no real Glance or S3 access
    happens; three s3_image fixtures are created so EC2-style ids '1',
    '2' and '3' map to the uuids below.
    """

    def setUp(self):
        super(TestS3ImageService, self).setUp()
        self.context = context.RequestContext(None, None)
        self.useFixture(fixtures.FakeLogger('boto'))

        # set up 3 fixtures to test shows, should have id '1', '2', and '3'
        db.s3_image_create(self.context,
                           '155d900f-4e14-4e4c-a73d-069cbf4541e6')
        db.s3_image_create(self.context,
                           'a2459075-d96c-40d5-893e-577ff92e721c')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

        fake.stub_out_image_service(self.stubs)
        self.image_service = s3.S3ImageService()
        ec2utils.reset_cache()

    def tearDown(self):
        super(TestS3ImageService, self).tearDown()
        fake.FakeImageService_reset()

    def _assertEqualList(self, list0, list1, keys):
        """Assert two lists of dicts are equal when entries are matched
        on keys[0] and compared on all of *keys* (order-independent)."""
        self.assertEqual(len(list0), len(list1))
        key = keys[0]
        for x in list0:
            self.assertEqual(len(x), len(keys))
            self.assertIn(key, x)
            for y in list1:
                self.assertIn(key, y)
                if x[key] == y[key]:
                    for k in keys:
                        self.assertEqual(x[k], y[k])

    def test_show_cannot_use_uuid(self):
        # show() only accepts EC2-style ids; a raw uuid must not resolve.
        self.assertRaises(exception.ImageNotFound,
                          self.image_service.show, self.context,
                          '155d900f-4e14-4e4c-a73d-069cbf4541e6')

    def test_show_translates_correctly(self):
        # EC2-style id '1' resolves via the s3_image fixture table.
        self.image_service.show(self.context, '1')

    def test_show_translates_image_state_correctly(self):
        """Backend image states must map to EC2 states
        (downloading->pending, failed_decrypt->failed, etc.)."""
        def my_fake_show(self, context, image_id, **kwargs):
            # Return a minimal image dict with a per-uuid image_state.
            fake_state_map = {
                '155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
                'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
                '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
            return {'id': image_id,
                    'name': 'fakeimage123456',
                    'deleted_at': None,
                    'deleted': False,
                    'status': 'active',
                    'is_public': False,
                    'container_format': 'raw',
                    'disk_format': 'raw',
                    'size': '25165824',
                    'properties': {'image_state': fake_state_map[image_id]}}

        # Override part of the fake image service as well just for
        # this test so we can set the image_state to various values
        # and test that S3ImageService does the correct mapping for
        # us. We can't put fake bad or pending states in the real fake
        # image service as it causes other tests to fail
        self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
        ret_image = self.image_service.show(self.context, '1')
        self.assertEqual(ret_image['properties']['image_state'], 'pending')
        ret_image = self.image_service.show(self.context, '2')
        self.assertEqual(ret_image['properties']['image_state'], 'failed')
        ret_image = self.image_service.show(self.context, '3')
        self.assertEqual(ret_image['properties']['image_state'], 'available')

    def test_detail(self):
        self.image_service.detail(self.context)

    def test_s3_create(self):
        """_s3_parse_manifest must merge the manifest's virtual-device
        mappings and preserve the supplied block_device_mapping."""
        metadata = {'properties': {
            'root_device_name': '/dev/sda1',
            'block_device_mapping': [
                {'device_name': '/dev/sda1',
                 'snapshot_id': 'snap-12345678',
                 'delete_on_termination': True},
                {'device_name': '/dev/sda2',
                 'virtual_name': 'ephemeral0'},
                {'device_name': '/dev/sdb0',
                 'no_device': True}]}}
        _manifest, image, image_uuid = self.image_service._s3_parse_manifest(
            self.context, metadata, ami_manifest_xml)

        ret_image = self.image_service.show(self.context, image['id'])
        self.assertIn('properties', ret_image)
        properties = ret_image['properties']

        # 'mappings' comes from the manifest XML's block_device_mapping.
        self.assertIn('mappings', properties)
        mappings = properties['mappings']
        expected_mappings = [
            {"device": "sda1", "virtual": "ami"},
            {"device": "/dev/sda1", "virtual": "root"},
            {"device": "sda2", "virtual": "ephemeral0"},
            {"device": "sda3", "virtual": "swap"}]
        self._assertEqualList(mappings, expected_mappings,
                              ['device', 'virtual'])

        # The caller-supplied block_device_mapping passes through untouched.
        self.assertIn('block_device_mapping', properties)
        block_device_mapping = properties['block_device_mapping']
        expected_bdm = [
            {'device_name': '/dev/sda1',
             'snapshot_id': 'snap-12345678',
             'delete_on_termination': True},
            {'device_name': '/dev/sda2',
             'virtual_name': 'ephemeral0'},
            {'device_name': '/dev/sdb0',
             'no_device': True}]
        self.assertEqual(block_device_mapping, expected_bdm)

    def _initialize_mocks(self):
        """Mock out the whole S3 download/decrypt/untar pipeline so
        _s3_create can run without network access.  Expectations are
        recorded in call order and armed with ReplayAll()."""
        handle, tempf = tempfile.mkstemp(dir='/tmp')
        ignore = mox.IgnoreArg()
        mockobj = self.mox.CreateMockAnything()
        self.stubs.Set(self.image_service, '_conn', mockobj)
        mockobj(ignore).AndReturn(mockobj)
        self.stubs.Set(mockobj, 'get_bucket', mockobj)
        mockobj(ignore).AndReturn(mockobj)
        self.stubs.Set(mockobj, 'get_key', mockobj)
        mockobj(ignore).AndReturn(mockobj)
        self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
        mockobj().AndReturn(file_manifest_xml)
        self.stubs.Set(self.image_service, '_download_file', mockobj)
        mockobj(ignore, ignore, ignore).AndReturn(tempf)
        self.stubs.Set(binascii, 'a2b_hex', mockobj)
        mockobj(ignore).AndReturn('foo')
        mockobj(ignore).AndReturn('foo')
        self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
        mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
        self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
        mockobj(ignore, ignore).AndReturn(tempf)
        self.mox.ReplayAll()

    def test_s3_create_image_locations(self):
        """_s3_create must accept bucket paths with and without a
        leading '/'."""
        image_location_1 = 'testbucket_1/test.img.manifest.xml'
        # Use another location that starts with a '/'
        image_location_2 = '/testbucket_2/test.img.manifest.xml'

        metadata = [{'properties': {'image_location': image_location_1}},
                    {'properties': {'image_location': image_location_2}}]

        for mdata in metadata:
            self._initialize_mocks()
            image = self.image_service._s3_create(self.context, mdata)
            # Let the eventlet greenthread doing the background work run.
            eventlet.sleep()
            translated = self.image_service._translate_id_to_uuid(self.context,
                                                                  image)
            uuid = translated['id']
            image_service = fake.FakeImageService()
            updated_image = image_service.update(self.context, uuid,
                                                 {'properties': {'image_state': 'available'}},
                                                 purge_props=False)
            self.assertEqual(updated_image['properties']['image_state'],
                             'available')

    def test_s3_create_is_public(self):
        """Images created via _s3_create can be flipped to public."""
        self._initialize_mocks()
        metadata = {'properties': {
            'image_location': 'mybucket/my.img.manifest.xml'},
            'name': 'mybucket/my.img'}
        img = self.image_service._s3_create(self.context, metadata)
        # Let the eventlet greenthread doing the background work run.
        eventlet.sleep()
        translated = self.image_service._translate_id_to_uuid(self.context,
                                                              img)
        uuid = translated['id']
        image_service = fake.FakeImageService()
        updated_image = image_service.update(self.context, uuid,
                                             {'is_public': True}, purge_props=False)
        self.assertTrue(updated_image['is_public'])
        self.assertEqual(updated_image['status'], 'active')
        self.assertEqual(updated_image['properties']['image_state'],
                         'available')

    def test_s3_malicious_tarballs(self):
        # Tarballs with absolute or parent-relative paths must be rejected.
        self.assertRaises(exception.NovaException,
                          self.image_service._test_for_malicious_tarball,
                          "/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
        self.assertRaises(exception.NovaException,
                          self.image_service._test_for_malicious_tarball,
                          "/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
| apache-2.0 |
dongguangming/python-phonenumbers | python/phonenumbers/data/region_CL.py | 9 | 4333 | """Auto-generated file, do not edit by hand. CL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CL = PhoneMetadata(id='CL', country_code=56, international_prefix='(?:0|1(?:1[0-69]|2[0-57]|5[13-58]|69|7[0167]|8[018]))0',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[2-9]|600|123)\\d{7,8}', possible_number_pattern='\\d{7,11}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:2\\d{7}|3(?:20|22)\\d{5}|1962\\d{4})|(?:3[2-5]|[47][1-35]|5[1-3578]|6[13-57])\\d{7}', possible_number_pattern='\\d{7,9}', example_number='221234567'),
mobile=PhoneNumberDesc(national_number_pattern='9[4-9]\\d{7}', possible_number_pattern='\\d{8,9}', example_number='961234567'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}|1230\\d{7}', possible_number_pattern='\\d{9,11}', example_number='800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='600\\d{7,8}', possible_number_pattern='\\d{10,11}', example_number='6001234567'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='44\\d{7}', possible_number_pattern='\\d{9}', example_number='441234567'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='600\\d{7,8}', possible_number_pattern='\\d{10,11}', example_number='6001234567'),
national_prefix='0',
national_prefix_for_parsing='0|(1(?:1[0-69]|2[0-57]|5[13-58]|69|7[0167]|8[018]))',
number_format=[NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[23]'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[357]|4[1-35]|6[13-57]'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(9)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(44)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['44'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([68]00)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['60|8'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(600)(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['60'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(1230)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(\\d{5})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['219'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(\\d{4,5})', format='\\1', leading_digits_pattern=['[1-9]'], national_prefix_formatting_rule='\\1')],
intl_number_format=[NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[23]']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[357]|4[1-35]|6[13-57]']),
NumberFormat(pattern='(9)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['9']),
NumberFormat(pattern='(44)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['44']),
NumberFormat(pattern='([68]00)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['60|8']),
NumberFormat(pattern='(600)(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['60']),
NumberFormat(pattern='(1230)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1']),
NumberFormat(pattern='(\\d{5})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['219'])],
mobile_number_portable_region=True)
| apache-2.0 |
vladimir-ipatov/ganeti | test/py/ganeti.utils.log_unittest.py | 8 | 8494 | #!/usr/bin/python
#
# Copyright (C) 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.utils.log"""
import os
import unittest
import logging
import tempfile
import shutil
import threading
from cStringIO import StringIO
from ganeti import constants
from ganeti import errors
from ganeti import compat
from ganeti import utils
import testutils
class TestLogHandler(unittest.TestCase):
    """Tests for utils.log._ReopenableLogHandler and the
    _LogErrorsToConsole wrapper."""

    def testNormal(self):
        """A handler attached to a logger writes one line per record."""
        tmpfile = tempfile.NamedTemporaryFile()

        handler = utils.log._ReopenableLogHandler(tmpfile.name)
        handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

        logger = logging.Logger("TestLogger")
        logger.addHandler(handler)
        self.assertEqual(len(logger.handlers), 1)

        logger.error("Test message ERROR")
        logger.info("Test message INFO")

        logger.removeHandler(handler)
        self.assertFalse(logger.handlers)
        handler.close()

        # Two records were emitted, so two lines must be in the file.
        self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 2)

    def testReopen(self):
        """RequestReopen() must make the handler reopen its file path on
        the next emitted record (log-rotation support)."""
        tmpfile = tempfile.NamedTemporaryFile()
        tmpfile2 = tempfile.NamedTemporaryFile()

        handler = utils.log._ReopenableLogHandler(tmpfile.name)

        self.assertFalse(utils.ReadFile(tmpfile.name))
        self.assertFalse(utils.ReadFile(tmpfile2.name))

        logger = logging.Logger("TestLoggerReopen")
        logger.addHandler(handler)

        for _ in range(3):
            logger.error("Test message ERROR")
        handler.flush()
        self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 3)
        before_id = utils.GetFileID(tmpfile.name)

        handler.RequestReopen()
        self.assertTrue(handler._reopen)
        # Requesting a reopen alone must not touch the file yet.
        self.assertTrue(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
                                           before_id))

        # Rename only after requesting reopen
        os.rename(tmpfile.name, tmpfile2.name)
        assert not os.path.exists(tmpfile.name)

        # Write another message, should reopen
        for _ in range(4):
            logger.info("Test message INFO")

        # Flag must be reset
        self.assertFalse(handler._reopen)
        # A new file must have been created at the original path.
        self.assertFalse(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
                                            before_id))

        logger.removeHandler(handler)
        self.assertFalse(logger.handlers)
        handler.close()

        # 4 records after the rename, 3 before (now in the renamed file).
        self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 4)
        self.assertEqual(len(utils.ReadFile(tmpfile2.name).splitlines()), 3)

    def testConsole(self):
        """Errors raised while logging must be reported to the console
        file when one is available and writable."""
        for (console, check) in [(None, False),
                                 (tempfile.NamedTemporaryFile(), True),
                                 (self._FailingFile(os.devnull), False)]:
            # Create a handler which will fail when handling errors
            cls = utils.log._LogErrorsToConsole(self._FailingHandler)

            # Instantiate handler with file which will fail when writing,
            # provoking a write to the console
            handler = cls(console, self._FailingFile(os.devnull))

            logger = logging.Logger("TestLogger")
            logger.addHandler(handler)
            self.assertEqual(len(logger.handlers), 1)

            # Provoke write
            logger.error("Test message ERROR")

            # Take everything apart
            logger.removeHandler(handler)
            self.assertFalse(logger.handlers)
            handler.close()

            if console and check:
                console.flush()

                # Check console output
                consout = utils.ReadFile(console.name)
                self.assertTrue("Cannot log message" in consout)
                self.assertTrue("Test message ERROR" in consout)

    class _FailingFile(file):
        # File object whose write() always raises, used to force a
        # logging failure.
        def write(self, _):
            raise Exception

    class _FailingHandler(logging.StreamHandler):
        # Stream handler whose own error handler raises, exercising the
        # console fallback path.
        def handleError(self, _):
            raise Exception
class TestSetupLogging(unittest.TestCase):
    """Tests for utils.SetupLogging."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testSimple(self):
        """SetupLogging must return a callable reopen function and log
        only through the supplied root logger."""
        logfile = utils.PathJoin(self.tmpdir, "basic.log")
        logger = logging.Logger("TestLogger")
        self.assertTrue(callable(utils.SetupLogging(logfile, "test",
                                                    console_logging=False,
                                                    syslog=constants.SYSLOG_NO,
                                                    stderr_logging=False,
                                                    multithreaded=False,
                                                    root_logger=logger)))
        self.assertEqual(utils.ReadFile(logfile), "")
        logger.error("This is a test")

        # Ensure SetupLogging used custom logger
        logging.error("This message should not show up in the test log file")

        self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))

    def testReopen(self):
        """The returned reopen function must switch logging to a new
        file at the original path after a rename (log rotation)."""
        logfile = utils.PathJoin(self.tmpdir, "reopen.log")
        logfile2 = utils.PathJoin(self.tmpdir, "reopen.log.OLD")

        logger = logging.Logger("TestLogger")
        reopen_fn = utils.SetupLogging(logfile, "test",
                                       console_logging=False,
                                       syslog=constants.SYSLOG_NO,
                                       stderr_logging=False,
                                       multithreaded=False,
                                       root_logger=logger)
        self.assertTrue(callable(reopen_fn))

        self.assertEqual(utils.ReadFile(logfile), "")
        logger.error("This is a test")
        self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))

        os.rename(logfile, logfile2)
        assert not os.path.exists(logfile)

        # Notify logger to reopen on the next message
        reopen_fn()
        assert not os.path.exists(logfile)

        # Provoke actual reopen
        logger.error("First message")

        self.assertTrue(utils.ReadFile(logfile).endswith("First message\n"))
        self.assertTrue(utils.ReadFile(logfile2).endswith("This is a test\n"))
class TestSetupToolLogging(unittest.TestCase):
    """Tests for utils.SetupToolLogging."""

    def test(self):
        """Check which levels reach the log stream, and in what format,
        for every debug/verbose combination."""
        error_name = logging.getLevelName(logging.ERROR)
        warn_name = logging.getLevelName(logging.WARNING)
        info_name = logging.getLevelName(logging.INFO)
        debug_name = logging.getLevelName(logging.DEBUG)

        for debug in [False, True]:
            for verbose in [False, True]:
                logger = logging.Logger("TestLogger")
                buf = StringIO()

                utils.SetupToolLogging(debug, verbose, _root_logger=logger, _stream=buf)

                logger.error("level=error")
                logger.warning("level=warning")
                logger.info("level=info")
                logger.debug("level=debug")

                lines = buf.getvalue().splitlines()

                # Each line has a timestamp with exactly three colons
                # before the message part.
                self.assertTrue(compat.all(line.count(":") == 3 for line in lines))

                messages = [line.split(":", 3)[-1].strip() for line in lines]

                if debug:
                    # Debug implies verbose: all levels, with level names.
                    self.assertEqual(messages, [
                        "%s level=error" % error_name,
                        "%s level=warning" % warn_name,
                        "%s level=info" % info_name,
                        "%s level=debug" % debug_name,
                        ])
                elif verbose:
                    # Verbose: everything down to INFO, with level names.
                    self.assertEqual(messages, [
                        "%s level=error" % error_name,
                        "%s level=warning" % warn_name,
                        "%s level=info" % info_name,
                        ])
                else:
                    # Default: warnings and errors only, no level names.
                    self.assertEqual(messages, [
                        "level=error",
                        "level=warning",
                        ])

    def testThreadName(self):
        """The thread name must appear in messages iff threadname=True."""
        thread_name = threading.currentThread().getName()

        for enable_threadname in [False, True]:
            logger = logging.Logger("TestLogger")
            buf = StringIO()

            utils.SetupToolLogging(True, True, threadname=enable_threadname,
                                   _root_logger=logger, _stream=buf)

            logger.debug("test134042376")

            lines = buf.getvalue().splitlines()

            self.assertEqual(len(lines), 1)

            if enable_threadname:
                self.assertTrue((" %s " % thread_name) in lines[0])
            else:
                self.assertTrue(thread_name not in lines[0])
if __name__ == "__main__":
testutils.GanetiTestProgram()
| gpl-2.0 |
jorge2703/scikit-learn | sklearn/datasets/rcv1.py | 113 | 8170 | """RCV1 dataset.
"""
# Author: Tom Dupre la Tour
# License: BSD 3 clause
import logging
from os.path import exists, join
from gzip import GzipFile
from io import BytesIO
from contextlib import closing
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from ..utils.fixes import makedirs
from ..externals import joblib
from .svmlight_format import load_svmlight_files
from ..utils import shuffle as shuffle_
URL = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a13-vector-files/lyrl2004_vectors')
URL_topics = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a08-topic-qrels/rcv1-v2.topics.qrels.gz')
logger = logging.getLogger()
def fetch_rcv1(data_home=None, subset='all', download_if_missing=True,
               random_state=None, shuffle=False):
    """Load the RCV1 multilabel dataset, downloading it if necessary.

    Version: RCV1-v2, vectors, full sets, topics multilabels.

    ============== =====================
    Classes                          103
    Samples total                 804414
    Dimensionality                 47236
    Features           real, between 0 and 1
    ============== =====================

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    data_home : string, optional
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    subset: string, 'train', 'test', or 'all', default='all'
        Select the dataset to load: 'train' for the training set
        (23149 samples), 'test' for the test set (781265 samples),
        'all' for both, with the training samples first if shuffle is False.
        This follows the official LYRL2004 chronological split.

    download_if_missing : boolean, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : scipy csr array, dtype np.float64, shape (804414, 47236)
        The array has 0.16% of non zero values.

    dataset.target : scipy csr array, dtype np.uint8, shape (804414, 103)
        Each sample has a value of 1 in its categories, and 0 in others.
        The array has 3.15% of non zero values.

    dataset.sample_id : numpy array, dtype np.uint32, shape (804414,)
        Identification number of each sample, as ordered in dataset.data.

    dataset.target_names : numpy array, dtype object, length (103)
        Names of each target (RCV1 topics), as ordered in dataset.target.

    dataset.DESCR : string
        Description of the RCV1 dataset.

    Reference
    ---------
    Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004). RCV1: A new
    benchmark collection for text categorization research. The Journal of
    Machine Learning Research, 5, 361-397.
    """
    N_SAMPLES = 804414
    N_FEATURES = 47236
    N_CATEGORIES = 103
    N_TRAIN = 23149

    data_home = get_data_home(data_home=data_home)
    rcv1_dir = join(data_home, "RCV1")
    if download_if_missing:
        makedirs(rcv1_dir, exist_ok=True)

    samples_path = join(rcv1_dir, "samples.pkl")
    sample_id_path = join(rcv1_dir, "sample_id.pkl")
    sample_topics_path = join(rcv1_dir, "sample_topics.pkl")
    topics_path = join(rcv1_dir, "topics_names.pkl")

    # load data (X) and sample_id
    if download_if_missing and (not exists(samples_path) or
                                not exists(sample_id_path)):
        file_urls = ["%s_test_pt%d.dat.gz" % (URL, i) for i in range(4)]
        file_urls.append("%s_train.dat.gz" % URL)
        files = []
        for file_url in file_urls:
            logger.warning("Downloading %s" % file_url)
            with closing(urlopen(file_url)) as online_file:
                # Buffer the full file in memory so GzipFile can seek as
                # needed (the raw HTTP stream is not seekable).
                f = BytesIO(online_file.read())
            files.append(GzipFile(fileobj=f))

        Xy = load_svmlight_files(files, n_features=N_FEATURES)

        # Training data is before testing data
        X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
        sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
        sample_id = sample_id.astype(np.uint32)

        joblib.dump(X, samples_path, compress=9)
        joblib.dump(sample_id, sample_id_path, compress=9)
    else:
        X = joblib.load(samples_path)
        sample_id = joblib.load(sample_id_path)

    # load target (y), categories, and sample_id_bis
    if download_if_missing and (not exists(sample_topics_path) or
                                not exists(topics_path)):
        logger.warning("Downloading %s" % URL_topics)
        with closing(urlopen(URL_topics)) as online_topics:
            f = BytesIO(online_topics.read())

        # parse the target file: each line is "<category> <doc_id> 1",
        # with lines for the same document appearing consecutively
        n_cat = -1
        n_doc = -1
        doc_previous = -1
        y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
        sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
        category_names = {}
        for line in GzipFile(fileobj=f, mode='rb'):
            line_components = line.decode("ascii").split(u" ")
            if len(line_components) == 3:
                cat, doc, _ = line_components
                if cat not in category_names:
                    n_cat += 1
                    category_names[cat] = n_cat

                doc = int(doc)
                if doc != doc_previous:
                    doc_previous = doc
                    n_doc += 1
                    sample_id_bis[n_doc] = doc
                y[n_doc, category_names[cat]] = 1

        # Samples in X are ordered with sample_id,
        # whereas in y, they are ordered with sample_id_bis.
        permutation = _find_permutation(sample_id_bis, sample_id)
        y = y[permutation, :]

        # save category names in a list, with same order than y
        categories = np.empty(N_CATEGORIES, dtype=object)
        for k in category_names.keys():
            categories[category_names[k]] = k

        # reorder categories in lexicographic order
        order = np.argsort(categories)
        categories = categories[order]
        y = sp.csr_matrix(y[:, order])

        joblib.dump(y, sample_topics_path, compress=9)
        joblib.dump(categories, topics_path, compress=9)
    else:
        y = joblib.load(sample_topics_path)
        categories = joblib.load(topics_path)

    if subset == 'all':
        pass
    elif subset == 'train':
        X = X[:N_TRAIN, :]
        y = y[:N_TRAIN, :]
        sample_id = sample_id[:N_TRAIN]
    elif subset == 'test':
        X = X[N_TRAIN:, :]
        y = y[N_TRAIN:, :]
        sample_id = sample_id[N_TRAIN:]
    else:
        # Bug fix: the error message previously read "('all', 'train',
        # test')" with a missing opening quote on 'test'.
        raise ValueError("Unknown subset parameter. Got '%s' instead of one"
                         " of ('all', 'train', 'test')" % subset)

    if shuffle:
        X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)

    return Bunch(data=X, target=y, sample_id=sample_id,
                 target_names=categories, DESCR=__doc__)
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
def _find_permutation(a, b):
"""find the permutation from a to b"""
t = np.argsort(a)
u = np.argsort(b)
u_ = _inverse_permutation(u)
return t[u_]
| bsd-3-clause |
rtindru/django | tests/introspection/tests.py | 207 | 8471 | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, mock, skipUnlessDBFeature
from .models import Article, Reporter
class IntrospectionTests(TransactionTestCase):
available_apps = ['introspection']
def test_table_names(self):
    """table_names() is sorted and contains the models' tables."""
    tl = connection.introspection.table_names()
    self.assertEqual(tl, sorted(tl))
    self.assertIn(Reporter._meta.db_table, tl,
                  "'%s' isn't in table_list()." % Reporter._meta.db_table)
    self.assertIn(Article._meta.db_table, tl,
                  "'%s' isn't in table_list()." % Article._meta.db_table)
def test_django_table_names(self):
    """django_table_names() must exclude tables not backed by a model."""
    with connection.cursor() as cursor:
        cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
        tl = connection.introspection.django_table_names()
        cursor.execute("DROP TABLE django_ixn_test_table;")
        self.assertNotIn('django_ixn_test_table', tl,
                         "django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
    """django_table_names() must return a plain list (refs #15216)."""
    # Table name is a list #15216
    tl = connection.introspection.django_table_names(only_existing=True)
    self.assertIs(type(tl), list)
    tl = connection.introspection.django_table_names(only_existing=False)
    self.assertIs(type(tl), list)
def test_table_names_with_views(self):
with connection.cursor() as cursor:
try:
cursor.execute(
'CREATE VIEW introspection_article_view AS SELECT headline '
'from introspection_article;')
except DatabaseError as e:
if 'insufficient privileges' in str(e):
self.fail("The test user has no CREATE VIEW privileges")
else:
raise
self.assertIn('introspection_article_view',
connection.introspection.table_names(include_views=True))
self.assertNotIn('introspection_article_view',
connection.introspection.table_names())
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, {Article, Reporter})
def test_sequence_list(self):
sequences = connection.introspection.sequence_list()
expected = {'table': Reporter._meta.db_table, 'column': 'id'}
self.assertIn(expected, sequences,
'Reporter sequence not found in sequence_list()')
def test_get_table_description_names(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual([r[0] for r in desc],
[f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[datatype(r[1], r) for r in desc],
['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
'CharField', 'CharField', 'CharField',
'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
)
# The following test fails on Oracle due to #17202 (can't correctly
# inspect the length of character columns).
@skipUnlessDBFeature('can_introspect_max_length')
def test_get_table_description_col_lengths(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[3] for r in desc if datatype(r[1], r) == 'CharField'],
[30, 30, 254]
)
@skipUnlessDBFeature('can_introspect_null')
def test_get_table_description_nullable(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
self.assertEqual(
[r[6] for r in desc],
[False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
)
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_relations(self):
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# That's {field_name: (field_name_other_table, other_table)}
expected_relations = {
'reporter_id': ('id', Reporter._meta.db_table),
'response_to_id': ('id', Article._meta.db_table),
}
self.assertEqual(relations, expected_relations)
# Removing a field shouldn't disturb get_relations (#17785)
body = Article._meta.get_field('body')
with connection.schema_editor() as editor:
editor.remove_field(Article, body)
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
with connection.schema_editor() as editor:
editor.add_field(Article, body)
self.assertEqual(relations, expected_relations)
@skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
def test_get_relations_alt_format(self):
"""With SQLite, foreign keys can be added with different syntaxes."""
with connection.cursor() as cursor:
cursor.fetchone = mock.Mock(return_value=[
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES %s(id));" % Article._meta.db_table
])
relations = connection.introspection.get_relations(cursor, 'mocked_table')
self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_key_columns(self):
with connection.cursor() as cursor:
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(
set(key_columns),
{('reporter_id', Reporter._meta.db_table, 'id'),
('response_to_id', Article._meta.db_table, 'id')})
def test_get_primary_key_column(self):
with connection.cursor() as cursor:
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
self.assertEqual(primary_key_column, 'id')
def test_get_indexes(self):
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
def test_get_indexes_multicol(self):
"""
Test that multicolumn indexes are not included in the introspection
results.
"""
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
self.assertNotIn('first_name', indexes)
self.assertIn('id', indexes)
def datatype(dbtype, description):
    """Return the introspected field type for ``dbtype`` as a plain string.

    ``get_field_type`` may return either a string or a ``(string, params)``
    tuple; in the latter case only the type name is kept.
    """
    field_type = connection.introspection.get_field_type(dbtype, description)
    return field_type[0] if type(field_type) is tuple else field_type
| bsd-3-clause |
Varentsov/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/protocol.py | 12 | 1163 | import json
import webdriver
"""WebDriver wire protocol codecs."""
class Encoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize WebDriver elements."""
    def __init__(self, *args, **kwargs):
        # The session keyword is only needed when decoding; discard it so
        # the base JSONEncoder doesn't choke on an unknown argument.
        kwargs.pop("session")
        super(Encoder, self).__init__(*args, **kwargs)
    def default(self, obj):
        if isinstance(obj, (list, tuple)):
            return [self.default(item) for item in obj]
        if isinstance(obj, webdriver.Element):
            # Elements serialize to the wire-protocol reference object.
            return {webdriver.Element.identifier: obj.id}
        return super(Encoder, self).default(obj)
class Decoder(json.JSONDecoder):
    """JSON decoder that rehydrates WebDriver ``Element`` references.

    Any JSON object carrying the ``Element.identifier`` key is converted
    back into a ``webdriver.Element`` bound to the *session* keyword
    argument supplied at construction time.
    """
    def __init__(self, *args, **kwargs):
        self.session = kwargs.pop("session")
        super(Decoder, self).__init__(
            object_hook=self.object_hook, *args, **kwargs)
    def object_hook(self, payload):
        if isinstance(payload, (list, tuple)):
            return [self.object_hook(x) for x in payload]
        elif isinstance(payload, dict) and webdriver.Element.identifier in payload:
            return webdriver.Element.from_json(payload, self.session)
        elif isinstance(payload, dict):
            # dict.items() works on both Python 2 and 3; the previous
            # iteritems() call raised AttributeError under Python 3.
            return {k: self.object_hook(v) for k, v in payload.items()}
        return payload
| mpl-2.0 |
cudadog/django-allauth | allauth/socialaccount/providers/stackexchange/tests.py | 71 | 1518 | from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import StackExchangeProvider
class StackExchangeTests(create_oauth2_tests(registry.by_id(StackExchangeProvider.id))):
    """OAuth2 login tests for the Stack Exchange provider."""
    def get_mocked_response(self):
        """Return a canned Stack Exchange /me API payload for one user."""
        return MockedResponse(200, """
{
  "has_more": false,
  "items": [
    {
      "is_employee": false,
      "last_access_date": 1356200390,
      "display_name": "pennersr",
      "account_id": 291652,
      "badge_counts": {
          "bronze": 2,
          "silver": 2,
          "gold": 0
      },
      "last_modified_date": 1356199552,
      "profile_image": "http://www.gravatar.com/avatar/053d648486d567d3143d6bad8df8cfeb?d=identicon&r=PG",
      "user_type": "registered",
      "creation_date": 1296223711,
      "reputation_change_quarter": 148,
      "reputation_change_year": 378,
      "reputation": 504,
      "link": "http://stackoverflow.com/users/593944/pennersr",
      "reputation_change_week": 0,
      "user_id": 593944,
      "reputation_change_month": 10,
      "reputation_change_day": 0
    }
  ],
  "quota_max": 10000,
  "quota_remaining": 9999
}""")
| mit |
lz1988/company-site | django/test/utils.py | 24 | 10922 | import re
import warnings
from xml.dom.minidom import parseString, Node
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.test.signals import template_rendered, setting_changed
from django.utils.encoding import force_str
from django.utils.functional import wraps
from django.utils import six
from django.utils.translation import deactivate
__all__ = (
'Approximate', 'ContextList', 'get_runner', 'override_settings',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
class Approximate(object):
    """Wrap a value so equality comparisons tolerate rounding error.

    Two values compare equal when they match exactly or when their
    difference rounds to zero at ``places`` decimal places.
    """
    def __init__(self, val, places=7):
        self.val = val
        self.places = places
    def __repr__(self):
        return repr(self.val)
    def __eq__(self, other):
        if self.val == other:
            return True
        delta = abs(self.val - other)
        return round(delta, self.places) == 0
class ContextList(list):
    """A list of template contexts that also supports key lookup.

    Indexing with a string searches each contained context in order and
    returns the first matching value; any other index behaves as for a
    plain list.
    """
    def __getitem__(self, key):
        if not isinstance(key, six.string_types):
            return super(ContextList, self).__getitem__(key)
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)
    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Fire the signal first so listeners see the template/context before
    # any rendering side effects, then delegate to the normal render.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
    - Installing the instrumented test renderer
    - Set the email backend to the locmem email backend.
    - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    # Stash the originals so teardown_test_environment() can restore them.
    Template.original_render = Template._render
    Template._render = instrumented_test_render
    mail.original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    settings._original_allowed_hosts = settings.ALLOWED_HOSTS
    settings.ALLOWED_HOSTS = ['*']
    # Captured outgoing messages accumulate here instead of being sent.
    mail.outbox = []
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
    - Restoring the original test renderer
    - Restoring the email sending functions
    """
    Template._render = Template.original_render
    del Template.original_render
    settings.EMAIL_BACKEND = mail.original_email_backend
    del mail.original_email_backend
    settings.ALLOWED_HOSTS = settings._original_allowed_hosts
    # Delete the stashed attributes so a later setup/teardown cycle
    # starts from a clean state.
    del settings._original_allowed_hosts
    del mail.outbox
def get_warnings_state():
    """
    Return a snapshot of the warnings module's current filter list.
    """
    # The warnings module exposes no public API for this, but copying
    # the filter list is known to work on all supported Python versions.
    return list(warnings.filters)
def restore_warnings_state(state):
    """
    Restore the warnings module's filters from a snapshot previously
    returned by get_warnings_state().
    """
    # Install a copy so later mutation of the snapshot has no effect.
    warnings.filters = list(state)
def get_runner(settings, test_runner_class=None):
    """
    Resolve and return the test-runner class named by the dotted path
    ``test_runner_class``, falling back to ``settings.TEST_RUNNER``.
    """
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER
    test_path = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    # The fromlist argument makes __import__ return the leaf module
    # rather than the top-level package.
    test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
    test_runner = getattr(test_module, test_path[-1])
    return test_runner
def setup_test_template_loader(templates_dict, use_cached_loader=False):
    """
    Changes Django to only find templates from within a dictionary (where each
    key is the template name and each value is the corresponding template
    content to return).
    Use meth:`restore_template_loaders` to restore the original loaders.
    """
    # Refuse to nest: a second call before restore would lose the originals.
    if hasattr(loader, RESTORE_LOADERS_ATTR):
        raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
    def test_template_loader(template_name, template_dirs=None):
        "A custom template loader that loads templates from a dictionary."
        try:
            return (templates_dict[template_name], "test:%s" % template_name)
        except KeyError:
            raise TemplateDoesNotExist(template_name)
    if use_cached_loader:
        template_loader = cached.Loader(('test_template_loader',))
        template_loader._cached_loaders = (test_template_loader,)
    else:
        template_loader = test_template_loader
    # Stash the original loaders on the loader module for later restoration.
    setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
    loader.template_source_loaders = (template_loader,)
    return template_loader
def restore_template_loaders():
    """
    Restores the original template loaders after
    :meth:`setup_test_template_loader` has been run.
    """
    # Put back the stashed loaders and remove the marker attribute so
    # setup_test_template_loader() can be called again.
    loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
    delattr(loader, RESTORE_LOADERS_ATTR)
class override_settings(object):
    """
    Acts as either a decorator, or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        # Remember the overrides and the current settings holder so
        # disable() can restore it.
        self.options = kwargs
        self.wrapped = settings._wrapped
    def __enter__(self):
        self.enable()
    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()
    def __call__(self, test_func):
        # Decorator usage: wrap either a SimpleTestCase subclass or a
        # plain callable.
        from django.test import SimpleTestCase
        if isinstance(test_func, type):
            if not issubclass(test_func, SimpleTestCase):
                raise Exception(
                    "Only subclasses of Django SimpleTestCase can be decorated "
                    "with override_settings")
            original_pre_setup = test_func._pre_setup
            original_post_teardown = test_func._post_teardown
            def _pre_setup(innerself):
                self.enable()
                original_pre_setup(innerself)
            def _post_teardown(innerself):
                original_post_teardown(innerself)
                self.disable()
            test_func._pre_setup = _pre_setup
            test_func._post_teardown = _post_teardown
            return test_func
        else:
            @wraps(test_func)
            def inner(*args, **kwargs):
                with self:
                    return test_func(*args, **kwargs)
            return inner
    def enable(self):
        # Install a UserSettingsHolder with the overridden values, then
        # notify listeners via the setting_changed signal.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        settings._wrapped = override
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)
    def disable(self):
        # Restore the original holder and re-send setting_changed with the
        # restored values.
        settings._wrapped = self.wrapped
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)
def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered in the
    comparison.
    Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
    def norm_whitespace(v):
        # Collapse runs of whitespace so formatting differences don't matter.
        return _norm_whitespace_re.sub(' ', v)
    def child_text(element):
        # Concatenated text of the element's direct text-node children.
        return ''.join([c.data for c in element.childNodes
                        if c.nodeType == Node.TEXT_NODE])
    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]
    def norm_child_text(element):
        return norm_whitespace(child_text(element))
    def attrs_dict(element):
        # dict comparison makes attribute order irrelevant.
        return dict(element.attributes.items())
    def check_element(want_element, got_element):
        # Recursively compare tag, normalized text, attributes and children.
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True
    def first_node(document):
        # Skip leading comment nodes; comments are ignored in the comparison.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node
    want, got = strip_quotes(want, got)
    want = want.replace('\\n','\n')
    got = got.replace('\\n','\n')
    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got
    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))
    return check_element(want_root, got_root)
def strip_quotes(want, got):
    """
    Strip quotes of doctests output values:

    >>> strip_quotes("'foo'")
    "foo"
    >>> strip_quotes('"foo"')
    "foo"
    """
    def is_quoted_string(s):
        s = s.strip()
        return len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")

    def is_quoted_unicode(s):
        s = s.strip()
        return (len(s) >= 3 and s[0] == 'u'
                and s[1] == s[-1] and s[1] in ('"', "'"))

    # Only strip when *both* values are quoted the same way, so a quoted
    # string is never compared against an unquoted one.
    if is_quoted_string(want) and is_quoted_string(got):
        return want.strip()[1:-1], got.strip()[1:-1]
    if is_quoted_unicode(want) and is_quoted_unicode(got):
        return want.strip()[2:-1], got.strip()[2:-1]
    return want, got
def str_prefix(s):
    # Interpolate the "u" string prefix ('' on Python 3, 'u' on Python 2)
    # into %(_)s placeholders so expected repr output can be written once.
    return s % {'_': '' if six.PY3 else 'u'}
| bsd-3-clause |
awohns/selection | python_lib/lib/python3.4/site-packages/numpy/testing/nosetester.py | 36 | 19120 | """
Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import warnings
from numpy.compat import basestring
import numpy as np
from .utils import import_nose, suppress_warnings
def get_package_name(filepath):
    """
    Given a path where a package is installed, determine its name.

    Parameters
    ----------
    filepath : str
        Path to a file. If the determination fails, "numpy" is returned.

    Examples
    --------
    >>> np.testing.nosetester.get_package_name('nonsense')
    'numpy'

    """
    fullpath = filepath[:]
    components = []
    # Walk upwards, collecting path components, until the
    # site-packages/dist-packages directory itself is reached.
    while 'site-packages' in filepath or 'dist-packages' in filepath:
        filepath, tail = os.path.split(filepath)
        if tail in ('site-packages', 'dist-packages'):
            break
        components.append(tail)
    # if package name determination failed, just default to numpy/scipy
    if not components:
        return 'scipy' if 'scipy' in fullpath else 'numpy'
    # reverse to get the components in top-down order
    components.reverse()
    # don't include the outer egg directory
    if components[0].endswith('.egg'):
        components.pop(0)
    return '.'.join(components)
def run_module_suite(file_to_run=None, argv=None):
    """
    Run a test module.
    Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
    the command line
    Parameters
    ----------
    file_to_run : str, optional
        Path to test module, or None.
        By default, run the module from which this function is called.
    argv : list of strings
        Arguments to be passed to the nose test runner. ``argv[0]`` is
        ignored. All command line arguments accepted by ``nosetests``
        will work. If it is the default value None, sys.argv is used.
        .. versionadded:: 1.9.0
    Examples
    --------
    Adding the following::
        if __name__ == "__main__" :
            run_module_suite(argv=sys.argv)
    at the end of a test module will run the tests when that module is
    called in the python interpreter.
    Alternatively, calling::
    >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
    from an interpreter will run all the test routine in 'test_matlib.py'.
    """
    if file_to_run is None:
        # Locate the calling module's file via its stack frame.
        f = sys._getframe(1)
        file_to_run = f.f_locals.get('__file__', None)
        if file_to_run is None:
            raise AssertionError
    if argv is None:
        argv = sys.argv + [file_to_run]
    else:
        argv = argv + [file_to_run]
    nose = import_nose()
    from .noseclasses import KnownFailurePlugin
    nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
class NoseTester(object):
    """
    Nose test runner.
    This class is made available as numpy.testing.Tester, and a test function
    is typically added to a package's __init__.py like so::
        from numpy.testing import Tester
        test = Tester().test
    Calling this test function finds and runs all tests associated with the
    package and all its sub-packages.
    Attributes
    ----------
    package_path : str
        Full path to the package to test.
    package_name : str
        Name of the package to test.
    Parameters
    ----------
    package : module, str or None, optional
        The package to test. If a string, this should be the full path to
        the package. If None (default), `package` is set to the module from
        which `NoseTester` is initialized.
    raise_warnings : None, str or sequence of warnings, optional
        This specifies which warnings to configure as 'raise' instead
        of being shown once during the test execution. Valid strings are:
        - "develop" : equals ``(Warning,)``
        - "release" : equals ``()``, don't raise on any warnings.
        Default is "release".
    depth : int, optional
        If `package` is None, then this can be used to initialize from the
        module of the caller of (the caller of (...)) the code that
        initializes `NoseTester`. Default of 0 means the module of the
        immediate caller; higher values are useful for utility routines that
        want to initialize `NoseTester` objects on behalf of other code.
    """
    def __init__(self, package=None, raise_warnings="release", depth=0):
        # Back-compat: 'None' used to mean either "release" or "develop"
        # depending on whether this was a release or develop version of
        # numpy. Those semantics were fine for testing numpy, but not so
        # helpful for downstream projects like scipy that use
        # numpy.testing. (They want to set this based on whether *they* are a
        # release or develop version, not whether numpy is.) So we continue to
        # accept 'None' for back-compat, but it's now just an alias for the
        # default "release".
        if raise_warnings is None:
            raise_warnings = "release"
        package_name = None
        # Resolve the package path/name from the calling frame when no
        # package is given explicitly.
        if package is None:
            f = sys._getframe(1 + depth)
            package_path = f.f_locals.get('__file__', None)
            if package_path is None:
                raise AssertionError
            package_path = os.path.dirname(package_path)
            package_name = f.f_locals.get('__name__', None)
        elif isinstance(package, type(os)):
            package_path = os.path.dirname(package.__file__)
            package_name = getattr(package, '__name__', None)
        else:
            package_path = str(package)
        self.package_path = package_path
        # Find the package name under test; this name is used to limit coverage
        # reporting (if enabled).
        if package_name is None:
            package_name = get_package_name(package_path)
        self.package_name = package_name
        # Set to "release" in constructor in maintenance branches.
        self.raise_warnings = raise_warnings
    def _test_argv(self, label, verbose, extra_argv):
        ''' Generate argv for nosetest command
        Parameters
        ----------
        label : {'fast', 'full', '', attribute identifier}, optional
            see ``test`` docstring
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-10. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to nosetests.
        Returns
        -------
        argv : list
            command line arguments that will be passed to nose
        '''
        argv = [__file__, self.package_path, '-s']
        if label and label != 'full':
            if not isinstance(label, basestring):
                raise TypeError('Selection label should be a string')
            if label == 'fast':
                label = 'not slow'
            argv += ['-A', label]
        argv += ['--verbosity', str(verbose)]
        # When installing with setuptools, and also in some other cases, the
        # test_*.py files end up marked +x executable. Nose, by default, does
        # not run files marked with +x as they might be scripts. However, in
        # our case nose only looks for test_*.py files under the package
        # directory, which should be safe.
        argv += ['--exe']
        if extra_argv:
            argv += extra_argv
        return argv
    def _show_system_info(self):
        """Print NumPy/SciPy/Python/nose version info for the test log."""
        nose = import_nose()
        import numpy
        print("NumPy version %s" % numpy.__version__)
        relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
        print("NumPy relaxed strides checking option:", relaxed_strides)
        npdir = os.path.dirname(numpy.__file__)
        print("NumPy is installed in %s" % npdir)
        if 'scipy' in self.package_name:
            import scipy
            print("SciPy version %s" % scipy.__version__)
            spdir = os.path.dirname(scipy.__file__)
            print("SciPy is installed in %s" % spdir)
        pyversion = sys.version.replace('\n', '')
        print("Python version %s" % pyversion)
        print("nose version %d.%d.%d" % nose.__versioninfo__)
    def _get_custom_doctester(self):
        """ Return instantiated plugin for doctests
        Allows subclassing of this class to override doctester
        A return value of None means use the nose builtin doctest plugin
        """
        from .noseclasses import NumpyDoctest
        return NumpyDoctest()
    def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
                          doctests=False, coverage=False):
        """
        Run tests for module using nose.
        This method does the heavy lifting for the `test` method. It takes all
        the same arguments, for details see `test`.
        See Also
        --------
        test
        """
        # fail with nice error message if nose is not present
        import_nose()
        # compile argv
        argv = self._test_argv(label, verbose, extra_argv)
        # our way of doing coverage
        if coverage:
            argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
                   '--cover-tests', '--cover-erase']
        # construct list of plugins
        import nose.plugins.builtin
        from .noseclasses import KnownFailurePlugin, Unplugger
        plugins = [KnownFailurePlugin()]
        plugins += [p() for p in nose.plugins.builtin.plugins]
        # add doctesting if required
        doctest_argv = '--with-doctest' in argv
        if doctests == False and doctest_argv:
            doctests = True
        plug = self._get_custom_doctester()
        if plug is None:
            # use standard doctesting
            if doctests and not doctest_argv:
                argv += ['--with-doctest']
        else:  # custom doctesting
            if doctest_argv:  # in fact the unplugger would take care of this
                argv.remove('--with-doctest')
            plugins += [Unplugger('doctest'), plug]
            if doctests:
                argv += ['--with-' + plug.name]
        return argv, plugins
    def test(self, label='fast', verbose=1, extra_argv=None,
             doctests=False, coverage=False, raise_warnings=None):
        """
        Run tests for module using nose.
        Parameters
        ----------
        label : {'fast', 'full', '', attribute identifier}, optional
            Identifies the tests to run. This can be a string to pass to
            the nosetests executable with the '-A' option, or one of several
            special values.  Special values are:
            * 'fast' - the default - which corresponds to the ``nosetests -A``
              option of 'not slow'.
            * 'full' - fast (as above) and slow tests as in the
              'no -A' option to nosetests - this is the same as ''.
            * None or '' - run all tests.
            attribute_identifier - string passed directly to nosetests as '-A'.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-10. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to nosetests.
        doctests : bool, optional
            If True, run doctests in module. Default is False.
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            (This requires the `coverage module:
             <http://nedbatchelder.com/code/modules/coverage.html>`_).
        raise_warnings : None, str or sequence of warnings, optional
            This specifies which warnings to configure as 'raise' instead
            of being shown once during the test execution.  Valid strings are:
              - "develop" : equals ``(Warning,)``
              - "release" : equals ``()``, don't raise on any warnings.
            The default is to use the class initialization value.
        Returns
        -------
        result : object
            Returns the result of running the tests as a
            ``nose.result.TextTestResult`` object.
        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for it.
        For example, to run all tests for numpy.lib:
        >>> np.lib.test() #doctest: +SKIP
        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        Running unit tests for numpy.lib
        ...
        Ran 976 tests in 3.933s
        OK
        >>> result.errors #doctest: +SKIP
        []
        >>> result.knownfail #doctest: +SKIP
        []
        """
        # cap verbosity at 3 because nose becomes *very* verbose beyond that
        verbose = min(verbose, 3)
        from . import utils
        utils.verbose = verbose
        if doctests:
            print("Running unit tests and doctests for %s" % self.package_name)
        else:
            print("Running unit tests for %s" % self.package_name)
        self._show_system_info()
        # reset doctest state on every run
        import doctest
        doctest.master = None
        if raise_warnings is None:
            raise_warnings = self.raise_warnings
        _warn_opts = dict(develop=(Warning,),
                          release=())
        if isinstance(raise_warnings, basestring):
            raise_warnings = _warn_opts[raise_warnings]
        with suppress_warnings("location") as sup:
            # Reset the warning filters to the default state,
            # so that running the tests is more repeatable.
            warnings.resetwarnings()
            # Set all warnings to 'warn', this is because the default 'once'
            # has the bad property of possibly shadowing later warnings.
            warnings.filterwarnings('always')
            # Force the requested warnings to raise
            for warningtype in raise_warnings:
                warnings.filterwarnings('error', category=warningtype)
            # Filter out annoying import messages.
            sup.filter(message='Not importing directory')
            sup.filter(message="numpy.dtype size changed")
            sup.filter(message="numpy.ufunc size changed")
            sup.filter(category=np.ModuleDeprecationWarning)
            # Filter out boolean '-' deprecation messages. This allows
            # older versions of scipy to test without a flood of messages.
            sup.filter(message=".*boolean negative.*")
            sup.filter(message=".*boolean subtract.*")
            # Filter out distutils cpu warnings (could be localized to
            # distutils tests). ASV has problems with top level import,
            # so fetch module for suppression here.
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                from ..distutils import cpuinfo
            sup.filter(category=UserWarning, module=cpuinfo)
            # See #7949: Filter out deprecation warnings due to the -3 flag to
            # python 2
            if sys.version_info.major == 2 and sys.py3kwarning:
                # This is very specific, so using the fragile module filter
                # is fine
                import threading
                sup.filter(DeprecationWarning,
                           r"sys\.exc_clear\(\) not supported in 3\.x",
                           module=threading)
                sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
                sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
                sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
                sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
                sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
            # Filter out some deprecation warnings inside nose 1.3.7 when run
            # on python 3.5b2. See
            # https://github.com/nose-devs/nose/issues/929
            # Note: it is hard to filter based on module for sup (lineno could
            # be implemented).
            warnings.filterwarnings("ignore", message=".*getargspec.*",
                                    category=DeprecationWarning,
                                    module=r"nose\.")
            from .noseclasses import NumpyTestProgram
            argv, plugins = self.prepare_test_args(
                label, verbose, extra_argv, doctests, coverage)
            t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
        return t.result
    def bench(self, label='fast', verbose=1, extra_argv=None):
        """
        Run benchmarks for module using nose.
        Parameters
        ----------
        label : {'fast', 'full', '', attribute identifier}, optional
            Identifies the benchmarks to run. This can be a string to pass to
            the nosetests executable with the '-A' option, or one of several
            special values.  Special values are:
            * 'fast' - the default - which corresponds to the ``nosetests -A``
              option of 'not slow'.
            * 'full' - fast (as above) and slow benchmarks as in the
              'no -A' option to nosetests - this is the same as ''.
            * None or '' - run all tests.
            attribute_identifier - string passed directly to nosetests as '-A'.
        verbose : int, optional
            Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to nosetests.
        Returns
        -------
        success : bool
            Returns True if running the benchmarks works, False if an error
            occurred.
        Notes
        -----
        Benchmarks are like tests, but have names starting with "bench" instead
        of "test", and can be found under the "benchmarks" sub-directory of the
        module.
        Each NumPy module exposes `bench` in its namespace to run all benchmarks
        for it.
        Examples
        --------
        >>> success = np.lib.bench() #doctest: +SKIP
        Running benchmarks for numpy.lib
        ...
        using 562341 items:
        unique:
        0.11
        unique1d:
        0.11
        ratio: 1.0
        nUnique: 56230 == 56230
        ...
        OK
        >>> success #doctest: +SKIP
        True
        """
        print("Running benchmarks for %s" % self.package_name)
        self._show_system_info()
        argv = self._test_argv(label, verbose, extra_argv)
        # Match functions named bench* (instead of nose's default test*).
        argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
        # import nose or make informative error
        nose = import_nose()
        # get plugin to disable doctests
        from .noseclasses import Unplugger
        add_plugins = [Unplugger('doctest')]
        return nose.run(argv=argv, addplugins=add_plugins)
def _numpy_tester():
    # Development builds (version strings containing ".dev0") escalate
    # warnings; release builds keep the default warning behaviour.
    if ".dev0" in getattr(np, "__version__", ""):
        mode = "develop"
    else:
        mode = "release"
    return NoseTester(raise_warnings=mode, depth=1)
| mit |
jblackburne/scikit-learn | sklearn/utils/_scipy_sparse_lsqr_backport.py | 378 | 18021 | """Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
         iter_lim=None, show=False, calc_var=False):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.
    The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.
    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.
    ::
    1. Unsymmetric equations -- solve A*x = b
    2. Linear least squares -- solve A*x = b
    in the least-squares sense
    3. Damped least squares -- solve ( A )*x = ( b )
    ( damp*I ) ( 0 )
    in the least-squares sense
    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperatorLinear}
        Representation of an m-by-n matrix. It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : (m,) ndarray
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, default 1.0e-8
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits. (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float
        Another stopping tolerance. lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say). For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int
        Explicit limitation on number of iterations (for safety).
    show : bool
        Display an iteration log.
    calc_var : bool
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``. This is well defined if A has full column
        rank or ``damp > 0``. (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)
    Notes
    -----
    LSQR uses an iterative method to approximate the solution. The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem. Poor scaling of the rows
    or columns of A should therefore be avoided where possible.
    For example, in problem 1 the solution is unaltered by
    row-scaling. If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A b ) should be
    scaled up or down.
    In problems 1 and 2, the solution x is easily recovered
    following column-scaling. Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).
    In problem 3, there is no freedom to re-scale if damp is
    nonzero. However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.
    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large. Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.
    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:
    1. Compute a residual vector ``r0 = b - A*x0``.
    2. Use LSQR to solve the system ``A*dx = r0``.
    3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
    This requires that ``x0`` be available before and after the call
    to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate. The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.
    If A is symmetric, LSQR should not be used!
    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR. If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).
    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583. LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995). "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.
    """
    A = aslinearoperator(A)
    # Accept (m, 1) style right-hand sides by flattening them to (m,).
    if len(b.shape) > 1:
        b = b.squeeze()
    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    # Per-column variance estimates; only accumulated when calc_var is True.
    var = np.zeros(n)
    # Termination messages, indexed by the final value of istop.
    msg = ('The exact solution is x = 0 ',
           'Ax - b is small enough, given atol, btol ',
           'The least-squares solution is good enough, given atol ',
           'The estimate of cond(Abar) has exceeded conlim ',
           'Ax - b is small enough for this machine ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine ',
           'The iteration limit has been reached ')
    if show:
        print(' ')
        print('LSQR Least-squares solution of Ax = b')
        str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
        str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)
    # Initialize the scalar state of the iteration.
    itn = 0
    istop = 0
    nstop = 0  # NOTE(review): assigned but never used below — dead state?
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0
    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy beta*u = b, alfa*v = A'u.
    """
    # NOTE(review): __xm and __xn are allocated but never referenced again.
    __xm = np.zeros(m) # a matrix for temporary holding
    __xn = np.zeros(n) # a matrix for temporary holding
    v = np.zeros(n)
    u = b
    x = np.zeros(n)
    alfa = 0
    beta = np.linalg.norm(u)
    w = np.zeros(n)
    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)
    if alfa > 0:
        v = (1/alfa) * v
        w = v.copy()
    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm
    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    # b == 0 (or A'b == 0) means x = 0 is the exact solution: return early.
    if arnorm == 0:
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
    head1 = ' Itn x[0] r1norm r2norm '
    head2 = ' Compatible LS Norm A Cond A'
    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = ' %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)
    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        % Perform the next step of the bidiagonalization to obtain the
        % next beta, u, alfa, v. These satisfy the relations
        % beta*u = a*v - alfa*u,
        % alfa*v = A'*u - beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)
        if beta > 0:
            u = (1/beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v
        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar
        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)
        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi
        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w
        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2
        if calc_var:
            var = var + dk**2
        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2
        # Test for convergence.
        # First, estimate the condition of the matrix Abar,
        # and the norms of rbar and Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)
        # Distinguish between
        # r1norm = ||b - Ax|| and
        # r2norm = rnorm in current code
        # = sqrt(r1norm^2 + damp^2*||x||^2).
        # Estimate r1norm from
        # r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        # A negative sign flags that cancellation made r1sq negative.
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm
        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm
        # The following tests guard against extremely small values of
        # atol, btol or ctol. (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps, btol = eps, conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4
        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1
        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim-10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2*ctol:
            prnt = True
        if test2 <= 10*atol:
            prnt = True
        if test1 <= 10*rtol:
            prnt = True
        if istop != 0:
            prnt = True
        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = ' %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)
        if istop != 0:
            break
    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
        print(str1 + ' ' + str2)
        print(str3 + ' ' + str4)
        print(' ')
    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
| bsd-3-clause |
sbidoul/buildbot | master/buildbot/data/schedulers.py | 11 | 4869 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.data import base
from buildbot.data import masters
from buildbot.data import types
from buildbot.db.schedulers import SchedulerAlreadyClaimedError
class Db2DataMixin(object):

    @defer.inlineCallbacks
    def db2data(self, dbdict):
        """Translate a schedulers DB row dict into its data-API form."""
        # Resolve the owning master (if any) into its data-API
        # representation via a nested data.get call.
        if dbdict['masterid'] is not None:
            master = yield self.master.data.get(
                ('masters', dbdict['masterid']))
        else:
            master = None
        defer.returnValue({
            'schedulerid': dbdict['id'],
            'name': dbdict['name'],
            'enabled': dbdict['enabled'],
            'master': master,
        })
class SchedulerEndpoint(Db2DataMixin, base.Endpoint):
    """Data API endpoint for a single scheduler, optionally scoped to a
    particular master."""

    isCollection = False
    pathPatterns = """
        /schedulers/n:schedulerid
        /masters/n:masterid/schedulers/n:schedulerid
    """

    @defer.inlineCallbacks
    def get(self, resultSpec, kwargs):
        """Return the scheduler dict, or None when the scheduler does not
        exist or is not attached to the requested master."""
        dbdict = yield self.master.db.schedulers.getScheduler(
            kwargs['schedulerid'])
        # Bug fix: getScheduler returns None for an unknown schedulerid;
        # the masterid filter below must not subscript None.
        if dbdict is None:
            defer.returnValue(None)
        if 'masterid' in kwargs and dbdict['masterid'] != kwargs['masterid']:
            defer.returnValue(None)
        defer.returnValue((yield self.db2data(dbdict)))

    @defer.inlineCallbacks
    def control(self, action, args, kwargs):
        """Handle control verbs; only 'enable' is supported, toggling the
        scheduler's enabled flag."""
        if action == 'enable':
            schedulerid = kwargs['schedulerid']
            v = args['enabled']
            yield self.master.data.updates.schedulerEnable(schedulerid, v)
        defer.returnValue(None)
class SchedulersEndpoint(Db2DataMixin, base.Endpoint):
    """Data API endpoint for the collection of schedulers, optionally
    restricted to those attached to one master."""

    isCollection = True
    pathPatterns = """
        /schedulers
        /masters/n:masterid/schedulers
    """
    rootLinkName = 'schedulers'

    @defer.inlineCallbacks
    def get(self, resultSpec, kwargs):
        dbdicts = yield self.master.db.schedulers.getSchedulers(
            masterid=kwargs.get('masterid'))
        # Convert every row concurrently; fail fast on the first error.
        results = yield defer.DeferredList(
            [self.db2data(d) for d in dbdicts],
            consumeErrors=True, fireOnOneErrback=True)
        defer.returnValue([value for (success, value) in results])
class Scheduler(base.ResourceType):
    # Resource-type definition tying the scheduler endpoints, entity
    # schema, and update methods together for the data API.
    name = "scheduler"
    plural = "schedulers"
    endpoints = [SchedulerEndpoint, SchedulersEndpoint]
    keyFields = ['schedulerid']
    eventPathPatterns = """
        /schedulers/:schedulerid
    """
    class EntityType(types.Entity):
        # Schema of a scheduler entity as exposed by the data API.
        schedulerid = types.Integer()
        name = types.String()
        enabled = types.Boolean()
        master = types.NoneOk(masters.Master.entityType)
    entityType = EntityType(name)
    @defer.inlineCallbacks
    def generateEvent(self, schedulerid, event):
        # Fetch the data-API form of the scheduler and publish the event.
        scheduler = yield self.master.data.get(('schedulers', str(schedulerid)))
        self.produceEvent(scheduler, event)
    @base.updateMethod
    @defer.inlineCallbacks
    def schedulerEnable(self, schedulerid, v):
        # Persist the enabled flag, then notify listeners of the update.
        yield self.master.db.schedulers.enable(schedulerid, v)
        yield self.generateEvent(schedulerid, 'updated')
        defer.returnValue(None)
    @base.updateMethod
    def findSchedulerId(self, name):
        # Delegate directly to the DB layer (creates the row if missing,
        # per the db API's find semantics — confirm against db docs).
        return self.master.db.schedulers.findSchedulerId(name)
    @base.updateMethod
    def trySetSchedulerMaster(self, schedulerid, masterid):
        # Attempt to claim the scheduler for a master; resolves to True on
        # success and False if another master already holds the claim.
        d = self.master.db.schedulers.setSchedulerMaster(
            schedulerid, masterid)
        # set is successful: deferred result is True
        d.addCallback(lambda _: True)
        @d.addErrback
        def trapAlreadyClaimedError(why):
            # the db layer throws an exception if the claim fails; we squash
            # that error but let other exceptions continue upward
            why.trap(SchedulerAlreadyClaimedError)
            # set failed: deferred result is False
            return False
        return d
    @defer.inlineCallbacks
    def _masterDeactivated(self, masterid):
        # Detach every scheduler owned by a master that has gone inactive.
        schedulers = yield self.master.db.schedulers.getSchedulers(
            masterid=masterid)
        for sch in schedulers:
            yield self.master.db.schedulers.setSchedulerMaster(sch['id'], None)
| gpl-2.0 |
yjmade/odoo | addons/stock_picking_wave/__init__.py | 374 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andrey-malets/web-page-replay | third_party/dns/rcode.py | 248 | 3106 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Result Codes."""
import dns.exception
# Numeric DNS response codes; BADVERS is the EDNS extended rcode
# (see from_flags/to_flags below for how it is split across the
# header flags and the EDNS flags word).
NOERROR = 0
FORMERR = 1
SERVFAIL = 2
NXDOMAIN = 3
NOTIMP = 4
REFUSED = 5
YXDOMAIN = 6
YXRRSET = 7
NXRRSET = 8
NOTAUTH = 9
NOTZONE = 10
BADVERS = 16
# Mapping from mnemonic text to numeric rcode, consumed by from_text().
_by_text = {
    'NOERROR' : NOERROR,
    'FORMERR' : FORMERR,
    'SERVFAIL' : SERVFAIL,
    'NXDOMAIN' : NXDOMAIN,
    'NOTIMP' : NOTIMP,
    'REFUSED' : REFUSED,
    'YXDOMAIN' : YXDOMAIN,
    'YXRRSET' : YXRRSET,
    'NXRRSET' : NXRRSET,
    'NOTAUTH' : NOTAUTH,
    'NOTZONE' : NOTZONE,
    'BADVERS' : BADVERS
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownRcode(dns.exception.DNSException):
    """Raised if an rcode is unknown."""
def from_text(text):
    """Convert text into an rcode.
    @param text: the textual rcode
    @type text: string
    @raises UnknownRcode: the rcode is unknown
    @rtype: int
    """
    # Accept a plain decimal number as long as it fits the 12-bit
    # extended-rcode range; otherwise fall through to the name lookup.
    if text.isdigit():
        value = int(text)
        if 0 <= value <= 4095:
            return value
    value = _by_text.get(text.upper())
    if value is None:
        raise UnknownRcode
    return value
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.
    @param flags: the DNS flags
    @type flags: int
    @param ednsflags: the EDNS flags
    @type ednsflags: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: int
    """
    # The low 4 bits of the rcode live in the DNS header flags; the
    # high 8 bits sit in bits 20-27 of the EDNS flags word.
    rcode = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
    if not 0 <= rcode <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return rcode
def to_flags(value):
    """Return a (flags, ednsflags) tuple which encodes the rcode.
    @param value: the rcode
    @type value: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: (int, int) tuple
    """
    if not 0 <= value <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    # Low 4 bits go in the DNS header flags; the remaining 8 bits are
    # shifted into position for the EDNS flags word (kept as a long so
    # the shift cannot overflow a native int on Python 2).
    v = value & 0xf
    ev = long(value & 0xff0) << 20
    return (v, ev)
def to_text(value):
    """Convert rcode into text.
    @param value: the rcode
    @type value: int
    @rtype: string
    """
    # Unknown rcodes fall back to their decimal representation.
    text = _by_value.get(value)
    return text if text is not None else str(value)
| apache-2.0 |
duducosmos/pgs4a | python-install/lib/python2.7/test/test_enumerate.py | 84 | 7513 | import unittest
import sys
from test import test_support
class G:
    """Sequence exposing only the legacy __getitem__ protocol."""

    def __init__(self, seqn):
        self.seqn = seqn

    def __getitem__(self, i):
        # Delegate indexing (and IndexError) to the wrapped sequence.
        return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
    """Sequence whose iterator protocol is supplied by a generator."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        # A generator function: each call yields a fresh iterator.
        for item in self.seqn:
            yield item
class X:
    """Has a next() method but neither __getitem__ nor __iter__, so it
    is not actually iterable."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def next(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class EnumerateTestCase(unittest.TestCase):
    # Subclasses override `enum` and `seq`/`res` to re-run this whole
    # suite against a different enumerate implementation or input.
    enum = enumerate
    seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')]
    def test_basicfunction(self):
        # enumerate() returns an iterator of its own type that yields
        # (index, value) pairs.
        self.assertEqual(type(self.enum(self.seq)), self.enum)
        e = self.enum(self.seq)
        self.assertEqual(iter(e), e)
        self.assertEqual(list(self.enum(self.seq)), self.res)
        self.enum.__doc__
    def test_getitemseqn(self):
        # Works with legacy __getitem__-only sequences.
        self.assertEqual(list(self.enum(G(self.seq))), self.res)
        e = self.enum(G(''))
        self.assertRaises(StopIteration, e.next)
    def test_iteratorseqn(self):
        # Works with hand-written iterator objects.
        self.assertEqual(list(self.enum(I(self.seq))), self.res)
        e = self.enum(I(''))
        self.assertRaises(StopIteration, e.next)
    def test_iteratorgenerator(self):
        # Works with generator-backed iterables.
        self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
        e = self.enum(Ig(''))
        self.assertRaises(StopIteration, e.next)
    def test_noniterable(self):
        # X has next() but no iteration protocol, so enumerate must fail.
        self.assertRaises(TypeError, self.enum, X(self.seq))
    def test_illformediterable(self):
        # N has __iter__ but no next(); consuming it must fail.
        self.assertRaises(TypeError, list, self.enum(N(self.seq)))
    def test_exception_propagation(self):
        # Exceptions raised by the underlying iterator must pass through.
        self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))
    def test_argumentcheck(self):
        self.assertRaises(TypeError, self.enum) # no arguments
        self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
        self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
        self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments
    @test_support.cpython_only
    def test_tuple_reuse(self):
        # Tests an implementation detail where tuple is reused
        # whenever nothing else holds a reference to it
        self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
        self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
class MyEnum(enumerate):
    # Trivial subclass used to verify enumerate behaves when subclassed.
    pass
class SubclassTestCase(EnumerateTestCase):
    # Re-run the full EnumerateTestCase suite against the subclass.
    enum = MyEnum
class TestEmpty(EnumerateTestCase):
    # Edge case: enumerating an empty sequence yields nothing.
    seq, res = '', []
class TestBig(EnumerateTestCase):
    # Larger input; note seq/res rely on Python 2 semantics, where
    # range() and zip() return lists rather than lazy objects.
    seq = range(10,20000,2)
    res = zip(range(20000), seq)
class TestReversed(unittest.TestCase):
    # Tests for the builtin reversed(), covering the sequence protocol,
    # the __reversed__ hook, and argument/error handling.
    def test_simple(self):
        class A:
            # Legacy sequence: __getitem__ + __len__ only.
            def __getitem__(self, i):
                if i < 5:
                    return str(i)
                raise StopIteration
            def __len__(self):
                return 5
        for data in 'abc', range(5), tuple(enumerate('abc')), A(), xrange(1,17,5):
            self.assertEqual(list(data)[::-1], list(reversed(data)))
        self.assertRaises(TypeError, reversed, {})
        # don't allow keyword arguments
        self.assertRaises(TypeError, reversed, [], a=1)
    def test_classic_class(self):
        class A:
            # __reversed__ takes precedence over the sequence protocol.
            def __reversed__(self):
                return [2, 1]
        self.assertEqual(list(reversed(A())), [2, 1])
    def test_xrange_optimization(self):
        # reversed(xrange(...)) reuses xrange's own iterator type.
        x = xrange(1)
        self.assertEqual(type(reversed(x)), type(iter(x)))
    @test_support.cpython_only
    def test_len(self):
        # This is an implementation detail, not an interface requirement
        from test.test_iterlen import len
        for s in ('hello', tuple('hello'), list('hello'), xrange(5)):
            self.assertEqual(len(reversed(s)), len(s))
            r = reversed(s)
            list(r)
            self.assertEqual(len(r), 0)
        class SeqWithWeirdLen:
            # __len__ succeeds once, then raises, to exercise error paths.
            called = False
            def __len__(self):
                if not self.called:
                    self.called = True
                    return 10
                raise ZeroDivisionError
            def __getitem__(self, index):
                return index
        r = reversed(SeqWithWeirdLen())
        self.assertRaises(ZeroDivisionError, len, r)
    def test_gc(self):
        class Seq:
            def __len__(self):
                return 10
            def __getitem__(self, index):
                return index
        s = Seq()
        r = reversed(s)
        # Create a reference cycle between the sequence and its iterator.
        s.r = r
    def test_args(self):
        self.assertRaises(TypeError, reversed)
        self.assertRaises(TypeError, reversed, [], 'extra')
    def test_bug1229429(self):
        # this bug was never in reversed, it was in
        # PyObject_CallMethod, and reversed_new calls that sometimes.
        if not hasattr(sys, "getrefcount"):
            return
        def f():
            pass
        r = f.__reversed__ = object()
        rc = sys.getrefcount(r)
        for i in range(10):
            try:
                reversed(f)
            except TypeError:
                pass
            else:
                self.fail("non-callable __reversed__ didn't raise!")
        self.assertEqual(rc, sys.getrefcount(r))
    def test_objmethods(self):
        # Objects must have __len__() and __getitem__() implemented.
        class NoLen(object):
            def __getitem__(self): return 1
        nl = NoLen()
        self.assertRaises(TypeError, reversed, nl)
        class NoGetItem(object):
            def __len__(self): return 2
        ngi = NoGetItem()
        self.assertRaises(TypeError, reversed, ngi)
class EnumerateStartTestCase(EnumerateTestCase):
    # Base for tests of enumerate's `start` argument; overrides the
    # parent's type/__doc__ checks, which don't apply to the lambda-based
    # `enum` attributes used by the subclasses below.
    def test_basicfunction(self):
        e = self.enum(self.seq)
        self.assertEqual(iter(e), e)
        self.assertEqual(list(self.enum(self.seq)), self.res)
class TestStart(EnumerateStartTestCase):
    # Counting starts at 11 instead of 0.
    enum = lambda self, i: enumerate(i, start=11)
    seq, res = 'abc', [(11, 'a'), (12, 'b'), (13, 'c')]
class TestLongStart(EnumerateStartTestCase):
    # Start beyond sys.maxint forces the counter into Python 2 longs.
    enum = lambda self, i: enumerate(i, start=sys.maxint+1)
    seq, res = 'abc', [(sys.maxint+1,'a'), (sys.maxint+2,'b'),
                       (sys.maxint+3,'c')]
def test_main(verbose=None):
    # Run the module's test suite once; on refcount-tracking (debug)
    # builds, repeat it to surface reference leaks.
    test_support.run_unittest(__name__)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(__name__)
            counts[i] = sys.gettotalrefcount()
        print counts
test_main(verbose=True)
| lgpl-2.1 |
lekum/ansible | lib/ansible/module_utils/rax.py | 280 | 11974 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from uuid import UUID
# Server build states considered terminal when polling (see FINAL_STATUSES
# consumers elsewhere in this module).
FINAL_STATUSES = ('ACTIVE', 'ERROR')
# Possible cloud block storage volume states.
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
                 'error', 'error_deleting')
# Load-balancing algorithms accepted by Cloud Load Balancers.
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
                  'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
# Protocols accepted by Cloud Load Balancers.
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
                 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
                 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
# Plain-data types copied verbatim by rax_to_dict (Python 2 basestring).
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
# Well-known Rackspace network UUIDs for PublicNet and ServiceNet.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name"""
    # Replace anything that is not a word character or hyphen with an
    # underscore, lower-case the result, and drop leading underscores.
    slug = re.sub('[^\w-]', '_', value).lower().lstrip('_')
    return 'rax_%s' % slug
def rax_clb_node_to_dict(obj):
    """Serialize a Cloud Load Balancer Node object into a plain dict.

    Returns an empty dict for a falsy *obj*; otherwise the node's own
    ``to_dict()`` payload augmented with its ``id`` and ``weight``.
    """
    if not obj:
        return {}
    serialized = obj.to_dict()
    serialized.update(id=obj.id, weight=obj.weight)
    return serialized
def rax_to_dict(obj, obj_type='standard'):
    """Generic function to convert a pyrax object to a dict

    obj_type values:
        standard
        clb
        server
    """
    instance = {}
    for key in dir(obj):
        value = getattr(obj, key)
        if obj_type == 'clb' and key == 'nodes':
            # Load balancer nodes get their own dict conversion.
            instance[key] = []
            for node in value:
                instance[key].append(rax_clb_node_to_dict(node))
        elif (isinstance(value, list) and len(value) > 0 and
                not isinstance(value[0], NON_CALLABLES)):
            # A non-empty list of complex pyrax objects: recurse per element.
            instance[key] = []
            for item in value:
                instance[key].append(rax_to_dict(item))
        elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
            if obj_type == 'server':
                if key == 'image':
                    # An empty image means the server boots from a volume.
                    if not value:
                        instance['rax_boot_source'] = 'volume'
                    else:
                        instance['rax_boot_source'] = 'local'
                # Server attribute keys are namespaced with a rax_ prefix.
                key = rax_slugify(key)
            instance[key] = value
    if obj_type == 'server':
        # Also expose a few well-known attributes under their plain names.
        for attr in ['id', 'accessIPv4', 'name', 'status']:
            instance[attr] = instance.get(rax_slugify(attr))
    return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
    """Find a servers bootable volume.

    Fails the module (or returns False when exit=False) unless exactly one
    bootable volume is attached to the given server.
    """
    cs = rax_module.cloudservers
    cbs = rax_module.cloud_blockstorage
    server_id = rax_module.utils.get_id(server)
    volumes = cs.volumes.get_server_volumes(server_id)
    bootable_volumes = []
    for volume in volumes:
        # Each attachment must be re-fetched from block storage to read
        # its 'bootable' flag.
        vol = cbs.get(volume)
        if module.boolean(vol.bootable):
            bootable_volumes.append(vol)
    if not bootable_volumes:
        if exit:
            module.fail_json(msg='No bootable volumes could be found for '
                                 'server %s' % server_id)
        else:
            return False
    elif len(bootable_volumes) > 1:
        if exit:
            module.fail_json(msg='Multiple bootable volumes found for server '
                                 '%s' % server_id)
        else:
            return False
    return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
    """Find a server image by ID or Name.

    If *image* parses as a UUID it is used directly; otherwise it is looked
    up first by human_id and then by name. Returns the image ID, or False
    when not found and exit=False.
    """
    cs = rax_module.cloudservers
    try:
        UUID(image)
    except ValueError:
        # Not a UUID: try the human-readable ID, then fall back to name.
        try:
            image = cs.images.find(human_id=image)
        except (cs.exceptions.NotFound,
                cs.exceptions.NoUniqueMatch):
            try:
                image = cs.images.find(name=image)
            except (cs.exceptions.NotFound,
                    cs.exceptions.NoUniqueMatch):
                if exit:
                    module.fail_json(msg='No matching image found (%s)' %
                                         image)
                else:
                    return False
    return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
    """Find a Block storage volume by ID or name.

    Returns None when no volume matches by name; any unexpected API error
    fails the module.
    """
    cbs = rax_module.cloud_blockstorage
    try:
        # If the name parses as a UUID, treat it as a volume ID.
        UUID(name)
        volume = cbs.get(name)
    except ValueError:
        # Not a UUID: fall back to a lookup by volume name.
        try:
            volume = cbs.find(name=name)
        except rax_module.exc.NotFound:
            volume = None
    except Exception, e:
        module.fail_json(msg='%s' % e)
    return volume
def rax_find_network(module, rax_module, network):
    """Look up a cloud network by UUID or by label.

    The special labels 'public' and 'private' map to the well-known
    Rackspace PublicNet and ServiceNet networks.
    """
    cnw = rax_module.cloud_networks
    try:
        UUID(network)
    except ValueError:
        label = network.lower()
        if label == 'public':
            return cnw.get_server_networks(PUBLIC_NET_ID)
        if label == 'private':
            return cnw.get_server_networks(SERVICE_NET_ID)
        try:
            network_obj = cnw.find_network_by_label(network)
        except (rax_module.exceptions.NetworkNotFound,
                rax_module.exceptions.NetworkLabelNotUnique):
            module.fail_json(msg='No matching network found (%s)' %
                                 network)
        else:
            return cnw.get_server_networks(network_obj)
    else:
        # The argument was a valid UUID: use it directly.
        return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
    """Locate a Cloud Server either by UUID or by exact name match."""
    servers_api = rax_module.cloudservers.servers
    try:
        UUID(server)
        found = servers_api.get(server)
    except ValueError:
        # Not an ID; search by name, anchored so only exact matches count.
        matches = servers_api.list(search_opts=dict(name='^%s$' % server))
        if not matches:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(matches) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')
        # Exactly one match remains.
        found = matches[0]
    return found
def rax_find_loadbalancer(module, rax_module, loadbalancer):
    """Find a Cloud Load Balancer by ID or name.

    First tries to fetch *loadbalancer* as an ID; when that fails, falls
    back to an exact-name scan and fails the module unless exactly one
    load balancer matches.
    """
    clb = rax_module.cloud_loadbalancers
    try:
        found = clb.get(loadbalancer)
    except Exception:
        # Not retrievable by ID, so fall back to a lookup by exact name.
        # (This was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt.)
        found = []
        for lb in clb.list():
            if loadbalancer == lb.name:
                found.append(lb)
        if not found:
            module.fail_json(msg='No loadbalancer was matched')
        if len(found) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')
        # We made it this far, grab the first and hopefully only item
        # in the list
        found = found[0]
    return found
def rax_argument_spec():
    """Return standard base dictionary used for the argument_spec
    argument in AnsibleModule
    """
    # Common authentication/region options shared by all rax modules.
    # NOTE(review): BOOLEANS is not defined in this chunk — presumably
    # imported from ansible.module_utils elsewhere in the file; verify.
    return dict(
        api_key=dict(type='str', aliases=['password'], no_log=True),
        auth_endpoint=dict(type='str'),
        credentials=dict(type='str', aliases=['creds_file']),
        env=dict(type='str'),
        identity_type=dict(type='str', default='rackspace'),
        region=dict(type='str'),
        tenant_id=dict(type='str'),
        tenant_name=dict(type='str'),
        username=dict(type='str'),
        verify_ssl=dict(choices=BOOLEANS, type='bool'),
    )
def rax_required_together():
    """Default ``required_together`` value for AnsibleModule: the API key
    and the username must always be supplied as a pair."""
    required_pairs = [['api_key', 'username']]
    return required_pairs
def setup_rax_module(module, rax_module, region_required=True):
    """Set up pyrax in a standard way for all modules.

    Reads connection options from module params, falls back to RAX_*
    environment variables and pyrax settings, authenticates, and returns
    the configured pyrax module. Fails the module on any credential or
    region problem.
    """
    rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
                                               rax_module.USER_AGENT)
    api_key = module.params.get('api_key')
    auth_endpoint = module.params.get('auth_endpoint')
    credentials = module.params.get('credentials')
    env = module.params.get('env')
    identity_type = module.params.get('identity_type')
    region = module.params.get('region')
    tenant_id = module.params.get('tenant_id')
    tenant_name = module.params.get('tenant_name')
    username = module.params.get('username')
    verify_ssl = module.params.get('verify_ssl')
    # Apply the pyrax environment first; it can change setting defaults.
    if env is not None:
        rax_module.set_environment(env)
    rax_module.set_setting('identity_type', identity_type)
    if verify_ssl is not None:
        rax_module.set_setting('verify_ssl', verify_ssl)
    if auth_endpoint is not None:
        rax_module.set_setting('auth_endpoint', auth_endpoint)
    if tenant_id is not None:
        rax_module.set_setting('tenant_id', tenant_id)
    if tenant_name is not None:
        rax_module.set_setting('tenant_name', tenant_name)
    try:
        # Fall back: module params -> environment variables -> pyrax
        # settings/keyring, in that order.
        username = username or os.environ.get('RAX_USERNAME')
        if not username:
            username = rax_module.get_setting('keyring_username')
            if username:
                # Sentinel understood by the auth step below.
                api_key = 'USE_KEYRING'
        if not api_key:
            api_key = os.environ.get('RAX_API_KEY')
        credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
                       os.environ.get('RAX_CREDS_FILE'))
        region = (region or os.environ.get('RAX_REGION') or
                  rax_module.get_setting('region'))
    except KeyError, e:
        module.fail_json(msg='Unable to load %s' % e.message)
    try:
        # Prefer username/api_key auth; fall back to a credentials file.
        if api_key and username:
            if api_key == 'USE_KEYRING':
                rax_module.keyring_auth(username, region=region)
            else:
                rax_module.set_credentials(username, api_key=api_key,
                                           region=region)
        elif credentials:
            credentials = os.path.expanduser(credentials)
            rax_module.set_credential_file(credentials, region=region)
        else:
            raise Exception('No credentials supplied!')
    except Exception, e:
        if e.message:
            msg = str(e.message)
        else:
            msg = repr(e)
        module.fail_json(msg=msg)
    if region_required and region not in rax_module.regions:
        module.fail_json(msg='%s is not a valid region, must be one of: %s' %
                         (region, ','.join(rax_module.regions)))
    return rax_module
| gpl-3.0 |
unicornis/suds-uis | suds/xsd/sxbasic.py | 7 | 23898 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbasic} module provides classes that represent
I{basic} schema objects.
"""
from suds import *
from suds.xsd import *
from suds.xsd.sxbase import *
from suds.xsd.query import *
from suds.sax import Namespace
from suds.transport import TransportError
from suds.reader import DocumentReader
from urlparse import urljoin
from logging import getLogger
log = getLogger(__name__)
class RestrictionMatcher:
    """
    For use with L{NodeFinder} to match restriction.
    """
    def match(self, n):
        # True when the visited node is a <xs:restriction/> schema object.
        return isinstance(n, Restriction)
class TypedContent(Content):
    """
    Represents any I{typed} content.
    """

    def __init__(self, *args, **kwargs):
        Content.__init__(self, *args, **kwargs)
        # Cache of resolve() results, keyed by the nobuiltin flag.
        self.resolved_cache = {}

    def resolve(self, nobuiltin=False):
        """
        Resolve the node's type reference and return the referenced type node.
        Returns self if the type is defined locally, e.g. as a <complexType>
        subnode. Otherwise returns the referenced external node.
        @param nobuiltin: Flag indicating whether resolving to XSD builtin
            types should not be allowed.
        @return: The resolved (true) type.
        @rtype: L{SchemaObject}
        """
        cached = self.resolved_cache.get(nobuiltin)
        if cached is not None:
            return cached
        resolved = self.__resolve_type(nobuiltin)
        self.resolved_cache[nobuiltin] = resolved
        return resolved

    def __resolve_type(self, nobuiltin=False):
        """
        Private resolve() worker without any result caching.
        @param nobuiltin: Flag indicating whether resolving to XSD builtin
            types should not be allowed.
        @return: The resolved (true) type.
        @rtype: L{SchemaObject}
        Implementation note:
        Note that there is no need for a recursive implementation here since
        a node can reference an external type node but there is no way using
        WSDL to then make that type node actually be a reference to a different
        type node.
        """
        qref = self.qref()
        if qref is None:
            # No type reference: the type is defined locally.
            return self
        query = TypeQuery(qref)
        query.history = [self]
        log.debug('%s, resolving: %s\n using:%s', self.id, qref, query)
        resolved = query.execute(self.schema)
        if resolved is None:
            log.debug(self.schema)
            raise TypeNotFound(qref)
        if resolved.builtin() and nobuiltin:
            # Caller asked not to resolve down to XSD builtins.
            return self
        return resolved

    def qref(self):
        """
        Get the I{type} qualified reference to the referenced XSD type.
        This method takes into account simple types defined through
        restriction which are detected by determining that self is simple
        (len=0) and by finding a restriction child.
        @return: The I{type} qualified reference.
        @rtype: qref
        """
        qref = self.type
        if qref is None and len(self) == 0:
            # Childless and untyped: look for a restriction to use its base.
            ls = []
            m = RestrictionMatcher()
            finder = NodeFinder(m, 1)
            finder.find(self, ls)
            if len(ls):
                return ls[0].ref
        return qref
class Complex(SchemaObject):
    """
    Represents an (XSD) schema <xs:complexType/> node.
    @cvar childtags: A list of valid child node names.
    @type childtags: (I{str},...)
    """

    def childtags(self):
        return ('attribute', 'attributeGroup', 'sequence', 'all', 'choice',
                'complexContent', 'simpleContent', 'any', 'group')

    def description(self):
        return ('name',)

    def extension(self):
        # True when any raw child is (or contains) an extension.
        return any(c.extension() for c in self.rawchildren)

    def mixed(self):
        # Mixed content is signalled by a mixed <simpleContent/> child.
        return any(isinstance(c, SimpleContent) and c.mixed()
                   for c in self.rawchildren)
class Group(SchemaObject):
    """
    Represents an (XSD) schema <xs:group/> node.
    @cvar childtags: A list of valid child node names.
    @type childtags: (I{str},...)
    """
    def childtags(self):
        return 'sequence', 'all', 'choice'

    def dependencies(self):
        # Returns (merge-index, deps): the referenced group, when any,
        # is both a dependency and the merge target (index 0).
        deps = []
        midx = None
        if self.ref is not None:
            query = GroupQuery(self.ref)
            g = query.execute(self.schema)
            if g is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(g)
            midx = 0
        return midx, deps

    def merge(self, other):
        # Adopt the referenced group's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return 'name', 'ref'
class AttributeGroup(SchemaObject):
    """
    Represents an (XSD) schema <xs:attributeGroup/> node.
    @cvar childtags: A list of valid child node names.
    @type childtags: (I{str},...)
    """
    def childtags(self):
        return 'attribute', 'attributeGroup'

    def dependencies(self):
        # Returns (merge-index, deps): the referenced attribute group,
        # when any, is both a dependency and the merge target (index 0).
        deps = []
        midx = None
        if self.ref is not None:
            query = AttrGroupQuery(self.ref)
            ag = query.execute(self.schema)
            if ag is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(ag)
            midx = 0
        return midx, deps

    def merge(self, other):
        # Adopt the referenced attribute group's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return 'name', 'ref'
class Simple(SchemaObject):
    """
    Represents an (XSD) schema <xs:simpleType/> node.
    """

    def childtags(self):
        return ('restriction', 'any', 'list')

    def enum(self):
        # An enumerated simple type has at least one <enumeration/>
        # descendant.
        return any(isinstance(child, Enumeration)
                   for child, ancestry in self.children())

    def mixed(self):
        return len(self)

    def description(self):
        return ('name',)

    def extension(self):
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
class List(SchemaObject):
    """
    Represents an (XSD) schema <xs:list/> node.
    """
    def childtags(self):
        # A list node has no interesting children.
        return ()

    def description(self):
        return ('name',)

    def xslist(self):
        # Marks this node as an xs:list.
        return True
class Restriction(SchemaObject):
    """
    Represents an (XSD) schema <xs:restriction/> node.
    """
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The restricted base type, referenced by QName.
        self.ref = root.get('base')

    def childtags(self):
        return 'enumeration', 'attribute', 'attributeGroup'

    def dependencies(self):
        # The (non-builtin) base type, when any, is both a dependency and
        # the merge target (index 0).
        deps = []
        midx = None
        if self.ref is not None:
            query = TypeQuery(self.ref)
            super = query.execute(self.schema)
            if super is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not super.builtin():
                deps.append(super)
                midx = 0
        return midx, deps

    def restriction(self):
        return True

    def merge(self, other):
        # Prepend the base type's children, filtering out duplicates of
        # children we already have.
        SchemaObject.merge(self, other)
        filter = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, filter)

    def description(self):
        return ('ref',)
class Collection(SchemaObject):
    """
    Represents an (XSD) schema collection node:
        - sequence
        - choice
        - all
    """
    def childtags(self):
        return 'element', 'sequence', 'all', 'choice', 'any', 'group'
class Sequence(Collection):
    """
    Represents an (XSD) schema <xs:sequence/> node.
    """
    def sequence(self):
        # Identifies this collection as an ordered sequence.
        return True
class All(Collection):
    """
    Represents an (XSD) schema <xs:all/> node.
    """
    def all(self):
        # Identifies this collection as an unordered 'all' group.
        return True
class Choice(Collection):
    """
    Represents an (XSD) schema <xs:choice/> node.
    """
    def choice(self):
        # Identifies this collection as a mutually-exclusive choice.
        return True
class ComplexContent(SchemaObject):
    """
    Represents an (XSD) schema <xs:complexContent/> node.
    """

    def childtags(self):
        return ('attribute', 'attributeGroup', 'extension', 'restriction')

    def extension(self):
        # True when any raw child is (or contains) an extension.
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        # True when any raw child is (or contains) a restriction.
        return any(c.restriction() for c in self.rawchildren)
class SimpleContent(SchemaObject):
    """
    Represents an (XSD) schema <xs:simpleContent/> node.
    """

    def childtags(self):
        return ('extension', 'restriction')

    def extension(self):
        # True when any raw child is (or contains) an extension.
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        # True when any raw child is (or contains) a restriction.
        return any(c.restriction() for c in self.rawchildren)

    def mixed(self):
        return len(self)
class Enumeration(Content):
    """
    Represents an (XSD) schema <xs:enumeration/> node.
    """
    def __init__(self, schema, root):
        Content.__init__(self, schema, root)
        # An enumeration's name is its literal 'value' attribute.
        self.name = root.get('value')

    def description(self):
        return ('name',)

    def enum(self):
        return True
class Element(TypedContent):
    """
    Represents an (XSD) schema <xs:element/> node.
    """

    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        a = root.get('form')
        if a is not None:
            # Explicit 'form' attribute overrides the schema default.
            self.form_qualified = ( a == 'qualified' )
        a = self.root.get('nillable')
        if a is not None:
            self.nillable = ( a in ('1', 'true') )
        self.implany()

    def implany(self):
        """
        Set the type as any when implicit.
        An implicit <xs:any/> is when an element has no
        body and no type defined.
        @return: self
        @rtype: L{Element}
        """
        if self.type is None and \
            self.ref is None and \
            self.root.isempty():
            self.type = self.anytype()
        return self

    def childtags(self):
        return 'attribute', 'simpleType', 'complexType', 'any'

    def extension(self):
        # True when any raw child is (or contains) an extension.
        for c in self.rawchildren:
            if c.extension():
                return True
        return False

    def restriction(self):
        # True when any raw child is (or contains) a restriction.
        for c in self.rawchildren:
            if c.restriction():
                return True
        return False

    def dependencies(self):
        # A referenced element, when any, is both a dependency and the
        # merge target (index 0).
        deps = []
        midx = None
        e = self.__deref()
        if e is not None:
            deps.append(e)
            midx = 0
        return midx, deps

    def merge(self, other):
        # Adopt the referenced element's children wholesale.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return 'name', 'ref', 'type'

    def anytype(self):
        """ create an xsd:anyType reference """
        p, u = Namespace.xsdns
        mp = self.root.findPrefix(u)
        if mp is None:
            # XSD namespace not mapped yet: add the default prefix.
            mp = p
            self.root.addPrefix(p, u)
        return ':'.join((mp, 'anyType'))

    def namespace(self, prefix=None):
        """
        Get this schema element's target namespace.
        In case of reference elements, the target namespace is defined by the
        referenced and not the referencing element node.
        @param prefix: The default prefix.
        @type prefix: str
        @return: The schema element's target namespace
        @rtype: (I{prefix},I{URI})
        """
        e = self.__deref()
        if e is not None:
            return e.namespace(prefix)
        return super(Element, self).namespace()

    def __deref(self):
        # Resolve the 'ref' attribute to the referenced element, or None.
        if self.ref is None:
            return
        query = ElementQuery(self.ref)
        e = query.execute(self.schema)
        if e is None:
            log.debug(self.schema)
            raise TypeNotFound(self.ref)
        return e
class Extension(SchemaObject):
    """
    Represents an (XSD) schema <xs:extension/> node.
    """
    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The extended base type, referenced by QName.
        self.ref = root.get('base')

    def childtags(self):
        return 'attribute', 'attributeGroup', 'sequence', 'all', 'choice', \
            'group'

    def dependencies(self):
        # The (non-builtin) base type, when any, is both a dependency and
        # the merge target (index 0).
        deps = []
        midx = None
        if self.ref is not None:
            query = TypeQuery(self.ref)
            super = query.execute(self.schema)
            if super is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not super.builtin():
                deps.append(super)
                midx = 0
        return midx, deps

    def merge(self, other):
        # Prepend the base type's children, filtering out duplicates of
        # children we already have.
        SchemaObject.merge(self, other)
        filter = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, filter)

    def extension(self):
        return self.ref is not None

    def description(self):
        return ('ref',)
class Import(SchemaObject):
    """
    Represents an (XSD) schema <xs:import/> node.
    @cvar locations: A dictionary of namespace locations.
    @type locations: dict
    @ivar ns: The imported namespace.
    @type ns: str
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """

    # Class-level map of namespace URI -> schema location, used when an
    # import omits schemaLocation. Populated via bind().
    locations = {}

    @classmethod
    def bind(cls, ns, location=None):
        """
        Bind a namespace to a schema location (URI).
        This is used for imports that don't specify a schemaLocation.
        @param ns: A namespace-uri.
        @type ns: str
        @param location: The (optional) schema location for the
            namespace. (default=ns).
        @type location: str
        """
        if location is None:
            location = ns
        cls.locations[ns] = location

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.ns = (None, root.get('namespace'))
        self.location = root.get('schemaLocation')
        if self.location is None:
            # No explicit location: fall back to the static bindings.
            self.location = self.locations.get(self.ns[1])
        self.opened = False

    def open(self, options):
        """
        Open and import the referenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Import each schema only once.
            return
        self.opened = True
        log.debug('%s, importing ns="%s", location="%s"', self.id, self.ns[1], self.location)
        result = self.locate()
        if result is None:
            if self.location is None:
                log.debug('imported schema (%s) not-found', self.ns[1])
            else:
                result = self.download(options)
        log.debug('imported:\n%s', result)
        return result

    def locate(self):
        """ find the schema locally """
        if self.ns[1] != self.schema.tns[1]:
            return self.schema.locate(self.ns)

    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            if '://' not in url:
                # Relative location: resolve against the schema's base URL.
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'imported schema (%s) at (%s), failed' % (self.ns[1], url)
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)

    def description(self):
        return 'ns', 'location'
class Include(SchemaObject):
    """
    Represents an (XSD) schema <xs:include/> node.
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """

    locations = {}

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.location = root.get('schemaLocation')
        if self.location is None:
            # NOTE(review): self.ns is not assigned in this class —
            # presumably inherited from SchemaObject; verify.
            self.location = self.locations.get(self.ns[1])
        self.opened = False

    def open(self, options):
        """
        Open and include the referenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Include each schema only once.
            return
        self.opened = True
        log.debug('%s, including location="%s"', self.id, self.location)
        result = self.download(options)
        log.debug('included:\n%s', result)
        return result

    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            if '://' not in url:
                # Relative location: resolve against the schema's base URL.
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            self.__applytns(root)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'include schema at (%s), failed' % url
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)

    def __applytns(self, root):
        """ make sure included schema has same tns. """
        TNS = 'targetNamespace'
        tns = root.get(TNS)
        if tns is None:
            # Included schema inherits the including schema's tns.
            tns = self.schema.tns[1]
            root.set(TNS, tns)
        else:
            if self.schema.tns[1] != tns:
                raise Exception, '%s mismatch' % TNS

    def description(self):
        return 'location'
class Attribute(TypedContent):
    """
    Represents an (XSD) <attribute/> node.
    """
    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        # 'use' is '' (optional), 'optional', 'required' or 'prohibited'.
        self.use = root.get('use', default='')

    def childtags(self):
        return ('restriction',)

    def isattr(self):
        return True

    def get_default(self):
        """
        Gets the <xs:attribute default=""/> attribute value.
        @return: The default value for the attribute
        @rtype: str
        """
        return self.root.get('default', default='')

    def optional(self):
        return self.use != 'required'

    def dependencies(self):
        # A referenced attribute, when any, is both a dependency and the
        # merge target (index 0).
        deps = []
        midx = None
        if self.ref is not None:
            query = AttrQuery(self.ref)
            a = query.execute(self.schema)
            if a is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(a)
            midx = 0
        return midx, deps

    def description(self):
        return 'name', 'ref', 'type'
class Any(Content):
    """
    Represents an (XSD) <any/> node.
    """
    def get_child(self, name):
        # An <any/> matches any child: synthesize one on demand.
        root = self.root.clone()
        root.set('note', 'synthesized (any) child')
        child = Any(self.schema, root)
        return child, []

    def get_attribute(self, name):
        # An <any/> matches any attribute: synthesize one on demand.
        root = self.root.clone()
        root.set('note', 'synthesized (any) attribute')
        attribute = Any(self.schema, root)
        return attribute, []

    def any(self):
        return True
class Factory:
    """
    @cvar tags: A factory to create object objects based on tag.
    @type tags: {tag:fn,}
    """

    # Maps an XSD tag name to the class that models it.
    tags = {
        'import': Import,
        'include': Include,
        'complexType': Complex,
        'group': Group,
        'attributeGroup': AttributeGroup,
        'simpleType': Simple,
        'list': List,
        'element': Element,
        'attribute': Attribute,
        'sequence': Sequence,
        'all': All,
        'choice': Choice,
        'complexContent': ComplexContent,
        'simpleContent': SimpleContent,
        'restriction': Restriction,
        'enumeration': Enumeration,
        'extension': Extension,
        'any': Any,
    }

    @classmethod
    def maptag(cls, tag, fn):
        """
        Map (override) tag => I{class} mapping.
        @param tag: An XSD tag name.
        @type tag: str
        @param fn: A function or class.
        @type fn: fn|class.
        """
        cls.tags[tag] = fn

    @classmethod
    def create(cls, root, schema):
        """
        Create an object based on the root tag name.
        Returns None for unmapped tags.
        @param root: An XML root element.
        @type root: L{Element}
        @param schema: A schema object.
        @type schema: L{schema.Schema}
        @return: The created object.
        @rtype: L{SchemaObject}
        """
        fn = cls.tags.get(root.name)
        if fn is not None:
            return fn(schema, root)

    @classmethod
    def build(cls, root, schema, filter=('*',)):
        """
        Build an xsobject representation.
        @param root: An schema XML root.
        @type root: L{sax.element.Element}
        @param filter: A tag filter.
        @type filter: [str,...]
        @return: A schema object graph.
        @rtype: L{sxbase.SchemaObject}
        """
        children = []
        for node in root.getChildren(ns=Namespace.xsdns):
            if '*' in filter or node.name in filter:
                child = cls.create(node, schema)
                if child is None:
                    # Unmapped tag: skip it (and its subtree).
                    continue
                children.append(child)
                # Recurse, restricting to the tags the child accepts.
                c = cls.build(node, schema, child.childtags())
                child.rawchildren = c
        return children

    @classmethod
    def collate(cls, children):
        # Partition the top-level children by kind, keyed by qname.
        # Imports/includes are removed from the children list itself.
        imports = []
        elements = {}
        attributes = {}
        types = {}
        groups = {}
        agrps = {}
        for c in children:
            if isinstance(c, (Import, Include)):
                imports.append(c)
                continue
            if isinstance(c, Attribute):
                attributes[c.qname] = c
                continue
            if isinstance(c, Element):
                elements[c.qname] = c
                continue
            if isinstance(c, Group):
                groups[c.qname] = c
                continue
            if isinstance(c, AttributeGroup):
                agrps[c.qname] = c
                continue
            types[c.qname] = c
        for i in imports:
            children.remove(i)
        return children, imports, attributes, elements, types, groups, agrps
#######################################################
# Static Import Bindings :-(
#######################################################
# Pre-bind well-known namespaces whose <xs:import/> nodes commonly omit a
# schemaLocation, so they can still be resolved.
Import.bind(
    'http://schemas.xmlsoap.org/soap/encoding/',
    'suds://schemas.xmlsoap.org/soap/encoding/')
Import.bind(
    'http://www.w3.org/XML/1998/namespace',
    'http://www.w3.org/2001/xml.xsd')
Import.bind(
    'http://www.w3.org/2001/XMLSchema',
    'http://www.w3.org/2001/XMLSchema.xsd')
| lgpl-3.0 |
catapult-project/catapult | firefighter/update/handlers/builds.py | 7 | 5951 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
import time
from google.appengine.api import urlfetch
import webapp2
from base import bigquery
from base import constants
from common import buildbot
class Builds(webapp2.RequestHandler):
  """Cron handler: records completed buildbot builds into BigQuery."""

  def get(self):
    urlfetch.set_default_fetch_deadline(300)
    bq = bigquery.BigQuery()
    current_events = []
    events = []
    for master_name in constants.MASTER_NAMES:
      builders = buildbot.Builders(master_name)
      available_builds = _AvailableBuilds(builders)
      recorded_builds = _RecordedBuilds(bq, builders, available_builds)
      for builder in builders:
        # Filter out recorded builds from available builds.
        build_numbers = (available_builds[builder.name] -
                         recorded_builds[builder.name])
        builder_current_events, builder_events = _TraceEventsForBuilder(
            builder, build_numbers)
        current_events += builder_current_events
        events += builder_events
    jobs = []
    if current_events:
      # In-progress builds replace the whole current-builds table.
      jobs += bq.InsertRowsAsync(
          constants.DATASET, constants.CURRENT_BUILDS_TABLE,
          current_events, truncate=True)
    if events:
      # Completed builds are appended to the historical table.
      jobs += bq.InsertRowsAsync(constants.DATASET, constants.BUILDS_TABLE,
                                 events)
    for job in jobs:
      bq.PollJob(job, 60 * 20)  # 20 minutes.
def _AvailableBuilds(builders):
available_builds = {}
for builder in builders:
if not builder.cached_builds:
available_builds[builder.name] = frozenset()
continue
max_build = max(builder.cached_builds)
# Buildbot on tryserver.chromium.perf is occasionally including build 0 in
# its list of cached builds. That results in more builds than we want.
# Limit the list to the last 100 builds, because the urlfetch URL limit is
# 2048 bytes, and "&select=100000" * 100 is 1400 bytes.
builds = frozenset(build for build in builder.cached_builds
if build >= max_build - 100)
available_builds[builder.name] = builds
return available_builds
def _RecordedBuilds(bq, builders, available_builds):
  """Queries BigQuery for builds already recorded, per builder.

  Returns a dict mapping builder name -> set of recorded build numbers,
  restricted to each builder's available build-number window.
  """
  # 105 days / 15 weeks. Must be some number greater than 100 days, because
  # we request up to 100 builds (see above comment), and the slowest cron bots
  # run one job every day.
  start_time_ms = -1000 * 60 * 60 * 24 * 105
  table = '%s.%s@%d-' % (constants.DATASET, constants.BUILDS_TABLE,
                         start_time_ms)
  conditions = []
  for builder in builders:
    if not available_builds[builder.name]:
      continue
    max_build = max(available_builds[builder.name])
    min_build = min(available_builds[builder.name])
    conditions.append('WHEN builder = "%s" THEN build >= %d AND build <= %d' %
                      (builder.name, min_build, max_build))
  query = (
      'SELECT builder, build '
      'FROM [%s] ' % table +
      'WHERE CASE %s END ' % ' '.join(conditions) +
      'GROUP BY builder, build'
  )
  query_result = bq.QuerySync(query, 600)
  builds = collections.defaultdict(set)
  for row in query_result:
    # BigQuery rows come back as {'f': [{'v': builder}, {'v': build}]}.
    builds[row['f'][0]['v']].add(int(row['f'][1]['v']))
  return builds
def _TraceEventsForBuilder(builder, build_numbers):
  """Fetches the given builds and converts them to trace events.

  Returns (current_events, events): events for in-progress builds and for
  completed builds, respectively.
  """
  if not build_numbers:
    return (), ()
  build_numbers_string = ', '.join(map(str, sorted(build_numbers)))
  logging.info('Getting %s: %s', builder.name, build_numbers_string)
  # Fetch build information and generate trace events.
  current_events = []
  events = []
  builder_builds = builder.builds.Fetch(build_numbers)
  query_time = time.time()
  for build in builder_builds:
    if build.complete:
      events += _TraceEventsFromBuild(builder, build, query_time)
    else:
      current_events += _TraceEventsFromBuild(builder, build, query_time)
  return current_events, events
def _TraceEventsFromBuild(builder, build, query_time):
  """Yields one trace-event dict for the build plus one per build step.

  *query_time* is used as the end time for anything still running.
  """
  # Builder names like "config (3)" encode a host shard number.
  match = re.match(r'(.+) \(([0-9]+)\)', builder.name)
  if match:
    configuration, host_shard = match.groups()
    host_shard = int(host_shard)
  else:
    configuration = builder.name
    host_shard = 0
  # Build trace event.
  if build.end_time:
    build_end_time = build.end_time
  else:
    build_end_time = query_time
  os, os_version, role = _ParseBuilderName(builder.name)
  yield {
      'name': 'Build %d' % build.number,
      'start_time': build.start_time,
      'end_time': build_end_time,
      'build': build.number,
      'builder': builder.name,
      'configuration': configuration,
      'host_shard': host_shard,
      'hostname': build.slave_name,
      'master': builder.master_name,
      'os': os,
      'os_version': os_version,
      'role': role,
      'status': build.status,
      'url': build.url,
  }
  # Step trace events.
  for step in build.steps:
    # Skip steps that never started and buildbot's synthetic 'steps' step.
    if not step.start_time:
      continue
    if step.name == 'steps':
      continue
    if step.end_time:
      step_end_time = step.end_time
    else:
      step_end_time = query_time
    yield {
        'name': step.name,
        'start_time': step.start_time,
        'end_time': step_end_time,
        'benchmark': step.name,  # TODO(dtu): This isn't always right.
        'build': build.number,
        'builder': builder.name,
        'configuration': configuration,
        'host_shard': host_shard,
        'hostname': build.slave_name,
        'master': builder.master_name,
        'os': os,
        'os_version': os_version,
        'role': role,
        'status': step.status,
        'url': step.url,
    }
def _ParseBuilderName(builder_name):
builder_name = builder_name.lower()
for os in ('android', 'linux', 'mac', 'win'):
if os in builder_name:
break
else:
os = None
if 'build' in builder_name or 'compile' in builder_name:
role = 'builder'
else:
role = 'tester'
return (os, None, role)
| bsd-3-clause |
guiquanz/zulip | zilencer/models.py | 125 | 1148 | from django.db import models
import zerver.models
def get_deployment_by_domain(domain):
    # Look up the Deployment that serves the realm with the given domain.
    # Raises Deployment.DoesNotExist when no such realm is attached.
    return Deployment.objects.get(realms__domain=domain)
class Deployment(models.Model):
    """A Zulip server deployment and the realms it serves."""

    # Realms served by this deployment.
    realms = models.ManyToManyField(zerver.models.Realm, related_name="_deployments")
    is_active = models.BooleanField(default=True)

    # TODO: This should really become the public portion of a keypair, and
    # it should be settable only with an initial bearer "activation key"
    api_key = models.CharField(max_length=32, null=True)

    base_api_url = models.CharField(max_length=128)
    base_site_url = models.CharField(max_length=128)

    @property
    def endpoints(self):
        # The URLs a client needs in order to talk to this deployment.
        return {'base_api_url': self.base_api_url, 'base_site_url': self.base_site_url}

    @property
    def name(self):
        # TODO: This only does the right thing for prod because prod authenticates to
        # staging with the zulip.com deployment key, while staging is technically the
        # deployment for the zulip.com realm.
        # This also doesn't necessarily handle other multi-realm deployments correctly.
        return self.realms.order_by('pk')[0].domain
| apache-2.0 |
xzturn/caffe2 | caffe2/python/layers/sparse_lookup.py | 1 | 13116 | ## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
get_key,
IdList,
IdScoreList,
LayerPsParam,
ModelLayer,
)
import collections
import functools
import math
import numpy as np
import operator
def get_sparse_lookup_predictor_version(version):
    """Validate and return a supported sparse-lookup predictor version."""
    supported_versions = {'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise'}
    assert version in supported_versions, \
        "Unexpected version of sparse_lookup layer {0}".format(version)
    return version
def _is_id_list(input_record):
    # True iff the record's schema matches the IdList schema exactly.
    return schema.equal_schemas(input_record, IdList)
def _is_id_score_list(input_record):
    # True iff the record matches IdScoreList structurally; field types are
    # deliberately not compared.
    return schema.equal_schemas(input_record,
                                IdScoreList,
                                check_field_types=False)
class SparseLookup(ModelLayer):
    """Layer that looks up (and optionally pools) embedding rows for
    sparse id features (IdList or IdScoreList inputs).
    """

    # Reducers usable with IdList inputs.
    _id_list_supported_reducers = [
        'LogMeanExp', 'LogSumExp', 'Max', 'Mean', 'Sum',
        'WeightedSum', 'WeightedMean', 'Sqrt', 'None']

    # Reducers usable with IdScoreList inputs.
    _id_score_list_supported_reducers = [
        'PositionWeighted', 'Mean', 'Sum', 'WeightedSum', 'WeightedMean', 'None']

    def __init__(self, model, input_record, inner_shape, reducer,
                 weight_init=None, weight_optim=None,
                 name='sparse_lookup', regularizer=None, **kwargs):
        """Create the embedding table parameter and output schema.

        inner_shape: int or list/tuple — per-row embedding shape.
        reducer: pooling op name; must be supported for the input type.
        weight_init: optional (op_name, kwargs) initializer; defaults to a
            uniform fill scaled by 1/sqrt(input_dim).
        """
        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        if reducer == "PositionWeighted":
            # PositionWeighted needs per-id scores, hence IdScoreList input;
            # the scores become trainable external weights.
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only support IdScoreList, but got {} " +
                "please use PositionWeighted layer to convert IdList " +
                "to IdScoreList").format(repr(self.input_record))
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim > 0, (
            "{} should have categorical limit > 0, but got {}".format(
                get_key(input_record)(), input_dim))

        # Default initializer scale: uniform in [-1/sqrt(n), 1/sqrt(n)].
        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else (
            'UniformFill', {'min': -scale, 'max': scale})

        # The sparse key blob differs by input type (items vs keys).
        if _is_id_list(self.input_record):
            sparse_key = self.input_record.items()
        elif _is_id_score_list(self.input_record):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        # Expected average segment length, if metadata provides it; used by
        # the parameter-server sharding hint below.
        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(
            param_name='w',
            shape=self.shape,
            initializer=self.weight_init,
            optimizer=weight_optim,
            ps_param=LayerPsParam(
                sparse_key=sparse_key,
                average_length=avg_length),
            regularizer=regularizer
        )

        # Per-row scale/bias blob used only by the (non-fused) 8-bit
        # rowwise-quantized execution path; never optimized.
        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(
            param_name='scale_bias',
            shape=[],
            initializer=self.scale_bias_init,
            optimizer=model.NoOptim,
        )

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )
def get_memory_usage(self):
return functools.reduce(operator.mul, self.shape) * 4
    def get_fp16_compatible_parameters(self):
        # Only the embedding table itself may be stored in fp16.
        return [self.w]
def support_8bit(self):
# Rowwise quantization makes sense only if shape it's 2D matrix with
# second dimension >= 8
if len(self.shape) != 2 or self.shape[1] < 8:
return False
return True
def get_8bits_compatible_parameters(self, fused=True):
if not self.support_8bit():
return []
if fused:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w'
)
return [RowwiseQuantized8BitsWeight(self.w)]
else:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w, scale_bias'
)
return [RowwiseQuantized8BitsWeight(self.w, self.scale_bias)]
def _gather_wrapper(self, net, version, in_indices, out):
# Gather can work on all kinds of input data types, and output
# data with the same type. Convert the output of Gather to float,
# because the follow-up Ops expect fp32.
if version == 'fp32':
return net.Gather([self.w, in_indices], out)
elif version == 'fp16':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.HalfToFloat(gathered_w, out)
elif version == 'uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
gathered_scale_bias = net.Gather(
[self.scale_bias, in_indices],
'gathered_scale_bias'
)
return net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias], out)
elif version == 'fused_uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.Fused8BitRowwiseQuantizedToFloat(gathered_w, out)
else:
raise "Unsupported version of operators in SparseLookup " +\
"layer: {0}".format(version)
def _sparse_lengths_weighted_reducer(
self, in_indices, weights, reducer,
net, version, grad_on_weights=0):
op_input = [
self.w,
weights,
in_indices,
self.input_record.lengths()
]
layer_name = 'SparseLengths' + reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
# deal with sparse features of id_list type
def _add_ops_id_list(self, net, version):
assert self.reducer in self._id_list_supported_reducers, (
"Unsupported reducer: {} for ID_LIST".format(self.reducer)
)
if self.reducer in ['Sum', 'Mean', 'WeightedSum', 'WeightedMean']:
op_input = [self.w,
self.input_record.items(),
self.input_record.lengths()]
# For id list features, the behaviors of 'Sum' and
# 'WeightedSum' are identical, since we can regard the weight on each
# id as 1. Similarly, for 'Mean' and 'WeightedMean'.
if self.reducer == 'WeightedSum':
self.reducer = 'Sum'
elif self.reducer == 'WeightedMean':
self.reducer = 'Mean'
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[net.NextScopedBlob('lengths_sqrt')],
power=0.5,
)
self._sparse_lengths_weighted_reducer(
self.input_record.items(),
sqrt_weight,
'WeightedSum', net, version)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.items(),
self.output_schema.field_blobs())
else:
table_rows = self._gather_wrapper(
net, version, self.input_record.items(), 'table_rows')
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
self.input_record.lengths() + '_sid')
net.__getattr__('SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
)
# deal with sparse features of id_score_list type
def _add_ops_id_score_list(self, net, version):
assert self.reducer in self._id_score_list_supported_reducers, (
"Unsupported reducer: {} for ID_SCORE_LIST".format(self.reducer)
)
if self.reducer in ['WeightedSum', 'WeightedMean']:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.input_record.values(),
self.reducer, net, version)
elif self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.keys(),
self.input_record.lengths()]
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'PositionWeighted':
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.external_weights,
'WeightedSum', net, version, grad_on_weights=1)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.keys(),
self.output_schema.field_blobs())
else:
raise "Only Sum, Mean, None are supported for IdScoreList input." +\
"Trying to create with {}".format(self.reducer)
def add_ops(self, net):
cur_scope = get_current_scope()
version = get_sparse_lookup_predictor_version(
**cur_scope.get(get_sparse_lookup_predictor_version.__name__,
{'version': 'fp32'}))
# TODO(amalevich): Layer should not be responsible for decision about
# quantization.
if not self.support_8bit() and version in {'uint8rowwise',
'fused_uint8rowwise'}:
version = 'fp32'
if _is_id_list(self.input_record):
self._add_ops_id_list(net, version=version)
elif _is_id_score_list(self.input_record):
self._add_ops_id_score_list(net, version=version)
else:
raise "Unsupported input type {0}".format(self.input_record)
| apache-2.0 |
jihyun-kim/heekscnc | nc/emc2tap.py | 32 | 3103 | import nc
import iso_codes
import emc2
class CodesEMC2(iso_codes.Codes):
    """ISO code table overrides producing EMC2 (LinuxCNC) tapping GCode."""

    def SPACE(self): return(' ')

    # G33.1: EMC2's spindle-synchronized rigid tapping cycle.
    def TAP(self): return('G33.1')
    # K word carries the thread pitch/depth argument for G33.1.
    def TAP_DEPTH(self, format, depth): return(self.SPACE() + 'K' + (format % depth))

    # This version of COMMENT removes comments from the resultant GCode
    #def COMMENT(self,comment): return('')

# Install this code table as the active one for the iso_codes module.
iso_codes.codes = CodesEMC2()
class CreatorEMC2tap(emc2.CreatorEMC2):
    """EMC2 (LinuxCNC) GCode creator emitting rigid-tapping (G33.1) cycles."""

    def init(self):
        # BUG FIX: the original called iso.CreatorEMC2.init(self), but no
        # module named 'iso' is imported in this file; the base class comes
        # from the emc2 module, so delegate there.
        emc2.CreatorEMC2.init(self)

    # G33.1 tapping with EMC for now
    # unsynchronized (chuck) taps NIY (tap_mode = 1)
    def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
        """Emit a rigid tapping cycle at (x, y), from above z down to z-depth.

        Required inputs: z (top of hole), standoff, pitch, direction; the
        cycle is silently skipped when any is missing. Only rigid tapping
        (tap_mode == 0) is supported.
        """
        # mystery parameters:
        # zretract=None, dwell_bottom=None, stoppos=None, spin_in=None, spin_out=None
        # I dont see how to map these to EMC Gcode
        if (standoff == None):
            # This is a bad thing. All the drilling cycles need a retraction (and starting) height.
            return
        if (z == None):
            return  # We need a Z value as well. This input parameter represents the top of the hole
        if (pitch == None):
            return  # We need a pitch value.
        if (direction == None):
            return  # We need a direction value.
        if (tap_mode != 0):
            self.comment('only rigid tapping currently supported')
            return

        self.write_preps()
        self.write_blocknum()
        self.write_spindle()
        self.write('\n')

        # rapid to starting point; z first, then x,y iff given

        # Set the retraction point to the 'standoff' distance above the starting z height.
        retract_height = z + standoff

        # unsure if this is needed:
        if self.z != retract_height:
            self.rapid(z = retract_height)

        # then continue to x,y if given
        if (x != None) or (y != None):
            self.write_blocknum()
            self.write(iso_codes.codes.RAPID())
            if (x != None):
                self.write(iso_codes.codes.X() + (self.fmt % x))
                self.x = x
            if (y != None):
                self.write(iso_codes.codes.Y() + (self.fmt % y))
                self.y = y
            self.write('\n')

        # emit the G33.1 block: pitch (K word) then target Z.
        self.write_blocknum()
        self.write(iso_codes.codes.TAP())
        self.write(iso_codes.codes.TAP_DEPTH(self.ffmt, pitch) + iso_codes.codes.SPACE())
        self.write(iso_codes.codes.Z() + (self.fmt % (z - depth)))  # 'z' value for the bottom of the tap.
        self.write_misc()
        self.write('\n')

        self.z = retract_height  # this cycle returns to the start position, so remember that as z value

nc.creator = CreatorEMC2tap()
| bsd-3-clause |
vmax-feihu/hue | desktop/core/ext-py/ssl-1.15/setup.py | 40 | 9811 | import os, sys
from distutils.core import setup, Extension
from distutils.sysconfig import get_python_lib
from distutils.cmd import Command
from distutils.command.build import build
# Refuse to build where the backport is pointless or untested.
if (sys.version_info >= (2, 6, 0)):
    # ssl is a built-in module from Python 2.6 on; nothing to build.
    # BUG FIX: the original concatenated literals without separating spaces,
    # printing "becauseit" and "Python2.6"; add the missing spaces.
    sys.stderr.write("Skipping building ssl-1.15 because "
                     "it is a built-in module in Python "
                     "2.6 and later.\n")
    sys.exit(0)
elif (sys.version_info < (2, 3, 5)):
    sys.stderr.write("Warning: This code has not been tested "
                     + "with versions of Python less than 2.3.5.\n")
class Test (Command):
    """distutils command ('setup.py test') that runs the bundled SSL tests."""

    # No command-line options for this command.
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run (self):
        """Run the regrtest module appropriately"""
        # figure out where the _ssl2 extension will be put
        b = build(self.distribution)
        b.initialize_options()
        b.finalize_options()
        extdir = os.path.abspath(b.build_platlib)

        # now set up the load path
        topdir = os.path.dirname(os.path.abspath(__file__))
        localtestdir = os.path.join(topdir, "test")
        sys.path.insert(0, topdir)        # for ssl package
        sys.path.insert(0, localtestdir)  # for test module
        sys.path.insert(0, extdir)        # for _ssl2 extension

        # make sure the network is enabled
        import test.test_support
        test.test_support.use_resources = ["network"]

        # and load the test and run it
        os.chdir(localtestdir)
        the_module = __import__("test_ssl", globals(), locals(), [])

        # Most tests run to completion simply as a side-effect of
        # being imported. For the benefit of tests that can't run
        # that way (like test_threaded_import), explicitly invoke
        # their test_main() function (if it exists).
        indirect_test = getattr(the_module, "test_main", None)
        if indirect_test is not None:
            indirect_test()
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    # Check the standard locations first: a hit there needs no extra flags.
    for candidate_dir in std_dirs:
        candidate = os.path.join(candidate_dir, filename)
        sys.stdout.write('looking for %s\n' % (candidate,))
        if os.path.exists(candidate):
            return []

    # Then the additional directories; remember which one held the file.
    for candidate_dir in paths:
        candidate = os.path.join(candidate_dir, filename)
        sys.stdout.write('looking for %s\n' % (candidate,))
        if os.path.exists(candidate):
            return [candidate_dir]

    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate `libname` via the compiler and classify where it was found.

    Returns None when the library is missing, [] when it sits in one of
    the standard directories, or [dir] naming the extra directory (with
    any trailing separator stripped) that holds it.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    found_dir = os.path.dirname(result)

    # Standard directory? Then no additional -L directive is required.
    for std_dir in std_dirs:
        if std_dir.rstrip(os.sep) == found_dir:
            return []

    # Otherwise it must live in one of the additional directories.
    for extra_dir in paths:
        stripped = extra_dir.rstrip(os.sep)
        if stripped == found_dir:
            return [stripped]

    assert False, "Internal error: Path not found in std_dirs or paths"
def find_ssl():
    """Probe the system for OpenSSL headers and libraries.

    Returns (include_dirs, library_dirs, libraries); raises Exception
    when no usable OpenSSL installation is found.
    """
    # Detect SSL support for the socket module (via _ssl)
    from distutils.ccompiler import new_compiler

    compiler = new_compiler()
    inc_dirs = compiler.include_dirs + ['/usr/include']

    ssl_incs = find_file(
        'openssl/ssl.h', inc_dirs,
        ['/usr/local/ssl/include', '/usr/contrib/ssl/include/'])
    if ssl_incs is not None:
        # Some OpenSSL builds reference krb5.h; add its directory if found.
        krb5_h = find_file('krb5.h', inc_dirs, ['/usr/kerberos/include'])
        if krb5_h:
            ssl_incs += krb5_h

    ssl_libs = find_library_file(
        compiler, 'ssl', ['/usr/lib'],
        ['/usr/local/lib', '/usr/local/ssl/lib', '/usr/contrib/ssl/lib/'])

    if (ssl_incs is not None and ssl_libs is not None):
        return ssl_incs, ssl_libs, ['ssl', 'crypto']

    raise Exception("No SSL support found")
# Pick the socket compatibility headers matching the running Python.
if (sys.version_info >= (2, 5, 1)):
    socket_inc = "./ssl/2.5.1"
else:
    socket_inc = "./ssl/2.3.6"

link_args = []

if sys.platform == 'win32':
    # Assume the openssl libraries from GnuWin32 are installed in the
    # following location:
    gnuwin32_dir = os.environ.get("GNUWIN32_DIR", r"C:\Utils\GnuWin32")

    # Set this to 1 for a dynamic build (depends on openssl DLLs)
    # Dynamic build is about 26k, static is 670k
    dynamic = int(os.environ.get("SSL_DYNAMIC", 0))

    # Environment variables may override the GnuWin32 defaults.
    ssl_incs = [os.environ.get("C_INCLUDE_DIR") or os.path.join(gnuwin32_dir, "include")]
    ssl_libs = [os.environ.get("C_LIB_DIR") or os.path.join(gnuwin32_dir, "lib")]
    libs = ['ssl', 'crypto', 'wsock32']
    if not dynamic:
        libs = libs + ['gdi32', 'gw32c', 'ole32', 'uuid']
        link_args = ['-static']
else:
    # On non-Windows platforms, probe the filesystem for OpenSSL.
    ssl_incs, ssl_libs, libs = find_ssl()

# Install the bundled test files next to the stdlib's test package.
testdir = os.path.join(get_python_lib(False), "test")
# Package metadata and build configuration for the ssl backport.
setup(name='ssl',
      version='1.15',
      description='SSL wrapper for socket objects (2.3, 2.4, 2.5 compatible)',
      long_description=
      """
      The old socket.ssl() support for TLS over sockets is being
      superseded in Python 2.6 by a new 'ssl' module. This package
      brings that module to older Python releases, 2.3.5 and up (it may
      also work on older versions of 2.3, but we haven't tried it).

      It's quite similar to the 2.6 ssl module. There's no stand-alone
      documentation for this package; instead, just use the development
      branch documentation for the SSL module at
      http://docs.python.org/dev/library/ssl.html.

      Version 1.0 had a problem with Python 2.5.1 -- the structure of
      the socket object changed from earlier versions.

      Version 1.1 was missing various package metadata information.

      Version 1.2 added more package metadata, and support for
      ssl.get_server_certificate(), and the PEM-to-DER encode/decode
      routines. Plus integrated Paul Moore's patch to setup.py for
      Windows. Plus added support for asyncore, and asyncore HTTPS
      server test.

      Version 1.3 fixed a bug in the test suite.

      Version 1.4 incorporated use of -static switch.

      Version 1.5 fixed bug in Python version check affecting build on
      Python 2.5.0.

      Version 1.7 (and 1.6) fixed some bugs with asyncore support (recv and
      send not being called on the SSLSocket class, wrong semantics for
      sendall).

      Version 1.8 incorporated some code from Chris Stawarz to handle
      sockets which are set to non-blocking before negotiating the SSL
      session.

      Version 1.9 makes ssl.SSLError a subtype of socket.error.

      Version 1.10 fixes a bug in sendall().

      Version 1.11 includes the MANIFEST file, and by default will turne
      unexpected EOFs occurring during a read into a regular EOF. It also
      removes the code for SSLFileStream, to use the regular socket module's
      _fileobject instead.

      Version 1.12 fixes the bug in SSLSocket.accept() reported by Georg
      Brandl, and adds a test case for that fix.

      Version 1.13 fixes a bug in calling do_handshake() automatically
      on non-blocking sockets. Thanks to Giampaolo Rodola. Now includes
      real asyncore test case.

      Version 1.14 incorporates some fixes to naming (rename "recv_from" to
      "recvfrom" and "send_to" to "sendto"), and a fix to the asyncore test
      case to unregister the connection handler when the connection is
      closed. It also exposes the SSL shutdown via the "unwrap" method
      on an SSLSocket. It exposes "subjectPublicKey" in the data received
      from a peer cert.

      Version 1.15 fixes a bug in write retries, where the output buffer has
      changed location because of garbage collection during the interim.
      It also provides the new flag, PROTOCOL_NOSSLv2, which selects SSL23,
      but disallows actual use of SSL2.

      Authorship: A cast of dozens over the years have written the Python
      SSL support, including Marc-Alan Lemburg, Robin Dunn, GvR, Kalle
      Svensson, Skip Montanaro, Mark Hammond, Martin von Loewis, Jeremy
      Hylton, Andrew Kuchling, Georg Brandl, Bill Janssen, Chris Stawarz,
      Neal Norwitz, and many others. Thanks to Paul Moore, David Bolen and
      Mark Hammond for help with the Windows side of the house. And it's
      all based on OpenSSL, which has its own cast of dozens!
      """,
      license='Python (MIT-like)',
      author='See long_description for details',
      author_email='python.ssl.maintainer@gmail.com',
      url='http://docs.python.org/dev/library/ssl.html',
      # Exposes "python setup.py test" via the Test command above.
      cmdclass={'test': Test},
      packages=['ssl'],
      # The C extension, built against the OpenSSL located above.
      ext_modules=[Extension('ssl._ssl2', ['ssl/_ssl2.c'],
                             include_dirs = ssl_incs + [socket_inc],
                             library_dirs = ssl_libs,
                             libraries = libs,
                             extra_link_args = link_args)],
      data_files=[(testdir, ['test/test_ssl.py',
                             'test/keycert.pem',
                             'test/badcert.pem',
                             'test/badkey.pem',
                             'test/nullcert.pem'])],
      )
chouseknecht/ansible | lib/ansible/modules/cloud/cloudstack/cs_volume.py | 25 | 17290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Jefferson Girão <jefferson@girao.net>
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Standard Ansible module metadata (schema version 1.1).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_volume
short_description: Manages volumes on Apache CloudStack based clouds.
description:
- Create, destroy, attach, detach, extract or upload volumes.
version_added: '2.1'
author:
- Jefferson Girão (@jeffersongirao)
- René Moser (@resmo)
options:
name:
description:
- Name of the volume.
- I(name) can only contain ASCII letters.
type: str
required: true
account:
description:
- Account the volume is related to.
type: str
device_id:
description:
- ID of the device on a VM the volume is attached to.
- Only considered if I(state) is C(attached).
type: int
custom_id:
description:
- Custom id to the resource.
- Allowed to Root Admins only.
type: str
disk_offering:
description:
- Name of the disk offering to be used.
- Required one of I(disk_offering), I(snapshot) if volume is not already I(state=present).
type: str
display_volume:
description:
- Whether to display the volume to the end user or not.
- Allowed to Root Admins only.
type: bool
domain:
description:
- Name of the domain the volume to be deployed in.
type: str
max_iops:
description:
- Max iops
type: int
min_iops:
description:
- Min iops
type: int
project:
description:
- Name of the project the volume to be deployed in.
type: str
size:
description:
- Size of disk in GB
type: int
snapshot:
description:
- The snapshot name for the disk volume.
- Required one of I(disk_offering), I(snapshot) if volume is not already I(state=present).
type: str
force:
description:
- Force removal of volume even it is attached to a VM.
- Considered on I(state=absent) only.
default: no
type: bool
shrink_ok:
description:
- Whether to allow to shrink the volume.
default: no
type: bool
vm:
description:
- Name of the virtual machine to attach the volume to.
type: str
zone:
description:
- Name of the zone in which the volume should be deployed.
- If not set, default zone is used.
type: str
state:
description:
- State of the volume.
- The choices C(extracted) and C(uploaded) were added in version 2.8.
type: str
default: present
choices: [ present, absent, attached, detached, extracted, uploaded ]
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
tags:
description:
- List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
- "To delete all tags, set a empty list e.g. I(tags: [])."
type: list
aliases: [ tag ]
version_added: '2.4'
url:
description:
- URL to which the volume would be extracted on I(state=extracted)
- or the URL where to download the volume on I(state=uploaded).
- Only considered if I(state) is C(extracted) or C(uploaded).
type: str
version_added: '2.8'
mode:
description:
- Mode for the volume extraction.
- Only considered if I(state=extracted).
type: str
choices: [ http_download, ftp_upload ]
default: http_download
version_added: '2.8'
format:
description:
- The format for the volume.
- Only considered if I(state=uploaded).
type: str
choices: [ QCOW2, RAW, VHD, VHDX, OVA ]
version_added: '2.8'
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create volume within project and zone with specified storage options
cs_volume:
name: web-vm-1-volume
project: Integration
zone: ch-zrh-ix-01
disk_offering: PerfPlus Storage
size: 20
delegate_to: localhost
- name: create/attach volume to instance
cs_volume:
name: web-vm-1-volume
disk_offering: PerfPlus Storage
size: 20
vm: web-vm-1
state: attached
delegate_to: localhost
- name: detach volume
cs_volume:
name: web-vm-1-volume
state: detached
delegate_to: localhost
- name: remove volume
cs_volume:
name: web-vm-1-volume
state: absent
delegate_to: localhost
# New in version 2.8
- name: Extract DATA volume to make it downloadable
cs_volume:
state: extracted
name: web-vm-1-volume
register: data_vol_out
delegate_to: localhost
- name: Create new volume by downloading source volume
cs_volume:
state: uploaded
name: web-vm-1-volume-2
format: VHD
url: "{{ data_vol_out.url }}"
delegate_to: localhost
'''
RETURN = '''
id:
description: ID of the volume.
returned: success
type: str
sample:
name:
description: Name of the volume.
returned: success
type: str
sample: web-volume-01
display_name:
description: Display name of the volume.
returned: success
type: str
sample: web-volume-01
group:
description: Group the volume belongs to
returned: success
type: str
sample: web
domain:
description: Domain the volume belongs to
returned: success
type: str
sample: example domain
project:
description: Project the volume belongs to
returned: success
type: str
sample: Production
zone:
description: Name of zone the volume is in.
returned: success
type: str
sample: ch-gva-2
created:
description: Date of the volume was created.
returned: success
type: str
sample: 2014-12-01T14:57:57+0100
attached:
description: Date of the volume was attached.
returned: success
type: str
sample: 2014-12-01T14:57:57+0100
type:
description: Disk volume type.
returned: success
type: str
sample: DATADISK
size:
description: Size of disk volume.
returned: success
type: int
sample: 20
vm:
description: Name of the vm the volume is attached to (not returned when detached)
returned: success
type: str
sample: web-01
state:
description: State of the volume
returned: success
type: str
sample: Attached
device_id:
description: Id of the device on user vm the volume is attached to (not returned when detached)
returned: success
type: int
sample: 1
url:
description: The url of the uploaded volume or the download url depending extraction mode.
returned: success when I(state=extracted)
type: str
sample: http://1.12.3.4/userdata/387e2c7c-7c42-4ecc-b4ed-84e8367a1965.vhd
version_added: '2.8'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_required_together,
cs_argument_spec
)
class AnsibleCloudStackVolume(AnsibleCloudStack):
    """CloudStack volume management: create/attach/detach/extract/upload."""

    def __init__(self, module):
        super(AnsibleCloudStackVolume, self).__init__(module)
        # Map CloudStack API result keys to this module's return keys.
        self.returns = {
            'group': 'group',
            'attached': 'attached',
            'vmname': 'vm',
            'deviceid': 'device_id',
            'type': 'type',
            'size': 'size',
            'url': 'url',
        }
        # Lazily-populated cache for the looked-up volume (see get_volume).
        self.volume = None
    def get_volume(self):
        """Look up the volume by name (case-insensitive) and cache it.

        Returns the matching volume dict or None if not found.
        """
        if not self.volume:
            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'projectid': self.get_project(key='id'),
                'zoneid': self.get_zone(key='id'),
                'displayvolume': self.module.params.get('display_volume'),
                'type': 'DATADISK',
                'fetch_list': True,
            }
            # Do not filter on DATADISK when state=extracted
            if self.module.params.get('state') == 'extracted':
                del args['type']

            volumes = self.query_api('listVolumes', **args)
            if volumes:
                volume_name = self.module.params.get('name')
                for v in volumes:
                    # Case-insensitive name match; first hit wins.
                    if volume_name.lower() == v['name'].lower():
                        self.volume = v
                        break
        return self.volume
    def get_snapshot(self, key=None):
        """Resolve the configured snapshot; fail the module if not found.

        Returns None when no snapshot param is set; otherwise the snapshot
        dict (or just its `key` field when key is given).
        """
        snapshot = self.module.params.get('snapshot')
        if not snapshot:
            return None

        args = {
            'name': snapshot,
            'account': self.get_account('name'),
            'domainid': self.get_domain('id'),
            'projectid': self.get_project('id'),
        }
        snapshots = self.query_api('listSnapshots', **args)
        if snapshots:
            return self._get_by_key(key, snapshots['snapshot'][0])
        self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)
    def present_volume(self):
        """Ensure the volume exists: create it, or resize an existing one."""
        volume = self.get_volume()
        if volume:
            volume = self.update_volume(volume)
        else:
            # Creating a new volume requires a disk offering or a snapshot.
            disk_offering_id = self.get_disk_offering(key='id')
            snapshot_id = self.get_snapshot(key='id')

            if not disk_offering_id and not snapshot_id:
                self.module.fail_json(msg="Required one of: disk_offering,snapshot")

            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'diskofferingid': disk_offering_id,
                'displayvolume': self.module.params.get('display_volume'),
                'maxiops': self.module.params.get('max_iops'),
                'miniops': self.module.params.get('min_iops'),
                'projectid': self.get_project(key='id'),
                'size': self.module.params.get('size'),
                'snapshotid': snapshot_id,
                'zoneid': self.get_zone(key='id')
            }
            if not self.module.check_mode:
                res = self.query_api('createVolume', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')

        if volume:
            # Reconcile tags on the (new or existing) volume.
            volume = self.ensure_tags(resource=volume, resource_type='Volume')
            self.volume = volume

        return volume
    def attached_volume(self):
        """Ensure the volume exists and is attached to the target VM."""
        volume = self.present_volume()

        if volume:
            # Detach first if it is attached to a different VM.
            if volume.get('virtualmachineid') != self.get_vm(key='id'):
                self.result['changed'] = True

                if not self.module.check_mode:
                    volume = self.detached_volume()

            if 'attached' not in volume:
                self.result['changed'] = True

                args = {
                    'id': volume['id'],
                    'virtualmachineid': self.get_vm(key='id'),
                    'deviceid': self.module.params.get('device_id'),
                }
                if not self.module.check_mode:
                    res = self.query_api('attachVolume', **args)
                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        volume = self.poll_job(res, 'volume')
        return volume
    def detached_volume(self):
        """Ensure the volume exists and is not attached to any VM."""
        volume = self.present_volume()

        if volume:
            # Presence of the 'attached' timestamp marks an attached volume.
            if 'attached' not in volume:
                return volume

            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.query_api('detachVolume', id=volume['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        return volume
    def absent_volume(self):
        """Ensure the volume does not exist, detaching first when forced."""
        volume = self.get_volume()

        if volume:
            # Refuse to delete an attached volume unless force=true.
            if 'attached' in volume and not self.module.params.get('force'):
                self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))

            self.result['changed'] = True
            if not self.module.check_mode:
                volume = self.detached_volume()

                res = self.query_api('deleteVolume', id=volume['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'volume')

        return volume
    def update_volume(self, volume):
        """Resize the volume when size/offering/iops params differ."""
        args_resize = {
            'id': volume['id'],
            'diskofferingid': self.get_disk_offering(key='id'),
            'maxiops': self.module.params.get('max_iops'),
            'miniops': self.module.params.get('min_iops'),
            'size': self.module.params.get('size')
        }
        # change unit from bytes to giga bytes to compare with args
        volume_copy = volume.copy()
        volume_copy['size'] = volume_copy['size'] / (2**30)
        # NOTE(review): under Python 3 this is true division and yields a
        # float; the has_changed comparison with the int `size` param appears
        # to rely on numeric equality (e.g. 20 == 20.0) — confirm.

        if self.has_changed(args_resize, volume_copy):
            self.result['changed'] = True

            if not self.module.check_mode:
                args_resize['shrinkok'] = self.module.params.get('shrink_ok')
                res = self.query_api('resizeVolume', **args_resize)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
                self.volume = volume

        return volume
    def extract_volume(self):
        """Extract the volume so it can be downloaded; fail if not found."""
        volume = self.get_volume()
        if not volume:
            self.module.fail_json(msg="Failed: volume not found")

        args = {
            'id': volume['id'],
            'url': self.module.params.get('url'),
            # API expects the mode upper-cased (e.g. HTTP_DOWNLOAD).
            'mode': self.module.params.get('mode').upper(),
            'zoneid': self.get_zone(key='id')
        }
        self.result['changed'] = True

        if not self.module.check_mode:
            res = self.query_api('extractVolume', **args)
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                volume = self.poll_job(res, 'volume')
            self.volume = volume

        return volume
def upload_volume(self):
    """Upload a volume from a URL unless one with the same name exists.

    Always (re)applies tags on the resulting volume.  Returns the volume
    dict, or None in check mode when nothing was uploaded.
    """
    volume = self.get_volume()
    if not volume:
        params = self.module.params
        # Resolve the disk offering first; a lookup failure aborts before
        # we report any change.
        disk_offering_id = self.get_disk_offering(key='id')

        self.result['changed'] = True

        upload_args = {
            'name': params.get('name'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
            'format': params.get('format'),
            'url': params.get('url'),
            'diskofferingid': disk_offering_id,
        }
        if not self.module.check_mode:
            res = self.query_api('uploadVolume', **upload_args)
            if params.get('poll_async'):
                volume = self.poll_job(res, 'volume')

    if volume:
        volume = self.ensure_tags(resource=volume, resource_type='Volume')
        self.volume = volume

    return volume
def main():
    """Entry point for the cs_volume Ansible module.

    Builds the argument spec, dispatches on ``state`` to the matching
    AnsibleCloudStackVolume action, and exits with the resulting facts.
    """
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        disk_offering=dict(),
        display_volume=dict(type='bool'),
        max_iops=dict(type='int'),
        min_iops=dict(type='int'),
        size=dict(type='int'),
        snapshot=dict(),
        vm=dict(),
        device_id=dict(type='int'),
        custom_id=dict(),
        force=dict(type='bool', default=False),
        shrink_ok=dict(type='bool', default=False),
        state=dict(default='present', choices=[
            'present',
            'absent',
            'attached',
            'detached',
            'extracted',
            'uploaded',
        ]),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
        url=dict(),
        mode=dict(choices=['http_download', 'ftp_upload'], default='http_download'),
        format=dict(choices=['QCOW2', 'RAW', 'VHD', 'VHDX', 'OVA']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        # A volume can be created from a snapshot or a disk offering,
        # but not both.
        mutually_exclusive=(
            ['snapshot', 'disk_offering'],
        ),
        required_if=[
            ('state', 'uploaded', ['url', 'format']),
        ],
        supports_check_mode=True
    )

    acs_vol = AnsibleCloudStackVolume(module)

    state = module.params.get('state')

    # Dispatch on the requested state; 'present' is the default action.
    if state in ['absent']:
        volume = acs_vol.absent_volume()
    elif state in ['attached']:
        volume = acs_vol.attached_volume()
    elif state in ['detached']:
        volume = acs_vol.detached_volume()
    elif state == 'extracted':
        volume = acs_vol.extract_volume()
    elif state == 'uploaded':
        volume = acs_vol.upload_volume()
    else:
        volume = acs_vol.present_volume()

    result = acs_vol.get_result(volume)
    module.exit_json(**result)
# Entry point when the module file is executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
drcapulet/sentry | src/sentry/api/endpoints/project_group_index.py | 9 | 12177 | from __future__ import absolute_import, division, print_function
from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.app import search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.constants import (
DEFAULT_SORT_OPTION, STATUS_CHOICES
)
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, Group, GroupBookmark, GroupSeen, GroupStatus, TagKey
)
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
from sentry.utils.dates import parse_date
class GroupSerializer(serializers.Serializer):
    # Validates the payload for bulk group mutation; used with partial=True,
    # so every field here is optional in a request.
    # 'choices' maps each status key to itself (value == display label).
    status = serializers.ChoiceField(choices=zip(
        STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
    ))
    hasSeen = serializers.BooleanField()
    isBookmarked = serializers.BooleanField()
    isPublic = serializers.BooleanField()
    merge = serializers.BooleanField()
class ProjectGroupIndexEndpoint(ProjectEndpoint):
    doc_section = DocSection.EVENTS

    permission_classes = (ProjectEventPermission,)

    # Supported querystring filters:
    #   bookmarks=0/1
    #   status=<x>
    #   <tag>=<value>
    def get(self, request, project):
        """
        List a project's aggregates

        Return a list of aggregates bound to a project.

            {method} {path}

        A default query of 'is:unresolved' is applied. To return results with
        other statuses send a new query value (i.e. ?query= for all results).

        Any standard Sentry structured search query can be passed via the
        ``query`` parameter.
        """
        query_kwargs = {
            'project': project,
        }

        if request.GET.get('status'):
            try:
                query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
            except KeyError:
                return Response('{"detail": "invalid status"}', status=400)

        if request.user.is_authenticated() and request.GET.get('bookmarks'):
            query_kwargs['bookmarked_by'] = request.user

        if request.user.is_authenticated() and request.GET.get('assigned'):
            query_kwargs['assigned_to'] = request.user

        sort_by = request.GET.get('sort')
        if sort_by is None:
            sort_by = DEFAULT_SORT_OPTION

        query_kwargs['sort_by'] = sort_by

        # Any known tag key for the project may be used directly as a
        # querystring filter.
        tags = {}
        for tag_key in TagKey.objects.all_keys(project):
            if request.GET.get(tag_key):
                tags[tag_key] = request.GET[tag_key]

        if tags:
            query_kwargs['tags'] = tags

        # TODO: dates should include timestamps
        date_from = request.GET.get('since')
        # NOTE(review): the 'until' parameter feeding time_from (rather than
        # a date_to/time counterpart) looks suspicious -- confirm the
        # intended querystring mapping.
        time_from = request.GET.get('until')
        date_filter = request.GET.get('date_filter')
        date_to = request.GET.get('dt')
        time_to = request.GET.get('tt')
        limit = request.GET.get('limit')
        if limit:
            try:
                query_kwargs['limit'] = int(limit)
            except ValueError:
                return Response('{"detail": "invalid limit"}', status=400)

        today = timezone.now()
        # date format is Y-m-d
        if any(x is not None for x in [date_from, time_from, date_to, time_to]):
            date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
        else:
            # Default window: the trailing five days, open-ended.
            date_from = today - timedelta(days=5)
            date_to = None

        query_kwargs['date_from'] = date_from
        query_kwargs['date_to'] = date_to
        if date_filter:
            query_kwargs['date_filter'] = date_filter

        # TODO: proper pagination support
        cursor = request.GET.get('cursor')
        if cursor:
            query_kwargs['cursor'] = Cursor.from_string(cursor)

        query = request.GET.get('query', 'is:unresolved')
        if query is not None:
            # Merge structured-search terms (e.g. is:..., assigned:...)
            # into the search kwargs; these override earlier values.
            query_kwargs.update(parse_query(query, request.user))

        cursor_result = search.query(**query_kwargs)

        context = list(cursor_result)

        response = Response(serialize(context, request.user, StreamGroupSerializer()))
        response['Link'] = ', '.join([
            self.build_cursor_link(request, 'previous', cursor_result.prev),
            self.build_cursor_link(request, 'next', cursor_result.next),
        ])
        return response

    def put(self, request, project):
        """
        Bulk mutate a list of aggregates

        Bulk mutate various attributes on aggregates.

            {method} {path}?id=1&id=2&id=3
            {{
              "status": "resolved",
              "isBookmarked": true
            }}

        - For non-status updates, the 'id' parameter is required.
        - For status updates, the 'id' parameter may be omitted for a batch
          "update all" query.
        - An optional 'status' parameter may be used to restrict mutations to
          only events with the given status.

        For example, to resolve all aggregates (project-wide):

            {method} {path}
            {{
                "status": "resolved"
            }}

        Attributes:

        - status: resolved, unresolved, muted
        - hasSeen: true, false
        - isBookmarked: true, false
        - isPublic: true, false
        - merge: true, false

        If any ids are out of scope this operation will succeed without any
        data mutation.
        """
        group_ids = request.GET.getlist('id')
        if group_ids:
            group_list = Group.objects.filter(project=project, id__in=group_ids)
            # filter down group ids to only valid matches
            group_ids = [g.id for g in group_list]

            if not group_ids:
                return Response(status=204)
        else:
            group_list = None

        serializer = GroupSerializer(data=request.DATA, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        result = serializer.object

        # validate that we've passed a selector for non-status bulk operations
        # NOTE(review): assumes .keys() returns a list (Python 2 semantics);
        # under Python 3 dict_keys != list is always True -- verify before
        # porting.
        if not group_ids and result.keys() != ['status']:
            return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)

        if group_ids:
            filters = [Q(id__in=group_ids)]
        else:
            filters = [Q(project=project)]

        if request.GET.get('status'):
            try:
                status_filter = STATUS_CHOICES[request.GET['status']]
            except KeyError:
                return Response('{"detail": "Invalid status"}', status=400)
            filters.append(Q(status=status_filter))

        if result.get('status') == 'resolved':
            now = timezone.now()

            # Bulk-update first; 'happened' is the number of rows changed,
            # so per-group activity is only recorded when something changed.
            happened = Group.objects.filter(*filters).exclude(
                status=GroupStatus.RESOLVED,
            ).update(
                status=GroupStatus.RESOLVED,
                resolved_at=now,
            )

            if group_list and happened:
                for group in group_list:
                    group.status = GroupStatus.RESOLVED
                    group.resolved_at = now
                    activity = Activity.objects.create(
                        project=group.project,
                        group=group,
                        type=Activity.SET_RESOLVED,
                        user=request.user,
                    )
                    activity.send_notification()
        elif result.get('status'):
            new_status = STATUS_CHOICES[result['status']]

            happened = Group.objects.filter(*filters).exclude(
                status=new_status,
            ).update(
                status=new_status,
            )
            if group_list and happened:
                if new_status == GroupStatus.UNRESOLVED:
                    activity_type = Activity.SET_UNRESOLVED
                elif new_status == GroupStatus.MUTED:
                    activity_type = Activity.SET_MUTED

                for group in group_list:
                    group.status = new_status
                    activity = Activity.objects.create(
                        project=group.project,
                        group=group,
                        type=activity_type,
                        user=request.user,
                    )
                    activity.send_notification()

        if result.get('hasSeen'):
            for group in group_list:
                instance, created = create_or_update(
                    GroupSeen,
                    group=group,
                    user=request.user,
                    project=group.project,
                    values={
                        'last_seen': timezone.now(),
                    }
                )
        elif result.get('hasSeen') is False:
            GroupSeen.objects.filter(
                group__in=group_ids,
                user=request.user,
            ).delete()

        if result.get('isBookmarked'):
            for group in group_list:
                GroupBookmark.objects.get_or_create(
                    project=group.project,
                    group=group,
                    user=request.user,
                )
        elif result.get('isBookmarked') is False:
            GroupBookmark.objects.filter(
                group__in=group_ids,
                user=request.user,
            ).delete()

        if result.get('isPublic'):
            Group.objects.filter(
                id__in=group_ids,
            ).update(is_public=True)
            for group in group_list:
                # Only record activity for groups that actually flipped.
                if group.is_public:
                    continue
                group.is_public = True
                Activity.objects.create(
                    project=group.project,
                    group=group,
                    type=Activity.SET_PUBLIC,
                    user=request.user,
                )
        elif result.get('isPublic') is False:
            Group.objects.filter(
                id__in=group_ids,
            ).update(is_public=False)
            for group in group_list:
                if not group.is_public:
                    continue
                group.is_public = False
                Activity.objects.create(
                    project=group.project,
                    group=group,
                    type=Activity.SET_PRIVATE,
                    user=request.user,
                )

        # XXX(dcramer): this feels a bit shady like it should be its own
        # endpoint
        if result.get('merge') and len(group_list) > 1:
            # The most-seen group absorbs the others.
            primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
            for group in group_list:
                if group == primary_group:
                    continue
                merge_group.delay(
                    from_object_id=group.id,
                    to_object_id=primary_group.id,
                )

        return Response(dict(result))

    def delete(self, request, project):
        """
        Bulk remove a list of aggregates

        Permanently remove the given aggregates. Only queries by 'id' are
        accepted.

            {method} {path}?id=1&id=2&id=3

        If any ids are out of scope this operation will succeed without any
        data mutation.
        """
        group_ids = request.GET.getlist('id')
        if group_ids:
            group_list = Group.objects.filter(project=project, id__in=group_ids)
            # filter down group ids to only valid matches
            group_ids = [g.id for g in group_list]
        else:
            # missing any kind of filter
            return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)

        if not group_ids:
            return Response(status=204)

        # TODO(dcramer): set status to pending deletion
        for group in group_list:
            delete_group.delay(object_id=group.id)

        return Response(status=204)
| bsd-3-clause |
xiandiancloud/edxplaltfom-xusong | common/djangoapps/course_groups/tests/test_views.py | 20 | 7018 | import json
from django.test.client import RequestFactory
from django.test.utils import override_settings
from factory import post_generation, Sequence
from factory.django import DjangoModelFactory
from course_groups.models import CourseUserGroup
from course_groups.views import add_users_to_cohort
from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class CohortFactory(DjangoModelFactory):
    # factory_boy factory producing CourseUserGroup cohorts with
    # auto-numbered names ("cohort0", "cohort1", ...).
    FACTORY_FOR = CourseUserGroup

    name = Sequence("cohort{}".format)
    course_id = "dummy_id"
    group_type = CourseUserGroup.COHORT

    @post_generation
    def users(self, create, extracted, **kwargs):  # pylint: disable=W0613
        # Populate the many-to-many users relation after the cohort row
        # exists; 'extracted' is the list passed as users=... to create().
        self.users.add(*extracted)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class AddUsersToCohortTestCase(ModuleStoreTestCase):
    # Exercises the add_users_to_cohort view: users are always added to
    # cohort1 and the response classifies them as added / changed /
    # present / unknown.
    def setUp(self):
        # Three populated cohorts plus a pool of users in no cohort.
        self.course = CourseFactory.create()
        self.staff_user = UserFactory.create(is_staff=True)
        self.cohort1_users = [UserFactory.create() for _ in range(3)]
        self.cohort2_users = [UserFactory.create() for _ in range(2)]
        self.cohort3_users = [UserFactory.create() for _ in range(2)]
        self.cohortless_users = [UserFactory.create() for _ in range(3)]
        self.cohort1 = CohortFactory.create(course_id=self.course.id, users=self.cohort1_users)
        self.cohort2 = CohortFactory.create(course_id=self.course.id, users=self.cohort2_users)
        self.cohort3 = CohortFactory.create(course_id=self.course.id, users=self.cohort3_users)

    def check_request(
            self,
            users_string,
            expected_added=None,
            expected_changed=None,
            expected_present=None,
            expected_unknown=None
    ):
        """
        Check that add_users_to_cohort returns the expected result and has the
        expected side effects. The given users will be added to cohort1.

        users_string is the string input entered by the client

        expected_added is a list of users

        expected_changed is a list of (user, previous_cohort) tuples

        expected_present is a list of (user, email/username) tuples where
        email/username corresponds to the input

        expected_unknown is a list of strings corresponding to the input
        """
        expected_added = expected_added or []
        expected_changed = expected_changed or []
        expected_present = expected_present or []
        expected_unknown = expected_unknown or []
        request = RequestFactory().post("dummy_url", {"users": users_string})
        request.user = self.staff_user
        response = add_users_to_cohort(request, self.course.id.to_deprecated_string(), self.cohort1.id)
        self.assertEqual(response.status_code, 200)

        result = json.loads(response.content)
        self.assertEqual(result.get("success"), True)
        self.assertItemsEqual(
            result.get("added"),
            [
                {"username": user.username, "name": user.profile.name, "email": user.email}
                for user in expected_added
            ]
        )
        self.assertItemsEqual(
            result.get("changed"),
            [
                {
                    "username": user.username,
                    "name": user.profile.name,
                    "email": user.email,
                    "previous_cohort": previous_cohort
                }
                for (user, previous_cohort) in expected_changed
            ]
        )
        self.assertItemsEqual(
            result.get("present"),
            [username_or_email for (_, username_or_email) in expected_present]
        )
        self.assertItemsEqual(result.get("unknown"), expected_unknown)
        # Every added, changed, or already-present user must now resolve
        # to cohort1 in the database.
        for user in expected_added + [user for (user, _) in expected_changed + expected_present]:
            self.assertEqual(
                CourseUserGroup.objects.get(
                    course_id=self.course.id,
                    group_type=CourseUserGroup.COHORT,
                    users__id=user.id
                ),
                self.cohort1
            )

    def test_empty(self):
        # An empty input is a no-op success.
        self.check_request("")

    def test_only_added(self):
        self.check_request(
            ",".join([user.username for user in self.cohortless_users]),
            expected_added=self.cohortless_users
        )

    def test_only_changed(self):
        self.check_request(
            ",".join([user.username for user in self.cohort2_users + self.cohort3_users]),
            expected_changed=(
                [(user, self.cohort2.name) for user in self.cohort2_users] +
                [(user, self.cohort3.name) for user in self.cohort3_users]
            )
        )

    def test_only_present(self):
        usernames = [user.username for user in self.cohort1_users]
        self.check_request(
            ",".join(usernames),
            expected_present=[(user, user.username) for user in self.cohort1_users]
        )

    def test_only_unknown(self):
        usernames = ["unknown_user{}".format(i) for i in range(3)]
        self.check_request(
            ",".join(usernames),
            expected_unknown=usernames
        )

    def test_all(self):
        # Mixes all four classifications in a single request.
        unknowns = ["unknown_user{}".format(i) for i in range(3)]
        self.check_request(
            ",".join(
                unknowns +
                [
                    user.username
                    for user in self.cohortless_users + self.cohort1_users + self.cohort2_users + self.cohort3_users
                ]
            ),
            self.cohortless_users,
            (
                [(user, self.cohort2.name) for user in self.cohort2_users] +
                [(user, self.cohort3.name) for user in self.cohort3_users]
            ),
            [(user, user.username) for user in self.cohort1_users],
            unknowns
        )

    def test_emails(self):
        # Users may also be identified by email address.
        unknown = "unknown_user@example.com"
        self.check_request(
            ",".join([
                self.cohort1_users[0].email,
                self.cohort2_users[0].email,
                self.cohortless_users[0].email,
                unknown
            ]),
            [self.cohortless_users[0]],
            [(self.cohort2_users[0], self.cohort2.name)],
            [(self.cohort1_users[0], self.cohort1_users[0].email)],
            [unknown]
        )

    def test_delimiters(self):
        # Spaces, tabs, commas and newlines are all accepted separators.
        unknown = "unknown_user"
        self.check_request(
            " {} {}\t{}, \r\n{}".format(
                unknown,
                self.cohort1_users[0].username,
                self.cohort2_users[0].username,
                self.cohortless_users[0].username
            ),
            [self.cohortless_users[0]],
            [(self.cohort2_users[0], self.cohort2.name)],
            [(self.cohort1_users[0], self.cohort1_users[0].username)],
            [unknown]
        )
Dreizan/csci1200OnlineCourse | scripts/modules.py | 6 | 11360 | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage fetching and installation of extension modules to CourseBuilder.
To run, use the wrapper script in this directory since it configures paths for
you:
sh scripts/modules.sh [args]
For example, to bring in the LTI module you can run
sh scripts/modules.sh \
--targets=lti@https://github.com/google/coursebuilder-lti-module
"""
__author__ = 'Mike Gainer (mgainer@google.com)'
import argparse
import collections
import logging
import os
import subprocess
import sys
import time
from common import yaml_files
# Standard name of manifest file within a module.
_MANIFEST_NAME = 'module.yaml'

# Number of attempts to kill wayward subprocesses before giving up entirely.
_KILL_ATTEMPTS = 5

# Command line flags supported.  --targets takes a comma-separated list,
# split here by the argparse 'type' callable.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    '--targets', default=[], type=lambda s: s.split(','),
    help=(
        'List of modules to use. Multiple modules may be listed separated by '
        'commas. If a module has already been downloaded, or is on the '
        'list of well-known modules (see scripts/module_config.py source), '
        'then the module source need not be provided. If the module needs '
        'to be downloaded, then the name of the module should be followed by '
        'an "@" character, and then the URL at which the module is '
        'available. E.g., '
        '--modules=example@https://github.com/my-company/my_example_module'
    ))

# Logging.
_LOG = logging.getLogger('coursebuilder.models.module_config')
logging.basicConfig()
_LOG.setLevel(logging.INFO)

# Convenience types with just data, no behavior.
WellKnownModule = collections.namedtuple(
    'WellKnownModule', ['name', 'method', 'location'])

# List of modules for which we already know the source location, so the
# command line need not supply one.
_WELL_KNOWN_MODULES = {
    'lti': WellKnownModule(
        'lti', 'git',
        'https://github.com/google/coursebuilder-lti-module'),
    'xblock': WellKnownModule(
        'xblock', 'git',
        'https://github.com/google/coursebuilder_xblock_module'),
}
def _die(message):
    """Log *message* at CRITICAL level, then abort by raising Exception."""
    _LOG.critical(message)
    raise Exception(message)
def _assert_path_exists(path, message):
if not os.path.exists(path):
_die(message)
def _run_process(args, patience_seconds=10):
    """Run *args* as a subprocess, dying on nonzero exit or timeout.

    The child gets *patience_seconds* to finish on its own; after that it is
    repeatedly sent kill() for up to _KILL_ATTEMPTS more seconds before we
    give up entirely and die with a diagnostic.

    Args:
        args: argv-style list for subprocess.Popen.
        patience_seconds: seconds to wait before starting kill attempts.
    """
    proc = subprocess.Popen(args)
    cmdline = ' '.join(args)
    start = time.time()
    max_expected = start + patience_seconds
    absolute_max = start + patience_seconds + _KILL_ATTEMPTS
    while time.time() < absolute_max:
        if time.time() > max_expected:
            proc.kill()
        proc.poll()
        if proc.returncode is not None:
            if proc.returncode == 0:
                return
            # _die() raises, so no further handling is needed here.
            _die('The command "%s" completed with exit code %d. Please '
                 'run that command manually, ascertain and remedy the '
                 'problem, and try again.' % (cmdline, proc.returncode))
        # Sleep briefly so the wait loop does not spin at 100% CPU while
        # the child is still running.
        time.sleep(0.1)
    _die('The command "%s" failed to complete after %d seconds, and '
         '%d attempts to kill it. You should manually kill the process. '
         'Please run that command manually and ascertain and remedy the '
         'problem.' % (cmdline, int(time.time() - start), _KILL_ATTEMPTS))
def _download_if_needed(name, location, module_install_dir):
    """Fetch module *name* into *module_install_dir* unless already present.

    Presence is detected by the module manifest file; after any download the
    manifest must exist, or we die with a diagnostic.

    Args:
        name: short module name (e.g. 'lti').
        location: URL or local path, or None to rely on the well-known list.
        module_install_dir: directory the module should end up in.
    """
    manifest_path = os.path.join(module_install_dir, _MANIFEST_NAME)
    if os.path.exists(manifest_path):
        return

    _LOG.info('Downloading module %s', name)
    all_modules_dir = os.path.dirname(module_install_dir)
    if not os.path.exists(all_modules_dir):
        os.makedirs(all_modules_dir)

    # Well-known modules carry a registered method and default location;
    # an explicit command-line location still takes precedence.
    if name in _WELL_KNOWN_MODULES:
        method = _WELL_KNOWN_MODULES[name].method
        if not location:
            location = _WELL_KNOWN_MODULES[name].location
    else:
        if not location:
            _die('Module "%s" needs to be downloaded, but its location was '
                 'not provided on the command line.' % name)
        method = _infer_method_from_location(location)

    if method == 'git':
        _run_process(['git', 'clone', location, module_install_dir])
    elif method == 'cp-r':
        _run_process(['cp', '-r', location, module_install_dir])
    else:
        _die('We would like to download module "%s" ' % name +
             'from location "%s", ' % location +
             'but no implementation for downloading via %s ' % method +
             'has been implemented as yet.')

    _assert_path_exists(
        manifest_path,
        'Modules are expected to contain ' +
        'a manifest file named "%s" ' % _MANIFEST_NAME +
        'in their root directory when installed. ' +
        'Module %s at path %s does not. ' % (name, manifest_path))
def _infer_method_from_location(location):
# Not terribly sophisticated. When modules start showing up at places
# other than github, we can make this a little smarter.
if location.startswith('https://github.com/'):
return 'git'
# For testing, and/or pulling in not-really-third party stuff from
# elsewhere in a local work environment.
if location.startswith('/tmp/') or location.startswith('../'):
return 'cp-r'
return 'unknown'
def _install_if_needed(app_yaml, name, module_install_dir, coursebuilder_home):
    """Run a module's scripts/setup.sh unless it already appears installed.

    Args:
        app_yaml: parsed app.yaml wrapper; used for version compatibility.
        name: short module name.
        module_install_dir: where the module source was downloaded.
        coursebuilder_home: root of the CourseBuilder checkout.
    Returns:
        The parsed module manifest (yaml_files.ModuleManifest).
    """
    # Verify version compatibility before attempting installation.
    coursebuilder_version = app_yaml.get_env('GCB_PRODUCT_VERSION')
    module = yaml_files.ModuleManifest(
        os.path.join(module_install_dir, _MANIFEST_NAME))
    module.assert_version_compatibility(coursebuilder_version)

    # This is the best we can do as far as verifying that a module has been
    # installed.  Modules have quite a bit of free rein as far as what-all
    # they may or may not do -- setting up $CB/modules/<modulename> is the
    # only hard requirement.  Note that this may even be a softlink, so
    # we just test for existence, not is-a-directory.
    if os.path.exists(os.path.join(coursebuilder_home, 'modules', name)):
        return module
    _LOG.info('Installing module %s', name)

    # Verify setup script exists and give a nice error message if not (rather
    # than letting _run_process emit an obscure error).
    install_script_path = os.path.join(
        module_install_dir, 'scripts', 'setup.sh')
    _assert_path_exists(
        install_script_path,
        'Modules are expected to provide a script to perform installation of '
        'the module at <module-root>/scripts/setup.sh No such file was found '
        'in module %s' % name)

    # Have $PWD set to the install directory for the module when calling
    # setup.sh, just in case the setup script needs to discover its own
    # location in order to set up softlinks.
    cwd = os.getcwd()
    try:
        os.chdir(module_install_dir)
        _run_process(['bash', install_script_path, '-d', coursebuilder_home])
    finally:
        os.chdir(cwd)

    # Verify the installation produced the one artifact we can check for:
    # the module's package marker under $CB/modules/<name>.
    # (A redundant recomputation of install_script_path, with a stale
    # copy-pasted comment, previously sat here; it was dead code.)
    init_file_path = os.path.join(
        coursebuilder_home, 'modules', name, '__init__.py')
    _assert_path_exists(
        init_file_path,
        'After installing module %s, there should have been an __init__.py '
        'file present at the path %s, but there was not.' % (
            name, init_file_path))
    return module
def _update_appengine_libraries(app_yaml, modules):
for module in modules:
for lib in module.appengine_libraries:
app_yaml.require_library(lib['name'], lib['version'])
def _construct_third_party_libraries(modules):
libs_str_parts = []
libs = {}
for module in modules:
for lib in module.third_party_libraries:
name = lib['name']
internal_path = lib.get('internal_path')
if lib['name'] in libs:
if internal_path != libs[name]:
raise ValueError(
'Module %s ' % module.module_name +
'specifies third party library "%s" ' % name +
'with internal path "%s" ' % internal_path +
'but this is incompatible with the '
'already-specified internal path "%s"' % libs[name])
else:
if internal_path:
libs_str_parts.append(' %s:%s' % (name, internal_path))
else:
libs_str_parts.append(' %s' % name)
return ''.join(libs_str_parts)
def _update_third_party_libraries(app_yaml, modules):
    """Set GCB_THIRD_PARTY_LIBRARIES in app.yaml from module manifests."""
    libs_str = _construct_third_party_libraries(modules)
    app_yaml.set_env('GCB_THIRD_PARTY_LIBRARIES', libs_str)
def _update_enabled_modules(app_yaml, modules):
new_val = ' '.join([module.main_module for module in modules])
app_yaml.set_env('GCB_THIRD_PARTY_MODULES', new_val)
def _update_tests(coursebuilder_home, modules):
tests = {}
for module in modules:
tests.update(module.tests)
yaml_path = os.path.join(coursebuilder_home, 'scripts',
'third_party_tests.yaml')
if tests:
_LOG.info('Updating scripts/third_party_tests.yaml')
with open(yaml_path, 'w') as fp:
fp.write('tests:\n')
for test in sorted(tests):
fp.write(' %s: %d\n' % (test, tests[test]))
else:
if os.path.exists(yaml_path):
os.unlink(yaml_path)
def main(args, coursebuilder_home, modules_home):
    """Download, install and register each module named in args.targets.

    Args:
        args: parsed PARSER namespace; targets is a list of "<name>" or
            "<name>@<location>" strings.
        coursebuilder_home: root of the CourseBuilder checkout (holds
            app.yaml and the modules/ directory).
        modules_home: directory under which module sources are downloaded.
    """
    modules = []
    app_yaml = yaml_files.AppYamlFile(
        os.path.join(coursebuilder_home, 'app.yaml'))
    for module_name in args.targets:
        # Split "<name>@<location>"; location is optional.
        parts = module_name.split('@')
        name = parts[0]
        location = parts[1] if len(parts) > 1 else None
        install_dir = os.path.join(modules_home, name)
        _download_if_needed(name, location, install_dir)
        module = _install_if_needed(app_yaml, name, install_dir,
                                    coursebuilder_home)
        modules.append(module)

    # Rewrite the derived configuration from the full set of manifests.
    _update_tests(coursebuilder_home, modules)
    _LOG.info('Updating app.yaml')
    _update_appengine_libraries(app_yaml, modules)
    _update_third_party_libraries(app_yaml, modules)
    _update_enabled_modules(app_yaml, modules)
    app_yaml.write()

    if app_yaml.application == 'mycourse':
        _LOG.warning('The application name in app.yaml is "mycourse". You '
                     'should change this from its default value before '
                     'uploading to AppEngine.')
if __name__ == '__main__':
    # Entry point; the wrapper script scripts/modules.sh is expected to
    # export these environment variables (see module docstring).
    main(PARSER.parse_args(),
         os.environ['COURSEBUILDER_HOME'],
         os.environ['MODULES_HOME'])
| apache-2.0 |
shaufi10/odoo | addons/lunch/wizard/__init__.py | 440 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lunch_validation
import lunch_cancel
import lunch_order
| agpl-3.0 |
crakensio/django_training | lib/python2.7/site-packages/django/db/models/sql/datastructures.py | 114 | 1853 | """
Useful auxilliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
    """Raised to signal that a query will return no results."""
    pass
class MultiJoin(Exception):
    """
    Used by join construction code to indicate the point at which a
    multi-valued join was attempted (if the caller wants to treat that
    exceptionally).
    """
    def __init__(self, names_pos, path_with_names):
        # Position within the name sequence where the multi-valued join
        # was encountered.
        self.level = names_pos
        # The path travelled, this includes the path to the multijoin.
        self.names_with_path = path_with_names
class Empty(object):
    """Bare placeholder class with no state or behavior of its own."""
    pass
class RawValue(object):
    """Simple wrapper that stores *value* unchanged."""
    def __init__(self, value):
        self.value = value
class Date(object):
    """
    Add a date selection column.

    *col* is either an (alias, column) pair or a raw SQL string;
    *lookup_type* is the truncation granularity (e.g. 'year', 'month').
    """
    def __init__(self, col, lookup_type):
        self.col = col
        self.lookup_type = lookup_type

    def relabeled_clone(self, change_map):
        # Bug fix: the clone must be constructed with lookup_type as well;
        # previously only the column tuple was passed, so __init__ raised
        # TypeError whenever a query containing a Date was relabeled.
        return self.__class__(
            (change_map.get(self.col[0], self.col[0]), self.col[1]),
            self.lookup_type)

    def as_sql(self, qn, connection):
        # A tuple column is (alias, name) and must be quoted; a plain
        # string is emitted as-is.
        if isinstance(self.col, (list, tuple)):
            col = '%s.%s' % tuple([qn(c) for c in self.col])
        else:
            col = self.col
        return connection.ops.date_trunc_sql(self.lookup_type, col), []
class DateTime(object):
    """
    Add a datetime selection column.

    Like Date, but timezone-aware: *tzname* is forwarded to the backend's
    datetime_trunc_sql.
    """
    def __init__(self, col, lookup_type, tzname):
        self.col = col
        self.lookup_type = lookup_type
        self.tzname = tzname

    def relabeled_clone(self, change_map):
        # Bug fix: the clone must carry lookup_type and tzname too;
        # previously only the column tuple was passed, so __init__ raised
        # TypeError whenever a query containing a DateTime was relabeled.
        return self.__class__(
            (change_map.get(self.col[0], self.col[0]), self.col[1]),
            self.lookup_type,
            self.tzname)

    def as_sql(self, qn, connection):
        if isinstance(self.col, (list, tuple)):
            col = '%s.%s' % tuple([qn(c) for c in self.col])
        else:
            col = self.col
        # datetime_trunc_sql returns (sql, params), so no wrapping here.
        return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname)
Zouyiran/ryu | .eggs/pbr-1.8.1-py2.7.egg/pbr/tests/test_core.py | 86 | 5269 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import glob
import os
import tarfile
import fixtures
from pbr.tests import base
class TestCore(base.BaseTestCase):

    # Console scripts the test package is expected to install.
    cmd_names = ('pbr_test_cmd', 'pbr_test_cmd_with_class')

    def check_script_install(self, install_stdout):
        """Assert every expected script was installed and runs cleanly.

        Each installed script must not reference pkg_resources and must
        print "PBR" when executed.
        """
        for cmd_name in self.cmd_names:
            install_txt = 'Installing %s script to %s' % (cmd_name,
                                                          self.temp_dir)
            self.assertIn(install_txt, install_stdout)

            cmd_filename = os.path.join(self.temp_dir, cmd_name)

            script_txt = open(cmd_filename, 'r').read()
            self.assertNotIn('pkg_resources', script_txt)

            stdout, _, return_code = self._run_cmd(cmd_filename)
            self.assertIn("PBR", stdout)

    def test_setup_py_keywords(self):
        """setup.py --keywords.

        Test that the `./setup.py --keywords` command returns the correct
        value without balking.
        """
        self.run_setup('egg_info')
        stdout, _, _ = self.run_setup('--keywords')
        assert stdout == 'packaging,distutils,setuptools'

    def test_sdist_extra_files(self):
        """Test that the extra files are correctly added."""
        stdout, _, return_code = self.run_setup('sdist', '--formats=gztar')

        # There can be only one
        try:
            tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0]
        except IndexError:
            assert False, 'source dist not found'

        tf = tarfile.open(tf_path)
        # Strip the leading "<package>-<version>/" component of each member.
        names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()]

        self.assertIn('extra-file.txt', names)

    def test_console_script_install(self):
        """Test that we install a non-pkg-resources console script."""
        if os.name == 'nt':
            self.skipTest('Windows support is passthrough')

        stdout, _, return_code = self.run_setup(
            'install_scripts', '--install-dir=%s' % self.temp_dir)

        self.useFixture(
            fixtures.EnvironmentVariable('PYTHONPATH', '.'))

        self.check_script_install(stdout)

    def test_console_script_develop(self):
        """Test that we develop a non-pkg-resources console script."""
        if os.name == 'nt':
            self.skipTest('Windows support is passthrough')

        self.useFixture(
            fixtures.EnvironmentVariable(
                'PYTHONPATH', ".:%s" % self.temp_dir))

        stdout, _, return_code = self.run_setup(
            'develop', '--install-dir=%s' % self.temp_dir)

        self.check_script_install(stdout)
class TestGitSDist(base.BaseTestCase):
    """Tests for sdist generation from a git checkout."""

    def setUp(self):
        super(TestGitSDist, self).setUp()

        stdout, _, return_code = self._run_cmd('git', ('init',))
        if return_code:
            self.skipTest("git not installed")

        stdout, _, return_code = self._run_cmd('git', ('add', '.'))
        stdout, _, return_code = self._run_cmd(
            'git', ('commit', '-m', 'Turn this into a git repo'))

        stdout, _, return_code = self.run_setup('sdist', '--formats=gztar')

    def test_sdist_git_extra_files(self):
        """Test that extra files found in git are correctly added."""
        # There can be only one
        tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0]
        # Close the tarball explicitly instead of leaking the handle.
        with tarfile.open(tf_path) as tf:
            names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()]

        self.assertIn('git-extra-file.txt', names)
| apache-2.0 |
Drooids/odoo | addons/l10n_ro/res_partner.py | 309 | 2255 | # -*- encoding: utf-8 -*-
##############################################################################
#
# @author - Fekete Mihai <feketemihai@gmail.com>
# Copyright (C) 2011 TOTAL PC SYSTEMS (http://www.www.erpsystems.ro).
# Copyright (C) 2009 (<http://www.filsystem.ro>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    """Romanian localisation of res.partner.

    Adds the NRC (Registry of Commerce registration number) field and
    drops the uniqueness indexes that Romanian legislation does not
    require.
    """
    _name = "res.partner"
    _inherit = "res.partner"
    _columns = {
        # NRC: registration number at the Romanian Registry of Commerce.
        'nrc' : fields.char('NRC', help='Registration number at the Registry of Commerce'),
    }
    def _auto_init(self, cr, context=None):
        # Run the normal schema initialisation first, then relax constraints.
        result = super(res_partner, self)._auto_init(cr, context=context)
        # Remove constrains for vat, nrc on "commercial entities" because is not mandatory by legislation
        # Even that VAT numbers are unique, the NRC field is not unique, and there are certain entities that
        # doesn't have a NRC number plus the formatting was changed few times, so we cannot have a base rule for
        # checking if available and emmited by the Ministry of Finance, only online on their website.
        cr.execute("""
            DROP INDEX IF EXISTS res_partner_vat_uniq_for_companies;
            DROP INDEX IF EXISTS res_partner_nrc_uniq_for_companies;
        """)
        return result
    def _commercial_fields(self, cr, uid, context=None):
        # Propagate 'nrc' from the commercial partner to its child contacts,
        # like the other commercial fields.
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['nrc']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JuliBakagianni/CEF-ELRC | lib/python2.7/site-packages/django/views/decorators/cache.py | 229 | 3639 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
    """
    Decorator for views that tries getting the page from the cache and
    populates the cache if the page isn't in the cache yet.

    The cache is keyed by the URL and some data from the headers.
    Additionally there is the key prefix that is used to distinguish different
    cache areas in a multi-site setup. You could use the
    sites.get_current().domain, for example, as that is unique across a Django
    project.

    Additionally, all headers from the response's Vary header will be taken
    into account on caching -- just like the middleware does.
    """
    # We need backwards compatibility with code which spells it this way:
    #   def my_view(): pass
    #   my_view = cache_page(my_view, 123)
    # and this way:
    #   my_view = cache_page(123)(my_view)
    # and this:
    #   my_view = cache_page(my_view, 123, key_prefix="foo")
    # and this:
    #   my_view = cache_page(123, key_prefix="foo")(my_view)
    # and possibly this way (?):
    #   my_view = cache_page(123, my_view)
    # and also this way:
    #   my_view = cache_page(my_view)
    # and also this way:
    #   my_view = cache_page()(my_view)
    # We also add some asserts to give better error messages in case people are
    # using other ways to call cache_page that no longer work.
    cache_alias = kwargs.pop('cache', None)
    key_prefix = kwargs.pop('key_prefix', None)
    assert not kwargs, "The only keyword arguments are cache and key_prefix"
    if len(args) > 1:
        assert len(args) == 2, "cache_page accepts at most 2 arguments"
        if callable(args[0]):
            # cache_page(view, timeout): decorate immediately.
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        elif callable(args[1]):
            # cache_page(timeout, view): legacy argument order.
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
        else:
            assert False, "cache_page must be passed a view function if called with two arguments"
    elif len(args) == 1:
        if callable(args[0]):
            # cache_page(view): default timeout, decorate immediately.
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        else:
            # cache_page(timeout): return a decorator.
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
    else:
        # cache_page(): return a decorator with all defaults.
        return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
    """
    Decorator factory that patches the Cache-Control header of the view's
    response with the given directives (e.g. max_age=3600, private=True).
    """
    def decorator(viewfunc):
        def controlled_view(request, *args, **kw):
            # Run the view first, then rewrite its Cache-Control header.
            resp = viewfunc(request, *args, **kw)
            patch_cache_control(resp, **kwargs)
            return resp
        return wraps(viewfunc, assigned=available_attrs(viewfunc))(controlled_view)
    return decorator
def never_cache(view_func):
    """
    Decorator marking a view's responses as uncacheable by stamping them
    with the appropriate no-cache headers.
    """
    def no_cache_view(request, *args, **kwargs):
        resp = view_func(request, *args, **kwargs)
        # Mark the freshly produced response so clients and proxies skip caching.
        add_never_cache_headers(resp)
        return resp
    return wraps(view_func, assigned=available_attrs(view_func))(no_cache_view)
| bsd-3-clause |
chawlanikhil24/glustercli-python | gluster/cli/quota.py | 1 | 4960 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
from .utils import quota_execute, quota_execute_xml, volume_execute
from .parsers import parse_quota_list_paths, parse_quota_list_objects
def inode_quota_enable(volname):
    """
    Turn on inode quota accounting for a volume.

    :param volname: Volume Name
    :returns: Output of the inode-quota enable command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return volume_execute(["inode-quota", volname, "enable"])
def enable(volname):
    """
    Turn on quota enforcement for a volume.

    :param volname: Volume Name
    :returns: Output of the quota enable command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "enable"])
def disable(volname):
    """
    Turn off quota enforcement for a volume.

    :param volname: Volume Name
    :returns: Output of the quota disable command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "disable"])
def list_paths(volname, paths=None):
    """
    Get Quota List

    :param volname: Volume Name
    :param paths: Optional list of paths
    :returns: Quota list of paths, raises
     GlusterCmdException((rc, out, err)) on error
    """
    # Default to None instead of a mutable [] default: a shared list object
    # would persist (and could be mutated) across calls.
    cmd = [volname, "list"] + (paths or [])
    return parse_quota_list_paths(quota_execute_xml(cmd))
def list_objects(volname, paths=None):
    """
    Get Quota Objects List

    :param volname: Volume Name
    :param paths: Optional list of paths
    :returns: Quota list of objects, raises
     GlusterCmdException((rc, out, err)) on error
    """
    # Use the "list-objects" subcommand (object/inode counts); plain "list"
    # reports size usage, which parse_quota_list_objects does not expect.
    # Also avoid the mutable [] default argument.
    cmd = [volname, "list-objects"] + (paths or [])
    return parse_quota_list_objects(quota_execute_xml(cmd))
def remove_path(volname, path):
    """
    Drop a path's usage limit from the volume's quota configuration.

    :param volname: Volume Name
    :param path: Path to remove from quota
    :returns: Output of the quota remove-path command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "remove-path", path])
def remove_objects(volname, path):
    """
    Drop a path's object-count limit from the volume's quota configuration.

    :param volname: Volume Name
    :param path: Path to remove from quota
    :returns: Output of the quota remove-objects command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "remove-objects", path])
def default_soft_limit(volname, percent):
    """
    Configure the volume-wide default soft limit percentage.

    :param volname: Volume Name
    :param percent: Percent of soft limit
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "default-soft-limit", "{0}".format(percent)])
def limit_usage(volname, path, size, percent=None):
    """
    Set a disk-usage quota on a path.

    :param volname: Volume Name
    :param path: Path to limit quota
    :param size: Limit Size
    :param percent: Optional soft-limit percentage
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    args = [volname, "limit-usage", path, "{0}".format(size)]
    if percent is not None:
        args.append("{0}".format(percent))
    return quota_execute(args)
def limit_objects(volname, path, num, percent=None):
    """
    Set an object-count quota on a path.

    :param volname: Volume Name
    :param path: Path to limit quota
    :param num: Limit Number
    :param percent: Optional soft-limit percentage
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    args = [volname, "limit-objects", path, "{0}".format(num)]
    if percent is not None:
        args.append("{0}".format(percent))
    return quota_execute(args)
def alert_time(volname, alert_time):
    """
    Configure the quota alert interval for a volume.

    :param volname: Volume Name
    :param alert_time: Alert Time Value
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "alert-time", "{0}".format(alert_time)])
def soft_timeout(volname, timeout):
    """
    Configure the quota soft timeout for a volume.

    :param volname: Volume Name
    :param timeout: Timeout Value
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "soft-timeout", "{0}".format(timeout)])
def hard_timeout(volname, timeout):
    """
    Configure the quota hard timeout for a volume.

    :param volname: Volume Name
    :param timeout: Timeout Value
    :returns: Output of the command; raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "hard-timeout", "{0}".format(timeout)])
| gpl-2.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps39/src/sound_event_detection.py | 56 | 6116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
def event_detection(feature_data, model_container, hop_length_seconds=0.01, smoothing_window_length_seconds=1.0,
                    decision_threshold=0.0, minimum_event_length=0.1, minimum_event_gap=0.1):
    """Sound event detection.

    For each event class, frame-wise log-likelihoods from a positive and a
    negative model are accumulated over a causal look-back window; frames
    whose likelihood ratio exceeds the threshold form event segments, which
    are then length-filtered and gap-merged.

    Parameters
    ----------
    feature_data : numpy.ndarray [shape=(n_features, t)]
        Feature matrix

    model_container : dict
        Sound event model pairs [positive and negative] in dict

    hop_length_seconds : float > 0.0
        Feature hop length in seconds, used to convert feature index into time-stamp
        (Default value=0.01)

    smoothing_window_length_seconds : float > 0.0
        Accumulation window (look-back) length, withing the window likelihoods are accumulated.
        (Default value=1.0)

    decision_threshold : float > 0.0
        Likelihood ratio threshold for making the decision.
        (Default value=0.0)

    minimum_event_length : float > 0.0
        Minimum event length in seconds, shorten than given are filtered out from the output.
        (Default value=0.1)

    minimum_event_gap : float > 0.0
        Minimum allowed gap between events in seconds from same event label class.
        (Default value=0.1)

    Returns
    -------
    results : list (event dicts)
        Detection result, event list of (onset, offset, event_label) tuples

    """
    # Window length converted from seconds to frames.
    smoothing_window = int(smoothing_window_length_seconds / hop_length_seconds)

    results = []
    for event_label in model_container['models']:
        # Frame-wise log-likelihoods under the positive/negative GMM-style
        # models (score_samples returns (scores, ...); take the scores).
        positive = model_container['models'][event_label]['positive'].score_samples(feature_data)[0]
        negative = model_container['models'][event_label]['negative'].score_samples(feature_data)[0]

        # Lets keep the system causal and use look-back while smoothing (accumulating) likelihoods
        # NOTE(review): the accumulation writes back into the same arrays it
        # reads from, so later windows sum over already-accumulated values
        # once stop_id exceeds the window length — presumably intentional in
        # the original baseline, but confirm before restructuring.
        for stop_id in range(0, feature_data.shape[0]):
            start_id = stop_id - smoothing_window
            if start_id < 0:
                start_id = 0
            positive[start_id] = sum(positive[start_id:stop_id])
            negative[start_id] = sum(negative[start_id:stop_id])

        # Positive ratio means the positive model explains the frame better.
        likelihood_ratio = positive - negative
        event_activity = likelihood_ratio > decision_threshold

        # Find contiguous segments and convert frame-ids into times
        event_segments = contiguous_regions(event_activity) * hop_length_seconds

        # Preprocess the event segments
        event_segments = postprocess_event_segments(event_segments=event_segments,
                                                    minimum_event_length=minimum_event_length,
                                                    minimum_event_gap=minimum_event_gap)

        for event in event_segments:
            results.append((event[0], event[1], event_label))

    return results
def contiguous_regions(activity_array):
    """Find contiguous regions from bool valued numpy.array.

    Transforms boolean values for each frame into pairs of onsets and
    offsets.

    Parameters
    ----------
    activity_array : numpy.array [shape=(t)]
        Event activity array, bool values

    Returns
    -------
    change_indices : numpy.ndarray [shape=(number of regions, 2)]
        Onset and offset index pairs, one row per contiguous True run

    """
    # Indices where the value flips; shift by one so each index points at
    # the first frame *after* the change.
    boundaries = numpy.diff(activity_array).nonzero()[0] + 1

    # An active first frame means a region starts at index 0.
    if activity_array[0]:
        boundaries = numpy.r_[0, boundaries]

    # An active last frame means the final region ends at the array length.
    if activity_array[-1]:
        boundaries = numpy.r_[boundaries, activity_array.size]

    # Consecutive boundaries pair up into (onset, offset) rows.
    return boundaries.reshape((-1, 2))
def postprocess_event_segments(event_segments, minimum_event_length=0.1, minimum_event_gap=0.1):
    """Post process event segment list. Makes sure that minimum event length and minimum event gap conditions are met.

    Parameters
    ----------
    event_segments : numpy.ndarray [shape=(number of events, 2)]
        Event segments, first column has the onset, second has the offset.

    minimum_event_length : float > 0.0
        Minimum event length in seconds, shorter than given are filtered out from the output.
        (Default value=0.1)

    minimum_event_gap : float > 0.0
        Minimum allowed gap between events in seconds from same event label class.
        (Default value=0.1)

    Returns
    -------
    event_results : list of (onset, offset) tuples
        Postprocessed event segments

    """
    # Step 1: drop events shorter than the minimum length.
    long_enough = [(onset, offset) for onset, offset in event_segments
                   if offset - onset >= minimum_event_length]

    if not long_enough:
        return long_enough

    # Step 2: merge consecutive events separated by less than the minimum gap.
    merged = []
    current_onset, current_offset = long_enough[0]
    for onset, offset in long_enough[1:]:
        if onset - current_offset > minimum_event_gap:
            # Gap is large enough: flush the buffered event and start a new one.
            merged.append((current_onset, current_offset))
            current_onset, current_offset = onset, offset
        else:
            # Gap too small: extend the buffered event over the current one.
            current_offset = offset
    # Flush the last buffered event.
    merged.append((current_onset, current_offset))
    return merged
| mit |
dneg/gaffer | doc/GafferUserGuide/images/autoGenerated_source/interface_layouts_figAA.py | 10 | 2044 | import IECore
import GafferUI
import GafferScene
import GafferSceneUI
import os
# Rebuild a saved Gaffer UI layout for documentation screenshot generation.
# `script` is provided by the surrounding Gaffer execution environment.
scriptNode = script
scriptWindow = GafferUI.ScriptWindow.acquire( script )
# The layout is stored as a serialised CompoundEditor expression; eval() it
# here so that `scriptNode` is resolvable inside the expression.
layout = eval( "GafferUI.CompoundEditor( scriptNode, children = ( GafferUI.SplitContainer.Orientation.Horizontal, 0.495974, ( ( GafferUI.SplitContainer.Orientation.Vertical, 0.529083, ( {'tabs': (GafferUI.Viewer( scriptNode ),), 'tabsVisible': True, 'currentTab': 0, 'pinned': [False]}, ( GafferUI.SplitContainer.Orientation.Vertical, 0.918072, ( {'tabs': (GafferUI.NodeGraph( scriptNode ),), 'tabsVisible': True, 'currentTab': 0, 'pinned': [None]}, {'tabs': (GafferUI.Timeline( scriptNode ),), 'tabsVisible': False, 'currentTab': 0, 'pinned': [None]} ) ) ) ), ( GafferUI.SplitContainer.Orientation.Horizontal, 0.494105, ( ( GafferUI.SplitContainer.Orientation.Vertical, 0.651007, ( {'tabs': (GafferUI.NodeEditor( scriptNode ),), 'tabsVisible': True, 'currentTab': 0, 'pinned': [False]}, {'tabs': (GafferSceneUI.SceneInspector( scriptNode ),), 'tabsVisible': True, 'currentTab': 0, 'pinned': [False]} ) ), {'tabs': (GafferSceneUI.SceneHierarchy( scriptNode ),), 'tabsVisible': True, 'currentTab': 0, 'pinned': [False]} ) ) ) ) )" )
scriptWindow.setLayout( layout )
# Fixed window size so generated images are reproducible.
scriptWindow._Widget__qtWidget.resize(995,500)
# Select the nodes that should appear highlighted in the capture.
for nodeName in ['Group5']:
	script.selection().add( script.descendant( nodeName ) )
# Expand and select the scene paths shown by the Viewer / SceneHierarchy.
script.context()["ui:scene:expandedPaths"] = GafferScene.PathMatcherData( GafferScene.PathMatcher( ['/', '/group', '/group/group', '/group/group/plane', '/group/group/sphere', '/group/group1', '/group/group1/group', '/group/group1/group/plane', '/group/group1/group/sphere', '/group/group1/group1', '/group/group1/group1/group', '/group/group1/group1/text', '/group/group2', '/group/group2/group', '/group/group2/group/plane', '/group/group2/group/sphere'] ) )
script.context()["ui:scene:selectedPaths"] = IECore.StringVectorData( [ "/group/group2/group/sphere" ] )
##############################################################
## IMAGE SPECIFIC COMMANDS BELOW #############################
| bsd-3-clause |
Electroscholars/P.E.E.R.S | MainWindowArrowTest/youtube_dl/extractor/sockshare.py | 23 | 2628 | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
determine_ext,
ExtractorError,
)
from .common import InfoExtractor
class SockshareIE(InfoExtractor):
    """Extractor for video files hosted on sockshare.com."""
    _VALID_URL = r'https?://(?:www\.)?sockshare\.com/file/(?P<id>[0-9A-Za-z]+)'
    # Matches the "file removed" notice so we can fail early with a clear error.
    _FILE_DELETED_REGEX = r'This file doesn\'t exist, or has been removed\.</div>'
    _TEST = {
        'url': 'http://www.sockshare.com/file/437BE28B89D799D7',
        'md5': '9d0bf1cfb6dbeaa8d562f6c97506c5bd',
        'info_dict': {
            'id': '437BE28B89D799D7',
            'title': 'big_buck_bunny_720p_surround.avi',
            'ext': 'avi',
            'thumbnail': 're:^http://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Normalise to the canonical file URL before fetching.
        url = 'http://sockshare.com/file/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        # The site requires re-submitting a hidden hash ("Continue as Free
        # User" confirmation) before serving the real download page.
        confirm_hash = self._html_search_regex(r'''(?x)<input\s+
            type="hidden"\s+
            value="([^"]*)"\s+
            name="hash"
            ''', webpage, 'hash')

        fields = {
            "hash": confirm_hash,
            "confirm": "Continue as Free User"
        }

        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        # Apparently, this header is required for confirmation to work.
        req.add_header('Host', 'www.sockshare.com')
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        webpage = self._download_webpage(
            req, video_id, 'Downloading video page')

        # The confirmed page contains a relative download link.
        video_url = self._html_search_regex(
            r'<a href="([^"]*)".+class="download_file_link"',
            webpage, 'file url')
        video_url = "http://www.sockshare.com" + video_url
        # Try the page heading first, then fall back to the JS variable.
        title = self._html_search_regex((
            r'<h1>(.+)<strong>',
            r'var name = "([^"]+)";'),
            webpage, 'title', default=None)
        thumbnail = self._html_search_regex(
            r'<img\s+src="([^"]*)".+?name="bg"',
            webpage, 'thumbnail')

        formats = [{
            'format_id': 'sd',
            'url': video_url,
            # The title is the original file name, so its extension gives
            # the container format.
            'ext': determine_ext(title),
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
Tithen-Firion/youtube-dl | youtube_dl/extractor/animeondemand.py | 45 | 11014 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_str,
)
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
sanitized_Request,
urlencode_postdata,
)
class AnimeOnDemandIE(InfoExtractor):
    """Extractor for anime-on-demand.de series and films.

    Returns a playlist of episode (or film) entries; login via .netrc or
    --username/--password is required for account-restricted titles.
    """
    _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
    _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
    # Opting the session into the HTML5 player, which exposes the playlists.
    _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
    _NETRC_MACHINE = 'animeondemand'
    _TESTS = [{
        # jap, OmU
        'url': 'https://www.anime-on-demand.de/anime/161',
        'info_dict': {
            'id': '161',
            'title': 'Grimgar, Ashes and Illusions (OmU)',
            'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
        },
        'playlist_mincount': 4,
    }, {
        # Film wording is used instead of Episode, ger/jap, Dub/OmU
        'url': 'https://www.anime-on-demand.de/anime/39',
        'only_matching': True,
    }, {
        # Episodes without titles, jap, OmU
        'url': 'https://www.anime-on-demand.de/anime/162',
        'only_matching': True,
    }, {
        # ger/jap, Dub/OmU, account required
        'url': 'https://www.anime-on-demand.de/anime/169',
        'only_matching': True,
    }, {
        # Full length film, non-series, ger/jap, Dub/OmU, account required
        'url': 'https://www.anime-on-demand.de/anime/185',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with the configured credentials, if any were provided."""
        (username, password) = self._get_login_info()
        if username is None:
            # No credentials configured: proceed anonymously.
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page:
            self.raise_geo_restricted(
                '%s is only available in German-speaking countries of Europe' % self.IE_NAME)

        # Start from the hidden inputs (CSRF token etc.) of the login form.
        login_form = self._form_hidden_inputs('new_user', login_page)

        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = sanitized_Request(
            post_url, urlencode_postdata(login_form))
        request.add_header('Referer', self._LOGIN_URL)

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # Success is detected by the presence of a logout link.
        if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
            error = self._search_regex(
                r'<p class="alert alert-danger">(.+?)</p>',
                response, 'error', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        anime_id = self._match_id(url)

        webpage = self._download_webpage(url, anime_id)

        # Without HTML5 mode the page carries no playlist data; opt in once
        # and re-fetch.
        if 'data-playlist=' not in webpage:
            self._download_webpage(
                self._APPLY_HTML5_URL, anime_id,
                'Activating HTML5 beta', 'Unable to apply HTML5 beta')
            webpage = self._download_webpage(url, anime_id)

        csrf_token = self._html_search_meta(
            'csrf-token', webpage, 'csrf token', fatal=True)

        anime_title = self._html_search_regex(
            r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
            webpage, 'anime name')
        anime_description = self._html_search_regex(
            r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
            webpage, 'anime description', default=None)

        entries = []

        def extract_info(html, video_id, num=None):
            # Collect formats (and title/description) from the streamstarter
            # inputs embedded in the given HTML fragment.
            title, description = [None] * 2
            formats = []

            for input_ in re.findall(
                    r'<input[^>]+class=["\'].*?streamstarter_html5[^>]+>', html):
                attributes = extract_attributes(input_)
                playlist_urls = []
                for playlist_key in ('data-playlist', 'data-otherplaylist'):
                    playlist_url = attributes.get(playlist_key)
                    if isinstance(playlist_url, compat_str) and re.match(
                            r'/?[\da-zA-Z]+', playlist_url):
                        playlist_urls.append(attributes[playlist_key])
                if not playlist_urls:
                    continue

                lang = attributes.get('data-lang')
                lang_note = attributes.get('value')

                for playlist_url in playlist_urls:
                    kind = self._search_regex(
                        r'videomaterialurl/\d+/([^/]+)/',
                        playlist_url, 'media kind', default=None)
                    format_id_list = []
                    if lang:
                        format_id_list.append(lang)
                    if kind:
                        format_id_list.append(kind)
                    if not format_id_list and num is not None:
                        format_id_list.append(compat_str(num))
                    format_id = '-'.join(format_id_list)
                    format_note = ', '.join(filter(None, (kind, lang_note)))
                    # The playlist endpoint requires AJAX-style headers plus
                    # the page's CSRF token.
                    request = sanitized_Request(
                        compat_urlparse.urljoin(url, playlist_url),
                        headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-CSRF-Token': csrf_token,
                            'Referer': url,
                            'Accept': 'application/json, text/javascript, */*; q=0.01',
                        })
                    playlist = self._download_json(
                        request, video_id, 'Downloading %s playlist JSON' % format_id,
                        fatal=False)
                    if not playlist:
                        continue
                    start_video = playlist.get('startvideo', 0)
                    playlist = playlist.get('playlist')
                    if not playlist or not isinstance(playlist, list):
                        continue
                    playlist = playlist[start_video]
                    title = playlist.get('title')
                    if not title:
                        continue
                    description = playlist.get('description')
                    for source in playlist.get('sources', []):
                        file_ = source.get('file')
                        if not file_:
                            continue
                        ext = determine_ext(file_)
                        format_id_list = [lang, kind]
                        if ext == 'm3u8':
                            format_id_list.append('hls')
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            format_id_list.append('dash')
                        format_id = '-'.join(filter(None, format_id_list))
                        if ext == 'm3u8':
                            file_formats = self._extract_m3u8_formats(
                                file_, video_id, 'mp4',
                                entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            continue
                            # NOTE(review): unreachable — the `continue` above
                            # disables DASH extraction; confirm whether this
                            # was a deliberate kill-switch before removing.
                            file_formats = self._extract_mpd_formats(
                                file_, video_id, mpd_id=format_id, fatal=False)
                        else:
                            continue
                        for f in file_formats:
                            f.update({
                                'language': lang,
                                'format_note': format_note,
                            })
                        formats.extend(file_formats)

            return {
                'title': title,
                'description': description,
                'formats': formats,
            }

        def extract_entries(html, video_id, common_info, num=None):
            # Append a full entry if formats were found, otherwise fall back
            # to any teaser/trailer link in the fragment.
            info = extract_info(html, video_id, num)

            if info['formats']:
                self._sort_formats(info['formats'])
                f = common_info.copy()
                f.update(info)
                entries.append(f)

            # Extract teaser/trailer only when full episode is not available
            if not info['formats']:
                m = re.search(
                    r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>(?P<kind>Teaser|Trailer)<',
                    html)
                if m:
                    f = common_info.copy()
                    f.update({
                        'id': '%s-%s' % (f['id'], m.group('kind').lower()),
                        'title': m.group('title'),
                        'url': compat_urlparse.urljoin(url, m.group('href')),
                    })
                    entries.append(f)

        def extract_episodes(html):
            # One episode box per "Episodeninhalt" section, numbered from 1.
            for num, episode_html in enumerate(re.findall(
                    r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', html), 1):
                episodebox_title = self._search_regex(
                    (r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1',
                     r'class="episodebox-title"[^>]+>(?P<title>.+?)<'),
                    episode_html, 'episodebox title', default=None, group='title')
                if not episodebox_title:
                    continue

                episode_number = int(self._search_regex(
                    r'(?:Episode|Film)\s*(\d+)',
                    episodebox_title, 'episode number', default=num))
                episode_title = self._search_regex(
                    r'(?:Episode|Film)\s*\d+\s*-\s*(.+)',
                    episodebox_title, 'episode title', default=None)

                video_id = 'episode-%d' % episode_number

                common_info = {
                    'id': video_id,
                    'series': anime_title,
                    'episode': episode_title,
                    'episode_number': episode_number,
                }

                extract_entries(episode_html, video_id, common_info)

        def extract_film(html, video_id):
            # Non-series title: the whole page is a single film entry.
            common_info = {
                'id': anime_id,
                'title': anime_title,
                'description': anime_description,
            }
            extract_entries(html, video_id, common_info)

        extract_episodes(webpage)

        if not entries:
            extract_film(webpage, anime_id)

        return self.playlist_result(entries, anime_id, anime_title, anime_description)
| unlicense |
qma/pants | contrib/node/tests/python/pants_test/contrib/node/tasks/test_npm_resolve.py | 3 | 7155 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from textwrap import dedent
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.npm_resolve import NpmResolve
class NpmResolveTest(TaskTestBase):
  """Integration-style tests for the NpmResolve task.

  Each test builds a synthetic graph of node targets, runs the resolve task,
  and inspects the NodePaths product; where possible the resolved JavaScript
  is actually executed through the task's node distribution.
  """

  @classmethod
  def task_type(cls):
    # Tells TaskTestBase which task class is under test.
    return NpmResolve

  def test_noop(self):
    """Executing against an empty context should do nothing and not raise."""
    task = self.create_task(self.context())
    task.execute()

  def test_noop_na(self):
    """Non-node targets in the target roots are ignored by the resolver."""
    target = self.make_target(spec=':not_a_node_target', target_type=Target)
    task = self.create_task(self.context(target_roots=[target]))
    task.execute()

  def test_resolve_simple(self):
    """A NodeModule with one remote dep resolves and its script runs."""
    typ = self.make_target(spec='3rdparty/node:typ', target_type=NodeRemoteModule, version='0.6.3')
    self.create_file('src/node/util/util.js', contents=dedent("""
var typ = require('typ');
console.log("type of boolean is: " + typ.BOOLEAN);
"""))
    target = self.make_target(spec='src/node/util',
                              target_type=NodeModule,
                              sources=['util.js'],
                              dependencies=[typ])
    context = self.context(target_roots=[target])
    task = self.create_task(context)
    task.execute()

    # The task advertises resolved chroots via the NodePaths product.
    node_paths = context.products.get_data(NodePaths)
    node_path = node_paths.node_path(target)
    self.assertIsNotNone(node_path)

    # Smoke-test the resolve by running the script under the task's node.
    script_path = os.path.join(node_path, 'util.js')
    out = task.node_distribution.node_command(args=[script_path]).check_output()
    self.assertIn('type of boolean is: boolean', out)

  def test_resolve_simple_graph(self):
    """Transitive resolution de-dupes a package required at two versions."""
    typ1 = self.make_target(spec='3rdparty/node:typ1',
                            target_type=NodeRemoteModule,
                            package_name='typ',
                            version='0.6.x')
    typ2 = self.make_target(spec='3rdparty/node:typ2',
                            target_type=NodeRemoteModule,
                            package_name='typ',
                            version='0.6.1')
    self.create_file('src/node/util/typ.js', contents=dedent("""
var typ = require('typ');
module.exports = {
BOOL: typ.BOOLEAN
};
"""))
    util = self.make_target(spec='src/node/util',
                            target_type=NodeModule,
                            sources=['typ.js'],
                            dependencies=[typ1])
    self.create_file('src/node/leaf/leaf.js', contents=dedent("""
var typ = require('typ');
var util_typ = require('util/typ');
console.log("type of boolean is: " + typ.BOOLEAN);
console.log("type of bool is: " + util_typ.BOOL);
"""))
    leaf = self.make_target(spec='src/node/leaf',
                            target_type=NodeModule,
                            sources=['leaf.js'],
                            dependencies=[util, typ2])
    context = self.context(target_roots=[leaf])
    task = self.create_task(context)
    task.execute()

    node_paths = context.products.get_data(NodePaths)
    self.assertIsNotNone(node_paths.node_path(util))
    node_path = node_paths.node_path(leaf)
    self.assertIsNotNone(node_paths.node_path(leaf))

    # Verify the 'typ' package is not duplicated under leaf. The target dependency tree is:
    # leaf
    #   typ2 (0.6.1)
    #   util
    #     typ1 (0.6.x)
    # If we install leaf normally, NPM will install the typ2 target (typ version 0.6.1) at the top
    # level under leaf, and then not install the typ1 target (typ version 0.6.x) under util
    # because the dependency is already satisfied.
    typ_packages = []
    for root, _, files in os.walk(node_path):
      for f in files:
        if 'package.json' == f:
          with open(os.path.join(root, f)) as fp:
            package = json.load(fp)
            if 'typ' == package['name']:
              typ_packages.append(os.path.relpath(os.path.join(root, f), node_path))
    self.assertEqual(1, len(typ_packages),
                     'Expected to find exactly 1 de-duped `typ` package, but found these:'
                     '\n\t{}'.format('\n\t'.join(sorted(typ_packages))))

    # Both requires must still resolve at runtime after de-duping.
    script_path = os.path.join(node_path, 'leaf.js')
    out = task.node_distribution.node_command(args=[script_path]).check_output()
    lines = {line.strip() for line in out.splitlines()}
    self.assertIn('type of boolean is: boolean', lines)
    self.assertIn('type of bool is: boolean', lines)

  def test_resolve_preserves_package_json(self):
    """A checked-in package.json keeps name/version/scripts after resolve.

    The placeholder dependency sections are dropped and the real resolved
    dependency path for `util` is written in their place (see assertions).
    """
    util = self.make_target(spec='src/node/util',
                            target_type=NodeModule,
                            sources=[],
                            dependencies=[])
    self.create_file('src/node/scripts_project/package.json', contents=dedent("""
{
"name": "scripts_project",
"version": "1.2.3",
"dependencies": { "A": "file://A" },
"devDependencies": { "B": "file://B" },
"peerDependencies": { "C": "file://C" },
"optionalDependencies": { "D": "file://D" },
"scripts": {
"test": "mocha */dist.js"
}
}
"""))
    scripts_project = self.make_target(spec='src/node/scripts_project',
                                       target_type=NodeModule,
                                       sources=['package.json'],
                                       dependencies=[util])
    context = self.context(target_roots=[scripts_project])
    task = self.create_task(context)
    task.execute()

    node_paths = context.products.get_data(NodePaths)
    node_path = node_paths.node_path(scripts_project)
    self.assertIsNotNone(node_paths.node_path(scripts_project))

    package_json_path = os.path.join(node_path, 'package.json')
    with open(package_json_path) as fp:
      package = json.load(fp)
    self.assertEqual('scripts_project', package['name'],
                     'Expected to find package name of `scripts_project`, but found: {}'
                     .format(package['name']))
    self.assertEqual('1.2.3', package['version'],
                     'Expected to find package version of `1.2.3`, but found: {}'
                     .format(package['version']))
    self.assertEqual('mocha */dist.js', package['scripts']['test'],
                     'Expected to find package test script of `mocha */dist.js`, but found: {}'
                     .format(package['scripts']['test']))
    self.assertEqual(node_paths.node_path(util), package['dependencies']['util'])
    self.assertNotIn('A', package['dependencies'])
    self.assertNotIn('devDependencies', package)
    self.assertNotIn('peerDependencies', package)
    self.assertNotIn('optionalDependencies', package)
| apache-2.0 |
class VehicleInfo(object):
    """Catalog of SITL vehicle/frame configurations.

    Maps each vehicle type to its known simulation frames and, per frame,
    how to build and parameterize it.
    """

    def __init__(self):
        """
        waf_target: option passed to waf's --target to create binary
        default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
        extra_mavlink_cmds: extra parameters that will be passed to mavproxy
        """
        self.options = {
            "ArduCopter": {
                "default_frame": "quad",
                "frames": {
                    # COPTER
                    "+": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "quad": {
                        "model": "+",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "X": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                        # this param set FRAME doesn't actually work because mavproxy
                        # won't set a parameter unless it knows of it, and the
                        # param fetch happens asynchronously
                        "extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
                    },
                    "bfx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-bfx.parm"],
                    },
                    "djix": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-djix.parm"],
                    },
                    "cwx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-cwx.parm"],
                    },
                    "hexa": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-hexa.parm"],
                    },
                    "hexa-cwx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "hexa-dji": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "octa-cwx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "octa-quad-cwx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "octa-quad": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-octaquad.parm"],
                    },
                    "octa": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-octa.parm"],
                    },
                    "octa-dji": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "deca": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-deca.parm"],
                    },
                    "deca-cwx": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "tri": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-tri.parm"],
                    },
                    "y6": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-y6.parm"],
                    },
                    "dodeca-hexa": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-dodecahexa.parm"],
                    },
                    # SIM
                    "IrisRos": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "gazebo-iris": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/gazebo-iris.parm"],
                    },
                    "airsim-copter": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/airsim-quadX.parm"],
                    },
                    # HELICOPTER
                    "heli": {
                        "waf_target": "bin/arducopter-heli",
                        "default_params_filename": "default_params/copter-heli.parm",
                    },
                    "heli-dual": {
                        "waf_target": "bin/arducopter-heli",
                        "default_params_filename": ["default_params/copter-heli.parm",
                                                    "default_params/copter-heli-dual.parm"],
                    },
                    "heli-compound": {
                        "waf_target": "bin/arducopter-heli",
                    },
                    "singlecopter": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter-single.parm",
                    },
                    "coaxcopter": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter-single.parm",
                                                    "default_params/copter-coax.parm"],
                    },
                    "scrimmage-copter": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                    "Callisto": {
                        "model": "octa-quad:@ROMFS/models/Callisto.json",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "models/Callisto.param"],
                    },
                },
            },
            "Blimp": {
                "default_frame": "quad",
                "frames": {
                    # BLIMP
                    "quad": {
                        "model": "+",
                        "waf_target": "bin/blimp",
                        "default_params_filename": "default_params/blimp.parm",
                    },
                },
            },
            "ArduPlane": {
                "default_frame": "plane",
                "frames": {
                    # PLANE
                    "quadplane-tilttri": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tilttri.parm",
                    },
                    "quadplane-tilttrivec": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tilttrivec.parm",
                    },
                    "quadplane-tilthvec": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/quadplane-tilthvec.parm"],
                    },
                    "quadplane-tri": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tri.parm",
                    },
                    "quadplane-cl84": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-cl84.parm",
                    },
                    "quadplane": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane.parm",
                    },
                    "firefly": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/firefly.parm",
                    },
                    "plane-elevon": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-elevons.parm"],
                    },
                    "plane-vtail": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-vtail.parm"],
                    },
                    "plane-tailsitter": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane-tailsitter.parm",
                    },
                    "plane-jet": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-jet.parm"],
                    },
                    "plane": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane.parm",
                    },
                    "plane-dspoilers": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-dspoilers.parm"]
                    },
                    "plane-soaring": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-soaring.parm"]
                    },
                    "gazebo-zephyr": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/gazebo-zephyr.parm",
                    },
                    "last_letter": {
                        "waf_target": "bin/arduplane",
                    },
                    "CRRCSim": {
                        "waf_target": "bin/arduplane",
                    },
                    "jsbsim": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane-jsbsim.parm",
                    },
                    "scrimmage-plane": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane.parm",
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                },
            },
            "Rover": {
                "default_frame": "rover",
                "frames": {
                    # ROVER
                    "rover": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": "default_params/rover.parm",
                    },
                    "rover-skid": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm"],
                    },
                    "rover-vectored": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-vectored.parm"],
                    },
                    "balancebot": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm",
                                                    "default_params/balancebot.parm"],
                    },
                    "sailboat": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/sailboat.parm"],
                    },
                    "sailboat-motor": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/sailboat-motor.parm"],
                    },
                    "gazebo-rover": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm"],
                    },
                    "airsim-rover": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/airsim-rover.parm"],
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                },
            },
            "ArduSub": {
                "default_frame": "vectored",
                "frames": {
                    "vectored": {
                        "waf_target": "bin/ardusub",
                        "default_params_filename": "default_params/sub.parm",
                    },
                    "vectored_6dof": {
                        "waf_target": "bin/ardusub",
                        "default_params_filename": "default_params/sub-6dof.parm",
                    },
                    "gazebo-bluerov2": {
                        "waf_target": "bin/ardusub",
                        "default_params_filename": "default_params/sub.parm",
                    },
                },
            },
            "AntennaTracker": {
                "default_frame": "tracker",
                "frames": {
                    "tracker": {
                        "waf_target": "bin/antennatracker",
                    },
                },
            },
        }

    def default_frame(self, vehicle):
        """Return the default frame name for *vehicle*."""
        return self.options[vehicle]["default_frame"]

    def default_waf_target(self, vehicle):
        """Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
        frame = self.default_frame(vehicle)
        return self.options[vehicle]["frames"][frame]["waf_target"]

    def options_for_frame(self, frame, vehicle, opts):
        """Return information about how to sitl for frame e.g. build-type==sitl"""
        frames = self.options[vehicle]["frames"]

        # Exact frame name wins; otherwise fall back to a known prefix.
        config = frames.get(frame)
        if config is None:
            for prefix in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane", "airsim"]:
                if frame.startswith(prefix):
                    config = frames[prefix]
                    break
        if config is None and frame.endswith("-heli"):
            config = frames["heli"]
        if config is None:
            print("WARNING: no config for frame (%s)" % frame)
            config = {}

        # Fill in defaults for anything the frame entry does not specify.
        if "model" not in config:
            config["model"] = frame
        if "sitl-port" not in config:
            config["sitl-port"] = True
        if opts.model is not None:
            config["model"] = opts.model
        # External sims (xplane / flightaxis) manage their own connection.
        if config["model"].find("xplane") != -1 or config["model"].find("flightaxis") != -1:
            config["sitl-port"] = False
        if "waf_target" not in config:
            config["waf_target"] = self.default_waf_target(vehicle)
        if opts.build_target is not None:
            config["waf_target"] = opts.build_target
        return config
| gpl-3.0 |
zeptonaut/catapult | dashboard/dashboard/alerts_test.py | 2 | 7848 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import webapp2
import webtest
from dashboard import alerts
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import bug_data
from dashboard.models import sheriff
from dashboard.models import stoppage_alert
class AlertsTest(testing_common.TestCase):
  """Tests for the /alerts request handler."""

  # TODO(qyearsley): Simplify this unit test.

  def setUp(self):
    super(AlertsTest, self).setUp()
    # Route /alerts to the handler under test via a throwaway WSGI app.
    app = webapp2.WSGIApplication([('/alerts', alerts.AlertsHandler)])
    self.testapp = webtest.TestApp(app)

  def _AddAlertsToDataStore(self):
    """Adds sample data, including triaged and non-triaged alerts."""
    key_map = {}

    sheriff_key = sheriff.Sheriff(
        id='Chromium Perf Sheriff', email='sullivan@google.com').put()
    testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
        'scrolling-benchmark': {
            'first_paint': {},
            'mean_frame_time': {},
        }
    })
    first_paint = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling-benchmark/first_paint')
    mean_frame_time = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time')

    # By default, all Test entities have an improvement_direction of UNKNOWN,
    # meaning that neither direction is considered an improvement.
    # Here we set the improvement direction so that some anomalies are
    # considered improvements.
    for test_key in [first_paint, mean_frame_time]:
      test = test_key.get()
      test.improvement_direction = anomaly.DOWN
      test.put()

    # Add some (12) non-triaged alerts.
    for end_rev in range(10000, 10120, 10):
      test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
      anomaly_entity = anomaly.Anomaly(
          start_revision=end_rev - 5, end_revision=end_rev, test=test_key,
          median_before_anomaly=100, median_after_anomaly=200,
          sheriff=sheriff_key)
      anomaly_entity.SetIsImprovement()
      anomaly_key = anomaly_entity.put()
      key_map[end_rev] = anomaly_key.urlsafe()

    # Add some (2) already-triaged alerts.
    for end_rev in range(10120, 10140, 10):
      test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
      bug_id = -1 if end_rev % 20 == 0 else 12345
      anomaly_entity = anomaly.Anomaly(
          start_revision=end_rev - 5, end_revision=end_rev, test=test_key,
          median_before_anomaly=100, median_after_anomaly=200,
          bug_id=bug_id, sheriff=sheriff_key)
      anomaly_entity.SetIsImprovement()
      anomaly_key = anomaly_entity.put()
      key_map[end_rev] = anomaly_key.urlsafe()
      if bug_id > 0:
        bug_data.Bug(id=bug_id).put()

    # Add some (6) non-triaged improvements.
    for end_rev in range(10140, 10200, 10):
      test_key = mean_frame_time
      anomaly_entity = anomaly.Anomaly(
          start_revision=end_rev - 5, end_revision=end_rev, test=test_key,
          median_before_anomaly=200, median_after_anomaly=100,
          sheriff=sheriff_key)
      anomaly_entity.SetIsImprovement()
      anomaly_key = anomaly_entity.put()
      self.assertTrue(anomaly_entity.is_improvement)
      key_map[end_rev] = anomaly_key.urlsafe()

    return key_map

  def testGet_NoParametersSet_UntriagedAlertsListed(self):
    """With no query params, only the 12 untriaged regressions are listed."""
    key_map = self._AddAlertsToDataStore()
    response = self.testapp.get('/alerts')
    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
    self.assertEqual(12, len(anomaly_list))
    # The test below depends on the order of the items, but the order is not
    # guaranteed; it depends on the timestamps, which depend on put order.
    anomaly_list.sort(key=lambda a: -a['end_revision'])
    expected_end_rev = 10110
    for alert in anomaly_list:
      self.assertEqual(expected_end_rev, alert['end_revision'])
      self.assertEqual(expected_end_rev - 5, alert['start_revision'])
      self.assertEqual(key_map[expected_end_rev], alert['key'])
      self.assertEqual('ChromiumGPU', alert['master'])
      self.assertEqual('linux-release', alert['bot'])
      self.assertEqual('scrolling-benchmark', alert['testsuite'])
      if expected_end_rev % 20 == 0:
        self.assertEqual('first_paint', alert['test'])
      else:
        self.assertEqual('mean_frame_time', alert['test'])
      self.assertEqual('100.0%', alert['percent_changed'])
      self.assertIsNone(alert['bug_id'])
      expected_end_rev -= 10
    self.assertEqual(expected_end_rev, 9990)

  def testGet_TriagedParameterSet_TriagedListed(self):
    """With triaged=true, bug-associated alerts are included as well."""
    self._AddAlertsToDataStore()
    response = self.testapp.get('/alerts', {'triaged': 'true'})
    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
    # The alerts listed should contain those added above, including alerts
    # that have a bug ID that is not None.
    self.assertEqual(14, len(anomaly_list))
    expected_end_rev = 10130
    # The test below depends on the order of the items, but the order is not
    # guaranteed; it depends on the timestamps, which depend on put order.
    anomaly_list.sort(key=lambda a: -a['end_revision'])
    for alert in anomaly_list:
      if expected_end_rev == 10130:
        self.assertEqual(12345, alert['bug_id'])
      elif expected_end_rev == 10120:
        self.assertEqual(-1, alert['bug_id'])
      else:
        self.assertIsNone(alert['bug_id'])
      expected_end_rev -= 10
    self.assertEqual(expected_end_rev, 9990)

  def testGet_ImprovementsParameterSet_ListsImprovements(self):
    """With improvements=true, improvement anomalies are included too."""
    self._AddAlertsToDataStore()
    response = self.testapp.get('/alerts', {'improvements': 'true'})
    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
    self.assertEqual(18, len(anomaly_list))

  def testGet_SheriffParameterSet_OtherSheriffAlertsListed(self):
    """The sheriff parameter filters alerts to the named sheriff."""
    self._AddAlertsToDataStore()
    # Add another sheriff to the mock datastore, and set the sheriff of some
    # anomalies to be this new sheriff.
    sheriff2_key = sheriff.Sheriff(
        id='Sheriff2', email='sullivan@google.com').put()
    mean_frame_time = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time')
    anomalies = anomaly.Anomaly.query(
        anomaly.Anomaly.test == mean_frame_time).fetch()
    for anomaly_entity in anomalies:
      anomaly_entity.sheriff = sheriff2_key
      anomaly_entity.put()
    response = self.testapp.get('/alerts', {'sheriff': 'Sheriff2'})
    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
    sheriff_list = self.GetEmbeddedVariable(response, 'SHERIFF_LIST')
    for alert in anomaly_list:
      self.assertEqual('mean_frame_time', alert['test'])
    self.assertEqual(2, len(sheriff_list))
    self.assertEqual('Chromium Perf Sheriff', sheriff_list[0])
    self.assertEqual('Sheriff2', sheriff_list[1])

  def testGet_StoppageAlerts_EmbedsStoppageAlertListAndOneTable(self):
    """Stoppage alerts are embedded alongside exactly one alerts table."""
    sheriff.Sheriff(id='Sheriff', patterns=['M/b/*/*']).put()
    testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
    test_key = utils.TestKey('M/b/foo/bar')
    rows = testing_common.AddRows('M/b/foo/bar', {9800, 9802})
    for row in rows:
      stoppage_alert.CreateStoppageAlert(test_key.get(), row).put()
    response = self.testapp.get('/alerts?sheriff=Sheriff')
    stoppage_alert_list = self.GetEmbeddedVariable(
        response, 'STOPPAGE_ALERT_LIST')
    self.assertEqual(2, len(stoppage_alert_list))
    self.assertEqual(1, len(response.html('alerts-table')))

  def testGet_WithNoAlerts_HasImageAndNoAlertsTable(self):
    """With no alerts, the page shows a placeholder image and no table."""
    response = self.testapp.get('/alerts')
    self.assertEqual(1, len(response.html('img')))
    self.assertEqual(0, len(response.html('alerts-table')))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
xwolf12/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
    # pylab is only needed for plotting, so import it lazily here.
    import pylab as pl

    n_iter = 40

    # Per-iteration fit times for each estimator.
    time_ridge = np.empty(n_iter)
    time_ols = np.empty(n_iter)
    time_lasso = np.empty(n_iter)

    dimensions = 500 * np.arange(1, n_iter + 1)

    for i in range(n_iter):
        print('Iteration %s of %s' % (i, n_iter))

        # Square random problem: n_samples == n_features, growing with i.
        n_samples, n_features = 10 * i + 3, 10 * i + 3

        X = np.random.randn(n_samples, n_features)
        Y = np.random.randn(n_samples)

        start = datetime.now()
        ridge = linear_model.Ridge(alpha=1.)
        ridge.fit(X, Y)
        time_ridge[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        ols = linear_model.LinearRegression()
        ols.fit(X, Y)
        time_ols[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        lasso = linear_model.LassoLars()
        lasso.fit(X, Y)
        time_lasso[i] = total_seconds(datetime.now() - start)

    # Plot fit time against problem size for the three estimators.
    pl.figure('scikit-learn GLM benchmark results')
    pl.xlabel('Dimensions')
    pl.ylabel('Time (s)')
    pl.plot(dimensions, time_ridge, color='r')
    pl.plot(dimensions, time_ols, color='g')
    pl.plot(dimensions, time_lasso, color='b')
    pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
jasonwzhy/django | django/contrib/contenttypes/migrations/0001_initial.py | 585 | 1227 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the contenttypes app: creates the ContentType model
    # and its (app_label, model) uniqueness constraint.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ContentType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('app_label', models.CharField(max_length=100)),
                ('model', models.CharField(max_length=100, verbose_name='python model class name')),
            ],
            options={
                'ordering': ('name',),
                'db_table': 'django_content_type',
                'verbose_name': 'content type',
                'verbose_name_plural': 'content types',
            },
            bases=(models.Model,),
            managers=[
                ('objects', django.contrib.contenttypes.models.ContentTypeManager()),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='contenttype',
            unique_together=set([('app_label', 'model')]),
        ),
    ]
| bsd-3-clause |
keen99/SickRage | tornado/platform/asyncio.py | 65 | 5689 | """Bridges between the `asyncio` module and Tornado IOLoop.
This is a work in progress and interfaces are subject to change.
To test:
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOMainLoop
(the tests log a few warnings with AsyncIOMainLoop because they leave some
unfinished callbacks on the event loop that fail when it resumes)
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import tornado.concurrent
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop
from tornado import stack_context
try:
    # Import the real asyncio module for py33+ first. Older versions of the
    # trollius backport also use this name.
    import asyncio
except ImportError as e:
    # Asyncio itself isn't available; see if trollius is (backport to py26+).
    try:
        import trollius as asyncio
    except ImportError:
        # Re-raise the original asyncio error, not the trollius one.
        raise e
class BaseAsyncIOLoop(IOLoop):
    """Shared implementation mapping Tornado's fd-based IOLoop interface
    onto an asyncio event loop via add_reader/add_writer."""

    def initialize(self, asyncio_loop, close_loop=False):
        # close_loop: whether close() should also close the asyncio loop.
        self.asyncio_loop = asyncio_loop
        self.close_loop = close_loop
        self.asyncio_loop.call_soon(self.make_current)
        # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
        self.handlers = {}
        # Set of fds listening for reads/writes
        self.readers = set()
        self.writers = set()
        self.closing = False

    def close(self, all_fds=False):
        """Unregister all handlers; optionally close their file objects
        and (if owned) the underlying asyncio loop."""
        self.closing = True
        for fd in list(self.handlers):
            fileobj, handler_func = self.handlers[fd]
            self.remove_handler(fd)
            if all_fds:
                self.close_fd(fileobj)
        if self.close_loop:
            self.asyncio_loop.close()

    def add_handler(self, fd, handler, events):
        """Register *handler* for READ/WRITE *events* on *fd*."""
        fd, fileobj = self.split_fd(fd)
        if fd in self.handlers:
            raise ValueError("fd %s added twice" % fd)
        self.handlers[fd] = (fileobj, stack_context.wrap(handler))
        if events & IOLoop.READ:
            self.asyncio_loop.add_reader(
                fd, self._handle_events, fd, IOLoop.READ)
            self.readers.add(fd)
        if events & IOLoop.WRITE:
            self.asyncio_loop.add_writer(
                fd, self._handle_events, fd, IOLoop.WRITE)
            self.writers.add(fd)

    def update_handler(self, fd, events):
        """Reconcile the asyncio reader/writer registrations for *fd* with
        the newly requested *events* mask."""
        fd, fileobj = self.split_fd(fd)
        if events & IOLoop.READ:
            if fd not in self.readers:
                self.asyncio_loop.add_reader(
                    fd, self._handle_events, fd, IOLoop.READ)
                self.readers.add(fd)
        else:
            if fd in self.readers:
                self.asyncio_loop.remove_reader(fd)
                self.readers.remove(fd)
        if events & IOLoop.WRITE:
            if fd not in self.writers:
                self.asyncio_loop.add_writer(
                    fd, self._handle_events, fd, IOLoop.WRITE)
                self.writers.add(fd)
        else:
            if fd in self.writers:
                self.asyncio_loop.remove_writer(fd)
                self.writers.remove(fd)

    def remove_handler(self, fd):
        """Unregister *fd* from the asyncio loop; a no-op if not registered."""
        fd, fileobj = self.split_fd(fd)
        if fd not in self.handlers:
            return
        if fd in self.readers:
            self.asyncio_loop.remove_reader(fd)
            self.readers.remove(fd)
        if fd in self.writers:
            self.asyncio_loop.remove_writer(fd)
            self.writers.remove(fd)
        del self.handlers[fd]

    def _handle_events(self, fd, events):
        # Dispatch an asyncio readiness callback to the Tornado-style handler.
        fileobj, handler_func = self.handlers[fd]
        handler_func(fileobj, events)

    def start(self):
        self._setup_logging()
        self.asyncio_loop.run_forever()

    def stop(self):
        self.asyncio_loop.stop()

    def call_at(self, when, callback, *args, **kwargs):
        # asyncio.call_at supports *args but not **kwargs, so bind them here.
        # We do not synchronize self.time and asyncio_loop.time, so
        # convert from absolute to relative.
        return self.asyncio_loop.call_later(
            max(0, when - self.time()), self._run_callback,
            functools.partial(stack_context.wrap(callback), *args, **kwargs))

    def remove_timeout(self, timeout):
        timeout.cancel()

    def add_callback(self, callback, *args, **kwargs):
        """Schedule *callback*; safe to call from any thread via
        call_soon_threadsafe."""
        if self.closing:
            raise RuntimeError("IOLoop is closing")
        self.asyncio_loop.call_soon_threadsafe(
            self._run_callback,
            functools.partial(stack_context.wrap(callback), *args, **kwargs))

    add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
    """IOLoop wrapper around the process-global asyncio event loop.

    The shared loop is not closed when this IOLoop is closed
    (close_loop=False).
    """
    def initialize(self):
        super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
                                                close_loop=False)
class AsyncIOLoop(BaseAsyncIOLoop):
    """IOLoop backed by a freshly created asyncio event loop.

    Owns its loop, which is closed along with this IOLoop (close_loop=True).
    """
    def initialize(self):
        super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(),
                                            close_loop=True)
def to_tornado_future(asyncio_future):
    """Convert an ``asyncio.Future`` to a `tornado.concurrent.Future`."""
    tf = tornado.concurrent.Future()
    # chain_future propagates the result/exception when the source resolves.
    tornado.concurrent.chain_future(asyncio_future, tf)
    return tf
def to_asyncio_future(tornado_future):
    """Convert a `tornado.concurrent.Future` to an ``asyncio.Future``."""
    af = asyncio.Future()
    # chain_future propagates the result/exception when the source resolves.
    tornado.concurrent.chain_future(tornado_future, af)
    return af
# When convert_yielded exposes a register hook (presumably a singledispatch
# registry -- confirm against tornado.gen), teach it to adapt asyncio futures
# yielded inside Tornado coroutines.
if hasattr(convert_yielded, 'register'):
    convert_yielded.register(asyncio.Future, to_tornado_future)
| gpl-3.0 |
wileeam/airflow | tests/providers/apache/spark/hooks/test_spark_sql.py | 4 | 4146 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
from itertools import dropwhile
from unittest.mock import call, patch
from airflow.models import Connection
from airflow.providers.apache.spark.hooks.spark_sql import SparkSqlHook
from airflow.utils import db
def get_after(sentinel, iterable):
    """Return the element that immediately follows `sentinel` in `iterable`.

    Raises StopIteration when `sentinel` is absent or is the final element,
    matching the original dropwhile-based implementation.
    """
    iterator = iter(iterable)
    for element in iterator:
        if element == sentinel:
            return next(iterator)
    raise StopIteration
class TestSparkSqlHook(unittest.TestCase):
    """Tests for SparkSqlHook command construction and subprocess handling."""

    # Keyword arguments used to construct the hook under test.
    _config = {
        'conn_id': 'spark_default',
        'executor_cores': 4,
        'executor_memory': '22g',
        'keytab': 'privileged_user.keytab',
        'name': 'spark-job',
        'num_executors': 10,
        'verbose': True,
        'sql': ' /path/to/sql/file.sql ',
        'conf': 'key=value,PROP=VALUE'
    }

    def setUp(self):
        # Ensure the spark_default connection exists for the hook to load.
        db.merge_conn(
            Connection(
                conn_id='spark_default', conn_type='spark',
                host='yarn://yarn-master')
        )

    def test_build_command(self):
        """Each configured option must appear in the prepared command line."""
        hook = SparkSqlHook(**self._config)

        # The subprocess requires an array but we build the cmd by joining on a space
        cmd = ' '.join(hook._prepare_command(""))

        # Check all the parameters
        assert "--executor-cores {}".format(self._config['executor_cores']) in cmd
        assert "--executor-memory {}".format(self._config['executor_memory']) in cmd
        assert "--keytab {}".format(self._config['keytab']) in cmd
        assert "--name {}".format(self._config['name']) in cmd
        assert "--num-executors {}".format(self._config['num_executors']) in cmd
        # The SQL file path follows the -f flag, stripped of whitespace.
        sql_path = get_after('-f', hook._prepare_command(""))
        assert self._config['sql'].strip() == sql_path

        # Check if all config settings are there
        for key_value in self._config['conf'].split(","):
            k, v = key_value.split('=')
            assert "--conf {0}={1}".format(k, v) in cmd

        if self._config['verbose']:
            assert "--verbose" in cmd

    @patch('airflow.providers.apache.spark.hooks.spark_sql.subprocess.Popen')
    def test_spark_process_runcmd(self, mock_popen):
        """run_query must launch spark-sql and log its stdout."""
        # Given
        mock_popen.return_value.stdout = io.StringIO('Spark-sql communicates using stdout')
        mock_popen.return_value.stderr = io.StringIO('stderr')
        mock_popen.return_value.wait.return_value = 0

        # When
        hook = SparkSqlHook(
            conn_id='spark_default',
            sql='SELECT 1'
        )
        with patch.object(hook.log, 'debug') as mock_debug:
            with patch.object(hook.log, 'info') as mock_info:
                hook.run_query()
                mock_debug.assert_called_once_with(
                    'Spark-Sql cmd: %s',
                    ['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose',
                     '--queue', 'default']
                )
                mock_info.assert_called_once_with(
                    'Spark-sql communicates using stdout'
                )

        # Then
        # stderr=-2 is subprocess.STDOUT, stdout=-1 is subprocess.PIPE.
        self.assertEqual(
            mock_popen.mock_calls[0],
            call(['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose',
                  '--queue', 'default'], stderr=-2, stdout=-1)
        )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
TiVoMaker/boto | tests/integration/rds/test_cert_verification.py | 126 | 1548 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.rds
class RDSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Hit every RDS regional endpoint and check its SSL certificate validates."""
    # Flag attribute consumed by the shared ServiceCertVerificationTest mixin
    # to select the RDS service.
    rds = True
    # All known RDS regions; evaluated once at import time.
    regions = boto.rds.regions()

    def sample_service_call(self, conn):
        """Issue a cheap authenticated call; success implies the cert verified."""
        conn.get_all_dbinstances()
| mit |
RafayelGardishyan/Python-minis | TheCoiner/main.py | 1 | 7776 | import time
import datetime
d = datetime.date.today()
dm = str(d.month)
dd = str(d.day)

print('Welkom bij TheCoiner')

# Data files: each one holds a single number as text.
COINS_FILE = 'data/coins/coins.txt'
DAILY_FILE = 'data/coins/dailycoins.txt'
MONTH_FILE = 'data/monthday/month.txt'
DAY_FILE = 'data/monthday/day.txt'


def read_value(path, default='0'):
    """Return the stripped text of *path*, or *default* when missing/empty."""
    try:
        # BUG FIX: the original opened files repeatedly and never closed them.
        with open(path) as handle:
            content = handle.read().strip()
    except IOError:
        # First run: the data files do not exist yet.
        return default
    return content or default


def write_value(path, value):
    """Overwrite *path* with str(value)."""
    folder = os.path.dirname(path)
    if folder and not os.path.isdir(folder):
        os.makedirs(folder)  # first run: create the data directories
    with open(path, 'w') as handle:
        handle.write(str(value))


pdm = read_value(MONTH_FILE)              # month recorded on the previous run
pdd = read_value(DAY_FILE)                # day recorded on the previous run
tc = read_value(DAILY_FILE, default='1')  # coins handed out per day

# Remember today's date for the next run.
write_value(MONTH_FILE, dm)
write_value(DAY_FILE, dd)

# BUG FIX: the original used '>' comparisons, so the December -> January
# rollover never paid out, and the same-month branch granted a hard-coded
# single coin while telling the player it had granted the daily amount.
# A payout now happens whenever the stored date differs from today.
if dm != pdm or dd != pdd:
    print('Je hebt ' + tc + ' munt(en) gekregen')
    coins = int(read_value(COINS_FILE)) + int(tc)
    write_value(COINS_FILE, coins)
else:
    print("Je hebt vandaag al munt(en) gekregen")
    coins = int(read_value(COINS_FILE))

print('Je hebt ' + str(coins) + ' munten')

# Menu key -> (item name, price, extra daily coins, owned-count file).
# BUG FIX: the Oma/Spaarvarken daily bonuses were swapped relative to the
# menu text, and the owned-count files were overwritten with the coin
# balance instead of the incremented item count.
SHOP = {
    '1': ('Geldboom', 2, 1, 'data/items/mt.txt'),
    '2': ('Oma', 5, 5, 'data/items/gm.txt'),
    '3': ('Spaarvarken', 15, 10, 'data/items/pb.txt'),
}

if coins > 1:
    # BUG FIX: "while coins != 1" spun forever once the balance hit 0, and
    # comparing input() (a string on Python 3) against int literals meant no
    # purchase could ever be made; items could also be bought on credit.
    while coins > 1:
        print("Je kan de volgende dingen kopen \n 1. Geldboom = 2 munten (+1) \n 2. Oma = 5 munten (+5) \n 3. Spaarvarken 15 munten(+10)")
        choice = str(input()).strip()
        if choice not in SHOP:
            continue  # unknown entry: show the menu again
        name, price, bonus, counter_file = SHOP[choice]
        if price > coins:
            print("Daar heb je niet genoeg munten voor")
            continue
        coins -= price
        write_value(COINS_FILE, coins)
        print("Je hebt nu " + str(coins) + " munten")
        tc = int(tc) + bonus
        write_value(DAILY_FILE, tc)
        write_value(counter_file, int(read_value(counter_file)) + 1)
        print("Je hebt een " + name + " gekocht je krijgt nu " + str(tc) +
              " munten per dag. Je balans is: " + str(coins) + " munten.")
        print("Wil je nog iets anders kopen ?")
else:
    print("Je kan niks meer kopen. \n Kom morgen terug en krijg meer munten!")
| gpl-2.0 |
raccoongang/edx-platform | lms/djangoapps/shoppingcart/processors/tests/test_CyberSource.py | 10 | 13182 | """
Tests for the CyberSource processor handler
"""
from collections import OrderedDict
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from mock import Mock, patch
from shoppingcart.models import Order, OrderItem
from shoppingcart.processors.CyberSource import (
REASONCODE_MAP,
get_processor_decline_html,
get_processor_exception_html,
payment_accepted,
process_postpay_callback,
processor_hash,
record_purchase,
render_purchase_form_html,
sign,
verify_signatures
)
from shoppingcart.processors.exceptions import (
CCProcessorDataException,
CCProcessorException,
CCProcessorSignatureException,
CCProcessorWrongAmountException
)
from shoppingcart.processors.helpers import get_processor_config
from student.tests.factories import UserFactory
TEST_CC_PROCESSOR_NAME = "CyberSource"
# Fake processor configuration injected via @override_settings below; the
# 'microsites' entry lets the tests exercise per-site overrides of the
# shared secret / merchant id.
TEST_CC_PROCESSOR = {
    'CyberSource': {
        'SHARED_SECRET': 'secret',
        'MERCHANT_ID': 'edx_test',
        'SERIAL_NUMBER': '12345',
        'ORDERPAGE_VERSION': '7',
        'PURCHASE_ENDPOINT': '',
        'microsites': {
            'test_site': {
                'SHARED_SECRET': 'secret_override',
                'MERCHANT_ID': 'edx_test_override',
                'SERIAL_NUMBER': '12345_override',
                'ORDERPAGE_VERSION': '7',
                'PURCHASE_ENDPOINT': '',
            }
        }
    }
}
def fake_site(name, default=None):  # pylint: disable=unused-argument
    """Stand-in for site_configuration.helpers.get_value.

    Only the ``cybersource_config_key`` lookup is answered, with the
    microsite key the tests expect; every other name yields ``None``.
    """
    return 'test_site' if name == 'cybersource_config_key' else None
@override_settings(
    CC_PROCESSOR_NAME=TEST_CC_PROCESSOR_NAME,
    CC_PROCESSOR=TEST_CC_PROCESSOR
)
class CyberSourceTests(TestCase):
    """Unit tests for the CyberSource payment-processor helpers."""

    def test_override_settings(self):
        """The fake processor config from this module is what Django sees."""
        self.assertEqual(settings.CC_PROCESSOR['CyberSource']['MERCHANT_ID'], 'edx_test')
        self.assertEqual(settings.CC_PROCESSOR['CyberSource']['SHARED_SECRET'], 'secret')

    def test_site_no_override_settings(self):
        """Without a site override, the base processor config is returned."""
        self.assertEqual(get_processor_config()['MERCHANT_ID'], 'edx_test')
        self.assertEqual(get_processor_config()['SHARED_SECRET'], 'secret')

    @patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_site)
    def test_site_override_settings(self):
        """With fake_site patched in, the 'test_site' microsite values win."""
        self.assertEqual(get_processor_config()['MERCHANT_ID'], 'edx_test_override')
        self.assertEqual(get_processor_config()['SHARED_SECRET'], 'secret_override')

    def test_hash(self):
        """
        Tests the hash function. Basically just hardcodes the answer.
        """
        self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')
        self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')

    def test_sign_then_verify(self):
        """
        "loopback" test:
        Tests the that the verify function verifies parameters signed by the sign function
        """
        params = OrderedDict()
        params['amount'] = "12.34"
        params['currency'] = 'usd'
        params['orderPage_transactionType'] = 'sale'
        params['orderNumber'] = "567"
        verify_signatures(sign(params), signed_fields_key='orderPage_signedFields',
                          full_sig_key='orderPage_signaturePublic')
        # if the above verify_signature fails it will throw an exception, so basically we're just
        # testing for the absence of that exception. the trivial assert below does that
        self.assertEqual(1, 1)

    def test_sign_then_verify_unicode(self):
        """
        Similar to the test above, which loops back to the original.
        Testing to make sure we can handle unicode parameters
        """
        params = {
            'card_accountNumber': '1234',
            'card_cardType': '001',
            'billTo_firstName': u'\u2699',
            'billTo_lastName': u"\u2603",
            'orderNumber': '1',
            'orderCurrency': 'usd',
            'decision': 'ACCEPT',
            'ccAuthReply_amount': '0.00'
        }
        verify_signatures(sign(params), signed_fields_key='orderPage_signedFields',
                          full_sig_key='orderPage_signaturePublic')
        # if the above verify_signature fails it will throw an exception, so basically we're just
        # testing for the absence of that exception. the trivial assert below does that
        self.assertEqual(1, 1)

    def test_verify_exception(self):
        """
        Tests that failure to verify raises the proper CCProcessorSignatureException
        """
        params = OrderedDict()
        params['a'] = 'A'
        params['b'] = 'B'
        params['signedFields'] = 'A,B'
        params['signedDataPublicSignature'] = 'WONTVERIFY'
        with self.assertRaises(CCProcessorSignatureException):
            verify_signatures(params)

    def test_get_processor_decline_html(self):
        """
        Tests the processor decline html message
        """
        # Every mapped reason code should surface its decision, reason text,
        # code and the support address in the rendered HTML.
        DECISION = 'REJECT'
        for code, reason in REASONCODE_MAP.iteritems():
            params = {
                'decision': DECISION,
                'reasonCode': code,
            }
            html = get_processor_decline_html(params)
            self.assertIn(DECISION, html)
            self.assertIn(reason, html)
            self.assertIn(code, html)
            self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)

    def test_get_processor_exception_html(self):
        """
        Tests the processor exception html message
        """
        # NOTE(review): the loop variable shadows the builtin 'type'.
        for type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]:
            error_msg = "An exception message of with exception type {0}".format(str(type))
            exception = type(error_msg)
            html = get_processor_exception_html(exception)
            self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)
            self.assertIn('Sorry!', html)
            self.assertIn(error_msg, html)
        # test base case
        self.assertIn("EXCEPTION!", get_processor_exception_html(CCProcessorException()))

    def test_record_purchase(self):
        """
        Tests record_purchase with good and without returned CCNum
        """
        student1 = UserFactory()
        student1.save()
        student2 = UserFactory()
        student2.save()
        params_cc = {'card_accountNumber': '1234', 'card_cardType': '001', 'billTo_firstName': student1.first_name}
        params_nocc = {'card_accountNumber': '', 'card_cardType': '002', 'billTo_firstName': student2.first_name}
        order1 = Order.get_cart_for_user(student1)
        order2 = Order.get_cart_for_user(student2)
        record_purchase(params_cc, order1)
        record_purchase(params_nocc, order2)
        # both orders should be marked purchased regardless of CC number presence
        self.assertEqual(order1.bill_to_first, student1.first_name)
        self.assertEqual(order1.status, 'purchased')
        order2 = Order.objects.get(user=student2)
        self.assertEqual(order2.bill_to_first, student2.first_name)
        self.assertEqual(order2.status, 'purchased')

    def test_payment_accepted_invalid_dict(self):
        """
        Tests exception is thrown when params to payment_accepted don't have required key
        or have an bad value
        """
        baseline = {
            'orderNumber': '1',
            'orderCurrency': 'usd',
            'decision': 'ACCEPT',
        }
        wrong = {
            'orderNumber': 'k',
        }
        # tests for missing key
        for key in baseline:
            params = baseline.copy()
            del params[key]
            with self.assertRaises(CCProcessorDataException):
                payment_accepted(params)
        # tests for keys with value that can't be converted to proper type
        for key in wrong:
            params = baseline.copy()
            params[key] = wrong[key]
            with self.assertRaises(CCProcessorDataException):
                payment_accepted(params)

    def test_payment_accepted_order(self):
        """
        Tests payment_accepted cases with an order
        """
        student1 = UserFactory()
        student1.save()
        order1 = Order.get_cart_for_user(student1)
        params = {
            'card_accountNumber': '1234',
            'card_cardType': '001',
            'billTo_firstName': student1.first_name,
            'billTo_lastName': u"\u2603",
            'orderNumber': str(order1.id),
            'orderCurrency': 'usd',
            'decision': 'ACCEPT',
            'ccAuthReply_amount': '0.00'
        }
        # tests for an order number that doesn't match up
        params_bad_ordernum = params.copy()
        params_bad_ordernum['orderNumber'] = str(order1.id + 10)
        with self.assertRaises(CCProcessorDataException):
            payment_accepted(params_bad_ordernum)
        # tests for a reply amount of the wrong type
        params_wrong_type_amt = params.copy()
        params_wrong_type_amt['ccAuthReply_amount'] = 'ab'
        with self.assertRaises(CCProcessorDataException):
            payment_accepted(params_wrong_type_amt)
        # tests for a reply amount with the wrong value
        params_wrong_amt = params.copy()
        params_wrong_amt['ccAuthReply_amount'] = '1.00'
        with self.assertRaises(CCProcessorWrongAmountException):
            payment_accepted(params_wrong_amt)
        # tests for a not accepted order
        params_not_accepted = params.copy()
        params_not_accepted['decision'] = "REJECT"
        self.assertFalse(payment_accepted(params_not_accepted)['accepted'])
        # finally, tests an accepted order
        self.assertTrue(payment_accepted(params)['accepted'])

    @patch('shoppingcart.processors.CyberSource.render_to_string', autospec=True)
    def test_render_purchase_form_html(self, render):
        """
        Tests the rendering of the purchase form
        """
        student1 = UserFactory()
        student1.save()
        order1 = Order.get_cart_for_user(student1)
        item1 = OrderItem(order=order1, user=student1, unit_cost=1.0, line_cost=1.0)
        item1.save()
        render_purchase_form_html(order1)
        # inspect the template and context handed to render_to_string
        ((template, context), render_kwargs) = render.call_args
        self.assertEqual(template, 'shoppingcart/cybersource_form.html')
        self.assertDictContainsSubset({'amount': '1.00',
                                       'currency': 'usd',
                                       'orderPage_transactionType': 'sale',
                                       'orderNumber': str(order1.id)},
                                      context['params'])

    def test_process_postpay_exception(self):
        """
        Tests the exception path of process_postpay_callback
        """
        baseline = {
            'orderNumber': '1',
            'orderCurrency': 'usd',
            'decision': 'ACCEPT',
        }
        # tests for missing key
        for key in baseline:
            params = baseline.copy()
            del params[key]
            result = process_postpay_callback(params)
            self.assertFalse(result['success'])
            self.assertIsNone(result['order'])
            self.assertIn('error_msg', result['error_html'])

    @patch('shoppingcart.processors.CyberSource.verify_signatures', Mock(return_value=True))
    def test_process_postpay_accepted(self):
        """
        Tests the ACCEPTED path of process_postpay
        """
        student1 = UserFactory()
        student1.save()
        order1 = Order.get_cart_for_user(student1)
        params = {
            'card_accountNumber': '1234',
            'card_cardType': '001',
            'billTo_firstName': student1.first_name,
            'orderNumber': str(order1.id),
            'orderCurrency': 'usd',
            'decision': 'ACCEPT',
            'ccAuthReply_amount': '0.00'
        }
        result = process_postpay_callback(params)
        self.assertTrue(result['success'])
        self.assertEqual(result['order'], order1)
        order1 = Order.objects.get(id=order1.id)  # reload from DB to capture side-effect of process_postpay_callback
        self.assertEqual(order1.status, 'purchased')
        self.assertFalse(result['error_html'])

    @patch('shoppingcart.processors.CyberSource.verify_signatures', Mock(return_value=True))
    def test_process_postpay_not_accepted(self):
        """
        Tests the non-ACCEPTED path of process_postpay
        """
        student1 = UserFactory()
        student1.save()
        order1 = Order.get_cart_for_user(student1)
        params = {
            'card_accountNumber': '1234',
            'card_cardType': '001',
            'billTo_firstName': student1.first_name,
            'orderNumber': str(order1.id),
            'orderCurrency': 'usd',
            'decision': 'REJECT',
            'ccAuthReply_amount': '0.00',
            'reasonCode': '207'
        }
        result = process_postpay_callback(params)
        # the order must stay in the cart and the reason text must be surfaced
        self.assertFalse(result['success'])
        self.assertEqual(result['order'], order1)
        self.assertEqual(order1.status, 'cart')
        self.assertIn(REASONCODE_MAP['207'], result['error_html'])
| agpl-3.0 |
kleintom/dxr | tests/test_query.py | 9 | 1458 | """Tests for the dxr.build module
Much of the module is covered in the course of the integration tests that test
everything else. Here are a few unit tests.
"""
from unittest import TestCase
from nose.tools import eq_
from dxr.query import fix_extents_overlap
class FixExtentsOverlapTests(TestCase):
    """Unit tests for fix_extents_overlap()."""

    def _assert_fixed(self, extents, expected):
        """Run fix_extents_overlap over *extents* and compare to *expected*."""
        eq_(list(fix_extents_overlap(extents)), expected)

    def test_duplicate(self):
        """Identical extents collapse into a single one."""
        self._assert_fixed([(22, 34), (22, 34)], [(22, 34)])

    def test_disjoint(self):
        """Non-touching extents pass through untouched."""
        self._assert_fixed([(1, 7), (8, 12)], [(1, 7), (8, 12)])

    def test_overlap(self):
        """Extents that overlap are merged into one span."""
        self._assert_fixed([(1, 7), (5, 8)], [(1, 8)])

    def test_adjacent(self):
        """Touching extents get coalesced.

        Not essential behavior, merely pleasant.
        """
        self._assert_fixed([(1, 7), (7, 10)], [(1, 10)])

    def test_short(self):
        """Empty and single-element inputs survive unharmed."""
        self._assert_fixed([], [])
        self._assert_fixed([(1, 2)], [(1, 2)])

    def test_zero(self):
        """Highlighting that begins at offset 0 still works."""
        self._assert_fixed([(0, 3), (2, 5), (11, 14)], [(0, 5), (11, 14)])
| mit |
napkindrawing/ansible | lib/ansible/modules/cloud/amazon/s3_sync.py | 43 | 18266 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_sync
short_description: Efficiently upload multiple files to S3
description:
- The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing,
inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping.
version_added: "2.3"
options:
mode:
description:
- sync direction.
required: true
default: 'push'
choices: [ push ]
file_change_strategy:
description:
- Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched- they are fully skipped or fully uploaded.
- date_size will upload if file sizes don't match or if local file modified date is newer than s3's version
- checksum will compare etag values based on s3's implementation of chunked md5s.
- force will always upload all files.
required: false
default: 'date_size'
choices: [ force, checksum, date_size ]
bucket:
description:
- Bucket name.
required: true
key_prefix:
description:
- In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary.
required: false
file_root:
description:
- File/directory path for synchronization. This is a local path.
- This root path is scrubbed from the key name, so subdirectories will remain as keys.
required: true
permission:
description:
- Canned ACL to apply to synced files.
- Changing this ACL only changes newly synced files, it does not trigger a full reupload.
required: false
choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ]
mime_map:
description:
- >
Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
For example C({".txt": "application/text", ".yml": "appication/text"})
required: false
include:
description:
- Shell pattern-style file matching.
- Used before exclude to determine eligible files (for instance, only "*.gif")
- For multiple patterns, comma-separate them.
required: false
default: "*"
exclude:
description:
- Shell pattern-style file matching.
- Used after include to remove files (for instance, skip "*.txt")
- For multiple patterns, comma-separate them.
required: false
default: ".*"
author: tedder
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: basic upload
s3_sync:
bucket: tedder
file_root: roles/s3/files/
- name: all the options
s3_sync:
bucket: tedder
file_root: roles/s3/files
mime_map:
.yml: application/text
.json: application/text
key_prefix: config_files/web
file_change_strategy: force
permission: public-read
include: "*"
exclude: "*.txt,.*"
'''
RETURN = '''
filelist_initial:
description: file listing (dicts) from inital globbing
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"modified_epoch": 1477416706
}]
filelist_local_etag:
description: file listing (dicts) including calculated local etag
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_s3:
description: file listing (dicts) including information about previously-uploaded versions
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_typed:
description: file listing (dicts) with calculated or overridden mime types
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706
}]
filelist_actionable:
description: file listing (dicts) of files that will be uploaded after the strategy decision
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477931256,
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931256 / 1477929260"
}]
uploaded:
description: file listing (dicts) of files that were actually uploaded
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931637 / 1477931489"
}]
'''
import os
import stat as osstat # os.stat constants
import mimetypes
import datetime
from dateutil import tz
import hashlib
import fnmatch
import traceback
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec
# import a class, otherwise we'll use a fully qualified path
# from ansible.module_utils.ec2 import AWSRetry
import ansible.module_utils.ec2
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def boto_exception(err):
    """Extract a human-readable message from a boto/botocore error object.

    Prefers the rich 'error_message' attribute, then the legacy 'message'
    attribute (augmented with str() and the error's type), and finally
    falls back to a generic "<Exception>: err" rendering.
    """
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return '{0} {1} - {2}'.format(err.message, err, type(err))
    return '%s: %s' % (Exception, err)
# the following function, calculate_multipart_etag, is from tlastowka
# on github and is used under its (compatible) GPL license. So this
# license applies to the following function.
# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
#
# calculate_multipart_etag Copyright (C) 2015
# Tony Lastowka <tlastowka at gmail dot com>
# https://github.com/tlastowka
#
#
# calculate_multipart_etag is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# calculate_multipart_etag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024


def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
    """
    calculates a multipart upload etag for amazon s3

    Arguments:

    source_path -- The file to calculate the etag for
    chunk_size  -- The chunk size to calculate for.
    """
    # One md5 per chunk, exactly as S3 computes multipart ETags.
    part_hashes = []
    with open(source_path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            part_hashes.append(hashlib.md5(chunk))
    if len(part_hashes) == 1:
        # Single-part upload: plain quoted md5.
        return '"{}"'.format(part_hashes[0].hexdigest())
    # Multipart (or empty) upload: md5 of the concatenated part digests,
    # suffixed with the part count.
    combined = hashlib.md5(b"".join(part.digest() for part in part_hashes))
    return '"{}-{}"'.format(combined.hexdigest(), len(part_hashes))
def gather_files(fileroot, include=None, exclude=None):
    """Walk *fileroot* and describe every file that passes the filters.

    *include*/*exclude* are comma-separated shell glob patterns matched
    against the bare file name; exclude is applied after include, so it
    wins (matching the module docs).  Each returned dict carries:
    'fullpath', 'chopped_path' (relative to *fileroot*), 'modified_epoch'
    (mtime in epoch seconds) and 'bytes' (size).
    """
    ret = []
    for (dirpath, dirnames, filenames) in os.walk(fileroot):
        for fn in filenames:
            # include: keep only names matching at least one pattern.
            if include and not any(fnmatch.fnmatch(fn, pat) for pat in include.split(',')):
                continue
            # exclude: drop the file again if any pattern matches.
            if exclude and any(fnmatch.fnmatch(fn, pat) for pat in exclude.split(',')):
                continue
            fullpath = os.path.join(dirpath, fn)
            fstat = os.stat(fullpath)
            ret.append({
                'fullpath': fullpath,
                'chopped_path': os.path.relpath(fullpath, start=fileroot),
                'modified_epoch': fstat[osstat.ST_MTIME],
                'bytes': fstat[osstat.ST_SIZE],
            })
    return ret
def calculate_s3_path(filelist, key_prefix=''):
    """Return copies of *filelist* entries with their bucket key filled in.

    The caller's dicts are never mutated; each copy gains an 's3_path'
    built from *key_prefix* joined with the path relative to the sync root.
    """
    def _with_key(entry):
        augmented = dict(entry)  # copy: leave the input dict untouched
        augmented['s3_path'] = os.path.join(key_prefix, entry['chopped_path'])
        return augmented

    return [_with_key(entry) for entry in filelist]
def calculate_local_etag(filelist, key_prefix=''):
    """Annotate each entry with the S3-style etag of its local file.

    Really, "calculate md5" -- but AWS uses its own chunked-md5 format, so
    we call it a "local etag".  Input dicts are copied, never mutated.
    TODO optimization: only calculate if the remote key exists.
    """
    annotated = []
    for entry in filelist:
        augmented = dict(entry)  # keep the caller's dict pristine
        augmented['local_etag'] = calculate_multipart_etag(augmented['fullpath'])
        annotated.append(augmented)
    return annotated
def determine_mimetypes(filelist, override_map):
    """Annotate each file entry with 'mime_type' and 'encoding'.

    The extension-to-type *override_map* (e.g. {".yml": "application/text"})
    wins over sniffing; otherwise mimetypes.guess_type is consulted, and
    anything unidentifiable falls back to 'application/octet-stream'.
    Input dicts are copied rather than mutated.
    """
    ret = []
    for fileentry in filelist:
        retentry = fileentry.copy()
        localfile = fileentry['fullpath']
        # reminder: os.path.splitext keeps the dot -- '.txt', not 'txt'.
        _, file_extension = os.path.splitext(localfile)
        if override_map and override_map.get(file_extension):
            # override? use it.
            retentry['mime_type'] = override_map[file_extension]
            # BUG FIX: the original left 'encoding' unset on this branch,
            # giving overridden entries a different schema from sniffed ones.
            retentry['encoding'] = None
        else:
            # else sniff it
            retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
        # guess_type may return None or ''; octet-stream is a safe default.
        if not retentry['mime_type']:
            retentry['mime_type'] = 'application/octet-stream'
        ret.append(retentry)
    return ret
def head_s3(s3, bucket, s3keys):
    """For each entry, attach the remote object's HEAD metadata as 's3_head'.

    Entries are copied, never mutated.  A missing remote key (HTTP 404)
    simply leaves 's3_head' unset on the copy; any other ClientError is
    re-raised wrapped in a plain Exception.
    """
    retkeys = []
    for entry in s3keys:
        retentry = entry.copy()
        # don't modify the input dict
        try:
            retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
        except botocore.exceptions.ClientError as err:
            # 404 == object not uploaded yet: expected, not an error here.
            if (hasattr(err, 'response') and
                    'ResponseMetadata' in err.response and
                    'HTTPStatusCode' in err.response['ResponseMetadata'] and
                    str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
                pass
            else:
                raise Exception(err)
            # error_msg = boto_exception(err)
            # return {'error': error_msg}
        retkeys.append(retentry)
    return retkeys
def filter_list(s3, bucket, s3filelist, strategy):
    """Reduce *s3filelist* to the entries that actually need uploading.

    strategy:
      force     -- keep everything.
      checksum  -- skip entries whose remote ETag equals the local etag.
      date_size -- skip entries that are unchanged: upload when the sizes
                   differ OR the local file is newer (per the module docs).
    Entries are tagged with '_strategy' and, for date_size, 'whytime' /
    'whysize' diagnostics explaining the decision.
    """
    keeplist = list(s3filelist)
    for e in keeplist:
        e['_strategy'] = strategy
    # init/fetch info from S3 if we're going to use it for comparisons
    if not strategy == 'force':
        keeplist = head_s3(s3, bucket, s3filelist)
    # now actually run the strategies
    if strategy == 'checksum':
        for entry in keeplist:
            if entry.get('s3_head'):
                # since we have a remote s3 object, compare the values.
                if entry['s3_head']['ETag'] == entry['local_etag']:
                    # files match, so skip the entry
                    entry['skip_flag'] = True
            # no remote object or no local etag: keep the entry for upload.
    elif strategy == 'date_size':
        for entry in keeplist:
            if entry.get('s3_head'):
                local_modified_epoch = entry['modified_epoch']
                local_size = entry['bytes']
                # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
                remote_modified_datetime = entry['s3_head']['LastModified']
                delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
                remote_modified_epoch = delta.seconds + (delta.days * 86400)
                remote_size = entry['s3_head']['ContentLength']
                entry['whytime'] = '{} / {}'.format(local_modified_epoch, remote_modified_epoch)
                entry['whysize'] = '{} / {}'.format(local_size, remote_size)
                # BUG FIX: this used 'or', which skipped any same-size file
                # even when the local copy was newer -- contradicting the
                # documented date_size semantics ("upload if sizes don't
                # match or local is newer").  Skip only when BOTH checks
                # say the file is unchanged.
                if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
                    entry['skip_flag'] = True
            else:
                entry['why'] = "no s3_head"
    # else: 'force' (or unknown strategy) -- nothing is skipped.
    # prune 'please skip' entries, if any.
    return [x for x in keeplist if not x.get('skip_flag')]
def upload_files(s3, bucket, filelist, params):
    """Push every entry in *filelist* to *bucket* and return what was sent.

    Each object is uploaded with its sniffed/overridden Content-Type and,
    when the module's 'permission' parameter is set, a canned ACL.
    Errors are deliberately not caught here -- main() handles ClientError.
    """
    uploaded = []
    for entry in filelist:
        extra = {'ContentType': entry['mime_type']}
        acl = params.get('permission')
        if acl:
            extra['ACL'] = acl
        s3.upload_file(entry['fullpath'], bucket, entry['s3_path'],
                       ExtraArgs=extra, Callback=None, Config=None)
        uploaded.append(entry)
    return uploaded
def main():
    """Entry point for the s3_sync module: parse arguments and run the push."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        mode=dict(choices=['push'], default='push'),
        file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
        bucket=dict(required=True),
        key_prefix=dict(required=False, default=''),
        file_root=dict(required=True, type='path'),
        permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
                                                 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
        retries=dict(required=False),
        mime_map=dict(required=False, type='dict'),
        exclude=dict(required=False, default=".*"),
        include=dict(required=False, default="*"),
        # future options: cache_control (string or map, perhaps), encoding, metadata, storage_class, retries
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    result = {}
    mode = module.params['mode']
    region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Region must be specified")
    s3 = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    if mode == 'push':
        try:
            # pipeline: glob -> mime-type -> s3 key -> local etag -> filter -> upload
            result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
            result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
            result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
            result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
            result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
            result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
            # mark changed if we actually upload something.
            if result.get('uploads'):
                result['changed'] = True
            # result.update(filelist=actionable_filelist)
        except botocore.exceptions.ClientError as err:
            error_msg = boto_exception(err)
            # BUG FIX: camel_dict_to_snake_dict was referenced unqualified but
            # never imported, so any ClientError turned into a NameError; call
            # it through the already-imported ansible.module_utils.ec2 module.
            module.fail_json(msg=error_msg, exception=traceback.format_exc(),
                             **ansible.module_utils.ec2.camel_dict_to_snake_dict(err.response))
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
golaizola/pelisalacarta-xbmc | pelisalacarta/channels/voxfilme.py | 1 | 5019 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para voxfilme
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "voxfilme"
__category__ = "F,S"
__type__ = "generic"
__title__ = "voxfilmeonline.com"
__language__ = "RO"
__creationdate__ = "20120703"
DEBUG = config.get_setting("debug")
def isGeneric():
    """Flag this channel as compatible with pelisalacarta's generic handler."""
    return True
def mainlist(item):
    """Channel entry point: point the item at the site root and show the front page."""
    logger.info("[voxfilme.py] mainlist")
    item.url = "http://voxfilmeonline.com/"
    return novedades(item)
def novedades(item):
    """Scrape the front page: one entry per movie plus a pagination entry."""
    logger.info("[voxfilme.py] novedades")
    itemlist = []
    data = scrapertools.cachePage(item.url)

    # Movie gallery entries: (url, title, thumbnail) triples.
    patron = ('<div class="gallery.*?" id="post-\d+">[^<]+'
              '<a href="([^"]+)".*?title="([^"]+)">+<img src="([^"]+)".*?')
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)
    for url, title, thumbnail in matches:
        if DEBUG:
            logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append(Item(channel=__channel__, action="findvideos", title=title,
                             url=url, thumbnail=thumbnail, plot="", folder=True))

    # Pagination link, e.g.: <a href="http://unsoloclic.info/page/2/" >Peliculas anteriores</a>
    # NOTE: the site spells the link text "Inaninte"; the regex must keep that typo.
    patron = '<div id="older"[^<]+<a href="([^"]+)" >Inaninte</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)
    for next_page in matches:
        url = urlparse.urljoin(item.url, next_page)
        title = "Inainte"
        if DEBUG:
            logger.info("url=["+url+"], title=["+title+"], thumbnail=[]")
        itemlist.append(Item(channel=__channel__, action="novedades", title=title,
                             url=url, thumbnail="", plot="", folder=True))
    return itemlist
def findvideos(item):
    """Scrape a movie page and return one playable Item per hosted video link."""
    logger.info("[voxfilme.py] findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist = []
    # Linkbucks redirect links embedded in the page, e.g.
    # <a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="..." /></a>
    patron = '<iframe href="(http.//[a-z0-9]+.linkbucks.c[^"]+)[^>]+>"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    # BUG FIX: the pattern has a single capture group, so findall() yields plain
    # strings; the old "for url,servertag,serverthumb in matches" unpacking
    # raised ValueError on any match. Iterate the URLs directly instead.
    for url in matches:
        itemlist.append(Item(channel=__channel__, action="play", server="linkbucks",
                             title="[linkbucks]", url=url, thumbnail="",
                             plot=item.plot, folder=False))
    # servertools is already imported at module level; no local re-import needed.
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        if videoitem.server != "linkbucks":
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.title = "["+videoitem.server+"]"
    return itemlist
def play(item):
    """Resolve the final playable URL(s) for *item*.

    Non-linkbucks items are already playable and are returned as-is;
    linkbucks items are followed through linkbucks (and optionally adf.ly)
    to reach the real hosted video.
    """
    logger.info("[voxfilme.py] play")
    # Guard clause: anything that is not a linkbucks redirect is final.
    if item.server != "linkbucks":
        return [item]

    logger.info("Es linkbucks")
    # Follow the linkbucks redirect to the real target.
    from servers import linkbucks
    location = linkbucks.get_long_url(item.url)
    logger.info("location="+location)
    # The target may itself be an adf.ly interstitial; skip past it too.
    if location.startswith("http://adf"):
        from servers import adfly
        location = adfly.get_long_url(location)
        logger.info("location="+location)
    resolved = servertools.find_video_items(data=location)
    for videoitem in resolved:
        videoitem.channel = __channel__
        videoitem.folder = False
    return resolved
# Automatic channel verification: this function must return True if the channel works.
def test():
    """Self-check: True if any front-page entry yields at least one playable video."""
    novedades_items = mainlist(Item())
    for singleitem in novedades_items:
        mirrors_items = findvideos(item=singleitem)
        for mirror_item in mirrors_items:
            video_items = play(mirror_item)
            # Truthiness check replaces len(...) > 0; removed unused 'bien' flag.
            if video_items:
                return True
    return False
| gpl-3.0 |
savoirfairelinux/django | tests/utils_tests/test_termcolors.py | 134 | 7045 | import unittest
from django.utils.termcolors import (
DARK_PALETTE, DEFAULT_PALETTE, LIGHT_PALETTE, NOCOLOR_PALETTE, PALETTES,
colorize, parse_color_setting,
)
class TermColorTests(unittest.TestCase):
    """Tests for django.utils.termcolors parse_color_setting() and colorize()."""

    def test_empty_string(self):
        self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])

    def test_simple_palette(self):
        self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
        self.assertIsNone(parse_color_setting('nocolor'))

    def test_fg(self):
        self.assertEqual(
            parse_color_setting('error=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_fg_bg(self):
        self.assertEqual(
            parse_color_setting('error=green/blue'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )

    def test_fg_opts(self):
        self.assertEqual(
            parse_color_setting('error=green,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green,bold,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink', 'bold')})
        )

    def test_fg_bg_opts(self):
        self.assertEqual(
            parse_color_setting('error=green/blue,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue,bold,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')})
        )

    def test_override_palette(self):
        self.assertEqual(
            parse_color_setting('light;error=green'),
            dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'})
        )

    def test_override_nocolor(self):
        self.assertEqual(
            parse_color_setting('nocolor;error=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_reverse_override(self):
        # A later palette name wins over earlier role overrides.
        self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])

    def test_multiple_roles(self):
        self.assertEqual(
            parse_color_setting('error=green;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})
        )

    def test_override_with_multiple_roles(self):
        self.assertEqual(
            parse_color_setting('light;error=green;sql_field=blue'),
            dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})
        )

    def test_empty_definition(self):
        self.assertIsNone(parse_color_setting(';'))
        self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
        self.assertIsNone(parse_color_setting(';;;'))

    def test_empty_options(self):
        self.assertEqual(
            parse_color_setting('error=green,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,,,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,,blink,,'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_bad_palette(self):
        self.assertIsNone(parse_color_setting('unknown'))

    def test_bad_role(self):
        self.assertIsNone(parse_color_setting('unknown='))
        self.assertIsNone(parse_color_setting('unknown=green'))
        self.assertEqual(
            parse_color_setting('unknown=green;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )

    def test_bad_color(self):
        self.assertIsNone(parse_color_setting('error='))
        self.assertEqual(
            parse_color_setting('error=;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )
        self.assertIsNone(parse_color_setting('error=unknown'))
        self.assertEqual(
            parse_color_setting('error=unknown;sql_field=blue'),
            dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=green/unknown'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue/something'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=green/blue/something,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})
        )

    def test_bad_option(self):
        self.assertEqual(
            parse_color_setting('error=green,unknown'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=green,unknown,blink'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_role_case(self):
        self.assertEqual(
            parse_color_setting('ERROR=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('eRrOr=green'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )

    def test_color_case(self):
        self.assertEqual(
            parse_color_setting('error=GREEN'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=GREEN/BLUE'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )
        self.assertEqual(
            parse_color_setting('error=gReEn'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'})
        )
        self.assertEqual(
            parse_color_setting('error=gReEn/bLuE'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'})
        )

    def test_opts_case(self):
        self.assertEqual(
            parse_color_setting('error=green,BLINK'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )
        self.assertEqual(
            parse_color_setting('error=green,bLiNk'),
            dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)})
        )

    def test_colorize_empty_text(self):
        self.assertEqual(colorize(text=None), '\x1b[m\x1b[0m')
        self.assertEqual(colorize(text=''), '\x1b[m\x1b[0m')
        # BUG FIX: ('noreset') is just a parenthesized string, not a tuple --
        # the option check previously only passed by accidental substring
        # matching. Pass a real one-element tuple.
        self.assertEqual(colorize(text=None, opts=('noreset',)), '\x1b[m')
        self.assertEqual(colorize(text='', opts=('noreset',)), '\x1b[m')
| bsd-3-clause |
ahmadio/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | 6 | 28232 | """
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
    """
    Defines the Instructor Dashboard view type that is shown as a course tab.
    """
    type = "instructor"
    title = ugettext_noop('Instructor')
    view_name = "instructor_dashboard"
    is_dynamic = True  # The "Instructor" tab is instead dynamically added when it is enabled

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument,redefined-outer-name
        """
        Returns true if the specified user has staff access.
        """
        # Anonymous/missing user can never see the tab.
        if not user:
            return False
        return bool(has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
    """ Display the instructor dashboard for a course.

    Assembles the ordered list of dashboard sections the requesting user may
    see (based on role flags and feature switches) and renders the template.
    Raises Http404 for users without staff access.
    """
    try:
        course_key = CourseKey.from_string(course_id)
    except InvalidKeyError:
        # Malformed course id in the URL; treated as a server error here.
        log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
        return HttpResponseServerError()
    course = get_course_by_id(course_key, depth=0)
    # Per-role flags consulted by each section to decide which controls to show.
    access = {
        'admin': request.user.is_staff,
        'instructor': bool(has_access(request.user, 'instructor', course)),
        'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
        'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
        'staff': bool(has_access(request.user, 'staff', course)),
        'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
    }
    # Staff access is the minimum requirement for the whole dashboard.
    if not access['staff']:
        raise Http404()
    is_white_label = CourseMode.is_white_label(course_key)
    # Sections every staff member gets; list order is the tab order in the UI.
    sections = [
        _section_course_info(course, access),
        _section_membership(course, access, is_white_label),
        _section_cohort_management(course, access),
        _section_student_admin(course, access),
        _section_data_download(course, access),
    ]
    analytics_dashboard_message = None
    if settings.ANALYTICS_DASHBOARD_URL:
        # Construct a URL to the external analytics dashboard
        analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
        link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
        analytics_dashboard_message = _(
            "To gain insights into student enrollment and participation {link_start}"
            "visit {analytics_dashboard_name}, our new course analytics product{link_end}."
        )
        analytics_dashboard_message = analytics_dashboard_message.format(
            link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
        # Temporarily show the "Analytics" section until we have a better way of linking to Insights
        sections.append(_section_analytics(course, access))
    # Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
    course_mode_has_price = False
    paid_modes = CourseMode.paid_modes_for_course(course_key)
    if len(paid_modes) == 1:
        course_mode_has_price = True
    elif len(paid_modes) > 1:
        # More than one paid mode is a misconfiguration: e-commerce stays hidden.
        log.error(
            u"Course %s has %s course modes with payment options. Course must only have "
            u"one paid course mode to enable eCommerce options.",
            unicode(course_key), len(paid_modes)
        )
    if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
        sections.insert(3, _section_extensions(course))
    # Gate access to course email by feature flag & by course-specific authorization
    if bulk_email_is_enabled_for_course(course_key):
        sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by featue flag and staff authorization
    if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
        sections.append(_section_metrics(course, access))
    # Gate access to Ecommerce tab
    if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
        sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
    # Gate access to Proctoring tab
    # only global staff (user.is_staff) is allowed to see this tab
    can_see_proctoring = (
        settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and
        course.enable_proctored_exams and
        request.user.is_staff
    )
    if can_see_proctoring:
        sections.append(_section_proctoring(course, access))
    # Certificates panel
    # This is used to generate example certificates
    # and enable self-generated certificates for a course.
    certs_enabled = CertificateGenerationConfiguration.current().enabled
    if certs_enabled and access['admin']:
        sections.append(_section_certificates(course))
    # Buttons that kick off per-student work are disabled for large courses.
    disable_buttons = not _is_small_course(course_key)
    context = {
        'course': course,
        'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
        'studio_url': get_studio_url(course, 'course'),
        'sections': sections,
        'disable_buttons': disable_buttons,
        'analytics_dashboard_message': analytics_dashboard_message
    }
    return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
    """ Provide data for the corresponding dashboard section """
    # paid_mode: the course's single paid CourseMode (source of the price).
    # coupons_enabled / reports_enabled: toggle the coupon and report UI blocks.
    course_key = course.id
    coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
    course_price = paid_mode.min_price
    total_amount = None
    if access['finance_admin']:
        # Revenue total = single-seat purchases + bulk registration-code purchases.
        single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
        bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
        total_amount = single_purchase_total + bulk_purchase_total
    # Mostly endpoint URLs consumed by the e-commerce tab's JavaScript.
    section_data = {
        'section_key': 'e-commerce',
        'section_display_name': _('E-Commerce'),
        'access': access,
        'course_id': unicode(course_key),
        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
        'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
        'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
        'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
        'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
        'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
        'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
        'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
        'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
        'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
        'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
        'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
        'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
                                                       kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
        'coupons': coupons,
        'sales_admin': access['sales_admin'],
        'coupons_enabled': coupons_enabled,
        'reports_enabled': reports_enabled,
        'course_price': course_price,
        'total_amount': total_amount
    }
    return section_data
def _section_proctoring(course, access):
    """Build the data dict backing the 'Proctoring' dashboard section."""
    return {
        'section_key': 'proctoring',
        'section_display_name': _('Proctoring'),
        'access': access,
        'course_id': unicode(course.id)
    }
def _section_certificates(course):
    """Section information for the certificates panel.

    The certificates panel allows global staff to generate example
    certificates and enable self-generated certificates for a course.

    Arguments:
        course (Course)

    Returns:
        dict
    """
    example_cert_status = None
    html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
    if html_cert_enabled:
        # Web (HTML) certificates need no example PDFs; always allow enabling.
        can_enable_for_course = True
    else:
        example_cert_status = certs_api.example_certificates_status(course.id)
        # Self-generated certificates may only be switched on once a full set
        # of example certificates has been generated successfully; otherwise a
        # misconfigured course (e.g. missing PDF template) would be exposed
        # to students.
        can_enable_for_course = (
            example_cert_status is not None and
            all(cert_status['status'] == 'success' for cert_status in example_cert_status)
        )

    course_id_kwargs = {'course_id': course.id}
    urls = {
        name: reverse(name, kwargs=course_id_kwargs)
        for name in (
            'generate_example_certificates',
            'enable_certificate_generation',
            'start_certificate_generation',
        )
    }
    urls['list_instructor_tasks_url'] = reverse('list_instructor_tasks', kwargs=course_id_kwargs)

    return {
        'section_key': 'certificates',
        'section_display_name': _('Certificates'),
        'example_certificate_status': example_cert_status,
        'can_enable_for_course': can_enable_for_course,
        'enabled_for_course': certs_api.cert_generation_enabled(course.id),
        'instructor_generation_enabled': settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False),
        'html_cert_enabled': html_cert_enabled,
        'urls': urls,
    }
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
    """
    Set the new course price and add new entry in the CourseModesArchive Table.

    Expects POST parameters ``course_price`` (integer) and ``currency``.
    Returns a JSON message; status 400 when the price is missing or
    non-numeric, or when the course has no 'honor' mode to update.
    """
    try:
        # A missing or non-numeric price is a client error (400), not a
        # server error: catch KeyError as well as ValueError.
        course_price = int(request.POST['course_price'])
    except (KeyError, ValueError):
        return JsonResponse(
            {'message': _("Please Enter the numeric value for the course price")},
            status=400)  # status code 400: Bad Request
    currency = request.POST['currency']
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
    if not course_honor_mode:
        return JsonResponse(
            {'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
            status=400)  # status code 400: Bad Request
    # Archive the current price/currency before overwriting them, so the old
    # values stay available for reporting.
    honor_mode = course_honor_mode[0]
    CourseModesArchive.objects.create(
        course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
        min_price=honor_mode.min_price, currency=honor_mode.currency,
        expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
    )
    course_honor_mode.update(
        min_price=course_price,
        currency=currency
    )
    return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id
    section_data = {
        'section_key': 'course_info',
        'section_display_name': _('Course Info'),
        'access': access,
        'course_id': course_key,
        'course_display_name': course.display_name,
        'has_started': course.has_started(),
        'has_ended': course.has_ended(),
        'start_date': get_default_time_display(course.start),
        'end_date': get_default_time_display(course.end),
        'num_sections': len(course.children),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
    }
    if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
        section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
    if settings.ANALYTICS_DASHBOARD_URL:
        # Enrollment data now lives in the external analytics dashboard.
        dashboard_link = _get_dashboard_link(course_key)
        message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
        section_data['enrollment_message'] = message
    if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
        section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
    try:
        # Render grade cutoffs as e.g. "A: 0.9, B: 0.8". The reduce() with a
        # tuple-unpacking lambda (Python 2-only syntax) PREPENDS each entry,
        # so the descending sort comes out ascending in the final string;
        # [:-2] trims the trailing ", ".
        sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
        advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
        section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
    except Exception:  # pylint: disable=broad-except
        # Any failure (e.g. missing cutoffs) degrades to a placeholder string.
        section_data['grade_cutoffs'] = "Not Available"
    # section_data['offline_grades'] = offline_grades_available(course_key)
    try:
        section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
    except Exception:  # pylint: disable=broad-except
        section_data['course_errors'] = [('Error fetching errors', '')]
    return section_data
def _section_membership(course, access, is_white_label):
    """Build the data dict backing the 'Membership' dashboard section."""
    url_kwargs = {'course_id': unicode(course.id)}
    # Enroll and unenroll buttons post to the same endpoint.
    update_enrollment_url = reverse('students_update_enrollment', kwargs=url_kwargs)
    return {
        'section_key': 'membership',
        'section_display_name': _('Membership'),
        'access': access,
        'ccx_is_enabled': settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx,
        'is_white_label': is_white_label,
        'enroll_button_url': update_enrollment_url,
        'unenroll_button_url': update_enrollment_url,
        'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs=url_kwargs),
        'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs=url_kwargs),
        'list_course_role_members_url': reverse('list_course_role_members', kwargs=url_kwargs),
        'modify_access_url': reverse('modify_access', kwargs=url_kwargs),
        'list_forum_members_url': reverse('list_forum_members', kwargs=url_kwargs),
        'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs=url_kwargs),
    }
def _section_cohort_management(course, access):
    """Build the data dict backing the cohort-management dashboard section."""
    # Some endpoints key on 'course_key_string', one on 'course_id'.
    key_string_kwargs = {'course_key_string': unicode(course.id)}
    course_id_kwargs = {'course_id': unicode(course.id)}
    return {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'course_cohort_settings_url': reverse('course_cohort_settings', kwargs=key_string_kwargs),
        'cohorts_url': reverse('cohorts', kwargs=key_string_kwargs),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs=course_id_kwargs),
        'discussion_topics_url': reverse('cohort_discussion_topics', kwargs=key_string_kwargs),
    }
def _is_small_course(course_key):
    """ Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
    enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
    cap = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
    # With no configured cap, no course counts as "small".
    return cap is not None and enrollment_count <= cap
def _section_student_admin(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id
    # Per-student action buttons are hidden for large courses.
    is_small_course = _is_small_course(course_key)
    section_data = {
        'section_key': 'student_admin',
        'section_display_name': _('Student Admin'),
        'access': access,
        'is_small_course': is_small_course,
        'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
        'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_for_entrance_exam_url': reverse(
            'reset_student_attempts_for_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
        'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
        'student_can_skip_entrance_exam_url': reverse(
            'mark_student_can_skip_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        # NOTE(review): 'list_entrace_exam...' key is misspelled but presumably
        # referenced under this exact name by the template -- verify before renaming.
        'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
                                                          kwargs={'course_id': unicode(course_key)}),
        'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
    }
    return section_data
def _section_extensions(course):
    """Build the data dict backing the 'Extensions' (due-date) dashboard section."""
    course_id = unicode(course.id)
    units = []
    for unit in get_units_with_due_date(course):
        units.append((title_or_url(unit), unicode(unit.location)))
    return {
        'section_key': 'extensions',
        'section_display_name': _('Extensions'),
        'units_with_due_dates': units,
        'change_due_date_url': reverse('change_due_date', kwargs={'course_id': course_id}),
        'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': course_id}),
        'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': course_id}),
        'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': course_id}),
    }
def _section_data_download(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id
    # Endpoint URLs consumed by the data-download tab's JavaScript.
    section_data = {
        'section_key': 'data_download',
        'section_display_name': _('Data Download'),
        'access': access,
        'show_generate_proctored_exam_report_button': settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False),
        'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
        'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
        'get_students_who_may_enroll_url': reverse(
            'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
        ),
        'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
        'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
        'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
        'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
    }
    return section_data
def null_applicable_aside_types(block):  # pylint: disable=unused-argument
    """Stand-in for ``applicable_aside_types`` that reports no asides.

    Monkey-patched onto the runtime while rendering an HtmlDescriptor for
    email text editing, so no aside machinery is invoked.
    """
    return list()
def _section_send_email(course, access):
    """ Provide data for the corresponding bulk email section """
    course_key = course.id

    # Swap in null_applicable_aside_types so no asides take part in this
    # one render of the editor descriptor.
    with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
        # The HtmlDescriptor exists solely to produce a nice text editor.
        editor_descriptor = HtmlDescriptor(
            course.system,
            DictFieldData({'data': ''}),
            ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
        )
        editor_fragment = course.system.render(editor_descriptor, 'studio_view')
    editor_fragment = wrap_xblock(
        'LmsRuntime', editor_descriptor, 'studio_view', editor_fragment, None,
        extra_data={"course-id": unicode(course_key)},
        usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
        # A freshly generated random request_token is fine here, since this
        # module isn't connected to any other xblock rendering.
        request_token=uuid.uuid1().get_hex()
    )
    return {
        'section_key': 'send_email',
        'section_display_name': _('Email'),
        'access': access,
        'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
        'editor': editor_fragment.content,
        'list_instructor_tasks_url': reverse(
            'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_background_tasks_url': reverse(
            'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_content_history_url': reverse(
            'list_email_content', kwargs={'course_id': unicode(course_key)}
        ),
    }
def _get_dashboard_link(course_key):
    """ Construct a URL to the external analytics dashboard """
    dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
    # Wrap the dashboard name in an anchor opening in a new tab.
    return u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(
        dashboard_url,
        settings.ANALYTICS_DASHBOARD_NAME,
    )
def _section_analytics(course, access):
    """ Provide data for the corresponding dashboard section """
    dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course.id))
    opening_tag = "<a href=\"{}\" target=\"_blank\">".format(dashboard_url)
    # Inject the dashboard name, wrapped in a link, into the translated message.
    insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.").format(
        analytics_dashboard_name='{0}{1}</a>'.format(opening_tag, settings.ANALYTICS_DASHBOARD_NAME)
    )
    return {
        'section_key': 'instructor_analytics',
        'section_display_name': _('Analytics'),
        'access': access,
        'insights_message': insights_message,
    }
def _section_metrics(course, access):
    """Provide data for the corresponding dashboard section """
    course_key = course.id
    section_data = {
        'section_key': 'metrics',
        'section_display_name': _('Metrics'),
        'access': access,
        'course_id': unicode(course_key),
        'sub_section_display_name': get_section_display_name(course_key),
        'section_has_problem': get_array_section_has_problem(course_key),
    }
    # These views take no course kwarg; the course is supplied per request.
    for key, view_name in (
            ('get_students_opened_subsection_url', 'get_students_opened_subsection'),
            ('get_students_problem_grades_url', 'get_students_problem_grades'),
            ('post_metrics_data_csv_url', 'post_metrics_data_csv'),
    ):
        section_data[key] = reverse(view_name)
    return section_data
| agpl-3.0 |
# -*- coding: utf-8 -*-
"""
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
#: (numeric plate code, province name) choices for Turkish provinces,
#: ordered alphabetically by the Turkish province name.
PROVINCE_CHOICES = (
    ('01', 'Adana'),
    ('02', 'Adıyaman'),
    ('03', 'Afyonkarahisar'),
    ('04', 'Ağrı'),
    ('68', 'Aksaray'),
    ('05', 'Amasya'),
    ('06', 'Ankara'),
    ('07', 'Antalya'),
    ('75', 'Ardahan'),
    ('08', 'Artvin'),
    ('09', 'Aydın'),
    ('10', 'Balıkesir'),
    ('74', 'Bartın'),
    ('72', 'Batman'),
    ('69', 'Bayburt'),
    ('11', 'Bilecik'),
    ('12', 'Bingöl'),
    ('13', 'Bitlis'),
    ('14', 'Bolu'),
    ('15', 'Burdur'),
    ('16', 'Bursa'),
    ('17', 'Çanakkale'),
    ('18', 'Çankırı'),
    ('19', 'Çorum'),
    ('20', 'Denizli'),
    ('21', 'Diyarbakır'),
    ('81', 'Düzce'),
    ('22', 'Edirne'),
    ('23', 'Elazığ'),
    ('24', 'Erzincan'),
    ('25', 'Erzurum'),
    ('26', 'Eskişehir'),
    ('27', 'Gaziantep'),
    ('28', 'Giresun'),
    ('29', 'Gümüşhane'),
    ('30', 'Hakkari'),
    ('31', 'Hatay'),
    ('76', 'Iğdır'),
    ('32', 'Isparta'),
    ('33', 'Mersin'),
    ('34', 'İstanbul'),
    ('35', 'İzmir'),
    ('78', 'Karabük'),
    ('36', 'Kars'),
    ('37', 'Kastamonu'),
    ('38', 'Kayseri'),
    ('39', 'Kırklareli'),
    ('40', 'Kırşehir'),
    ('41', 'Kocaeli'),
    ('42', 'Konya'),
    ('43', 'Kütahya'),
    ('44', 'Malatya'),
    ('45', 'Manisa'),
    ('46', 'Kahramanmaraş'),
    ('70', 'Karaman'),
    ('71', 'Kırıkkale'),
    ('79', 'Kilis'),
    ('47', 'Mardin'),
    ('48', 'Muğla'),
    ('49', 'Muş'),
    ('50', 'Nevşehir'),
    ('51', 'Niğde'),
    ('52', 'Ordu'),
    ('80', 'Osmaniye'),
    ('53', 'Rize'),
    ('54', 'Sakarya'),
    ('55', 'Samsun'),
    ('56', 'Siirt'),
    ('57', 'Sinop'),
    ('58', 'Sivas'),
    ('73', 'Şırnak'),
    ('59', 'Tekirdağ'),
    ('60', 'Tokat'),
    ('61', 'Trabzon'),
    ('62', 'Tunceli'),
    ('63', 'Şanlıurfa'),
    ('64', 'Uşak'),
    ('65', 'Van'),
    ('77', 'Yalova'),
    ('66', 'Yozgat'),
    ('67', 'Zonguldak'),
)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.