| repo_name (string, length 5–100) | path (string, length 4–294) | copies (990 classes) | size (string, length 4–7) | content (string, length 666–1M) | license (15 classes) |
|---|---|---|---|---|---|
nafex/pyload | module/plugins/crypter/SafelinkingNet.py | 13 | 2936 | # -*- coding: utf-8 -*-
import re
import BeautifulSoup
from module.common.json_layer import json_loads
from module.plugins.internal.Crypter import Crypter
from module.plugins.captcha.SolveMedia import SolveMedia
class SafelinkingNet(Crypter):
    """Decrypter plugin for safelinking.net protected and direct links.

    ``/d/`` URLs resolve through a redirect header; ``/p/`` pages may be
    guarded by a link password and/or a SolveMedia captcha before the
    embedded link list ("d_links" JSON) becomes available.
    """
    __name__    = "SafelinkingNet"
    __type__    = "crypter"
    __version__ = "0.17"
    __status__  = "testing"

    __pattern__ = r'https?://(?:www\.)?safelinking\.net/([pd])/\w+'
    __config__  = [("use_subfolder"     , "bool", "Save package to subfolder"          , True),
                   ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]

    __description__ = """Safelinking.net decrypter plugin"""
    __license__     = "GPLv3"
    __authors__     = [("quareevo", "quareevo@arcor.de")]

    #: Fix: raw string -- '\w' is an invalid escape in a plain string literal.
    SOLVEMEDIA_PATTERN = r"solvemediaApiKey = '([\w.-]+)';"

    def decrypt(self, pyfile):
        """Collect the links behind a safelinking.net URL into self.urls."""
        url = pyfile.url

        if re.match(self.__pattern__, url).group(1) == "d":
            #: Direct link: the real target is in the Location header.
            header = self.load(url, just_header=True)
            if 'location' in header:
                self.urls = [header['location']]
            else:
                self.error(_("Couldn't find forwarded Link"))

        else:
            postData = {"post-protect": "1"}
            self.html = self.load(url)

            if "link-password" in self.html:
                postData['link-password'] = self.get_password()

            if "altcaptcha" in self.html:
                #: Up to 5 captcha attempts before giving up.
                for _i in xrange(5):
                    m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
                    if m:
                        captchaKey = m.group(1)
                        captcha = SolveMedia(self)
                    else:
                        self.fail(_("Error parsing captcha"))

                    response, challenge = captcha.challenge(captchaKey)
                    postData['adcopy_challenge'] = challenge
                    postData['adcopy_response'] = response

                    self.html = self.load(url, post=postData)

                    if "The password you entered was incorrect" in self.html:
                        self.fail(_("Incorrect Password"))

                    if "The CAPTCHA code you entered was wrong" not in self.html:
                        break

            pyfile.package().password = ""

            #: The links are embedded as JSON ("d_links") inside a script tag.
            soup = BeautifulSoup.BeautifulSoup(self.html)
            scripts = soup.findAll("script")
            script = None
            for s in scripts:
                if "d_links" in s.text:
                    script = s
                    break

            #: Fix: previously the regex ran against the *last* script even
            #: when none mentioned d_links (NameError when no scripts at all).
            if script is None:
                return

            m = re.search(r'd_links":(\[.*?\])', script.text)
            if m:
                linkDict = json_loads(m.group(1))
                for link in linkDict:
                    if "http://" not in link['full']:
                        self.urls.append("https://safelinking.net/d/" + link['full'])
                    else:
                        self.urls.append(link['full'])
| gpl-3.0 |
dsfsdgsbngfggb/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/About.py | 293 | 3815 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
from com.sun.star.task import XJobExecutor
if __name__<>'package':
from lib.gui import *
class About(unohelper.Base, XJobExecutor):
    """Modal "About" dialog for the OpenERP/Odoo Report Designer extension.

    NOTE(review): relies on DBModalDialog and createUnoStruct from lib.gui
    (star-imported above when not loaded as a package) -- confirm against
    lib/gui.py.
    """

    def __init__(self, ctx):
        # UNO component context supplied by the office runtime (may be None
        # when run as a plain script, see the module tail).
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"

        self.win = DBModalDialog(60, 50, 175, 115, "About Odoo Report Designer")

        # A single FontDescriptor struct is reused and mutated for every
        # label below, so the order of these assignments is significant.
        # Presumably the UNO bridge copies the struct by value on each
        # Model.FontDescriptor assignment, so later mutations do not affect
        # earlier labels -- confirm against the UNO bridge semantics.
        fdBigFont = createUnoStruct("com.sun.star.awt.FontDescriptor")
        fdBigFont.Width = 20
        fdBigFont.Height = 25
        fdBigFont.Weight = 120
        fdBigFont.Family= 3

        # "Open" half of the title, in orange (0xF50000-ish decimal value).
        oLabelTitle1 = self.win.addFixedText("lblTitle1", 1, 1, 35, 30)
        oLabelTitle1.Model.TextColor = 16056320
        oLabelTitle1.Model.FontDescriptor = fdBigFont
        oLabelTitle1.Model.FontRelief = 1
        oLabelTitle1.Text = "Open"

        # "ERP" half of the title, in near-black.
        oLabelTitle2 = self.win.addFixedText("lblTitle2", 35, 1, 30, 30)
        oLabelTitle2.Model.TextColor = 1
        oLabelTitle2.Model.FontDescriptor = fdBigFont
        oLabelTitle2.Model.FontRelief = 1
        oLabelTitle2.Text = "ERP"

        # Product description paragraph; font struct re-tuned to body size.
        oLabelProdDesc = self.win.addFixedText("lblProdDesc", 1, 30, 173, 75)
        oLabelProdDesc.Model.TextColor = 1
        fdBigFont.Width = 10
        fdBigFont.Height = 11
        fdBigFont.Weight = 76
        oLabelProdDesc.Model.FontDescriptor = fdBigFont
        oLabelProdDesc.Model.Align = 1
        oLabelProdDesc.Model.FontRelief = 1
        oLabelProdDesc.Model.MultiLine = True
        oLabelProdDesc.Text = "This package helps you to create or modify\nreports in Odoo. Once connected to the\nserver, you can design your template of reports\nusing fields and expressions and browsing the\ncomplete structure of Odoo object database."

        # Footer with version/license notice; font struct re-tuned again.
        oLabelFooter = self.win.addFixedText("lblFooter", -1, -1, 173, 25)
        oLabelFooter.Model.TextColor = 255
        #oLabelFooter.Model.BackgroundColor = 1
        oLabelFooter.Model.Border = 2
        oLabelFooter.Model.BorderColor = 255
        fdBigFont.Width = 8
        fdBigFont.Height = 9
        fdBigFont.Weight = 100
        oLabelFooter.Model.FontDescriptor = fdBigFont
        oLabelFooter.Model.Align = 1
        oLabelFooter.Model.FontRelief = 1
        oLabelFooter.Model.MultiLine = True
        sMessage = "Odoo Report Designer v1.0 \nCopyright 2007-TODAY Tiny sprl \nThis product is free software, under the GNU Affero General Public License."
        oLabelFooter.Text = sMessage

        # Blocks until the user closes the dialog.
        self.win.doModalDialog("",None)
# Fix: the `<>` operator was removed in Python 3; `!=` is equivalent and
# valid in both Python 2 and 3.
if __name__ != "package" and __name__ == "__main__":
    # Run as a plain script: show the dialog immediately (no UNO context).
    About(None)
elif __name__ == "package":
    # Loaded as an OpenOffice extension: register the About UNO service.
    g_ImplementationHelper.addImplementation(
        About,
        "org.openoffice.openerp.report.about",
        ("com.sun.star.task.Job",),
    )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jericho/deep-learning | language-translation/problem_unittests.py | 98 | 13080 | import numpy as np
import tensorflow as tf
import itertools
import collections
import helper
def _print_success_message():
    """Print the shared success banner used by every test in this module."""
    print('Tests Passed')
def test_text_to_ids(text_to_ids):
    """Check text_to_ids: one id sequence per sentence, lengths preserved,
    <EOS> appended to every target sequence, and each word mapped to its
    vocabulary id."""
    test_source_text = 'new jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .'
    test_target_text = 'new jersey est parfois calme pendant l\' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .'

    test_source_text = test_source_text.lower()
    test_target_text = test_target_text.lower()

    source_vocab_to_int, source_int_to_vocab = helper.create_lookup_tables(test_source_text)
    target_vocab_to_int, target_int_to_vocab = helper.create_lookup_tables(test_target_text)

    test_source_id_seq, test_target_id_seq = text_to_ids(test_source_text, test_target_text, source_vocab_to_int, target_vocab_to_int)

    # One id sequence per input sentence.
    assert len(test_source_id_seq) == len(test_source_text.split('\n')),\
        'source_id_text has wrong length, it should be {}.'.format(len(test_source_text.split('\n')))
    assert len(test_target_id_seq) == len(test_target_text.split('\n')), \
        'target_id_text has wrong length, it should be {}.'.format(len(test_target_text.split('\n')))

    # Each element must itself be a sequence of ids.
    # Bug fix: the `collections.Iterable` alias was deprecated in 3.3 and
    # removed in Python 3.10; the ABC lives in `collections.abc`.
    target_not_iter = [type(x) for x in test_source_id_seq if not isinstance(x, collections.abc.Iterable)]
    assert not target_not_iter,\
        'Element in source_id_text is not iteratable. Found type {}'.format(target_not_iter[0])
    target_not_iter = [type(x) for x in test_target_id_seq if not isinstance(x, collections.abc.Iterable)]
    assert not target_not_iter, \
        'Element in target_id_text is not iteratable. Found type {}'.format(target_not_iter[0])

    # Source sequences keep a 1:1 word-to-id correspondence.
    source_changed_length = [(words, word_ids)
                             for words, word_ids in zip(test_source_text.split('\n'), test_source_id_seq)
                             if len(words.split()) != len(word_ids)]
    assert not source_changed_length,\
        'Source text changed in size from {} word(s) to {} id(s): {}'.format(
            len(source_changed_length[0][0].split()), len(source_changed_length[0][1]), source_changed_length[0][1])

    # Every target sequence must end with the <EOS> id ...
    target_missing_end = [word_ids for word_ids in test_target_id_seq if word_ids[-1] != target_vocab_to_int['<EOS>']]
    assert not target_missing_end,\
        'Missing <EOS> id at the end of {}'.format(target_missing_end[0])

    # ... and therefore be exactly one id longer than its word count.
    target_bad_size = [(words.split(), word_ids)
                       for words, word_ids in zip(test_target_text.split('\n'), test_target_id_seq)
                       if len(word_ids) != len(words.split()) + 1]
    assert not target_bad_size,\
        'Target text incorrect size. {} should be length {}'.format(
            target_bad_size[0][1], len(target_bad_size[0][0]) + 1)

    # Each source word maps to the id given by the vocabulary.
    source_bad_id = [(word, word_id)
                     for word, word_id in zip(
                         [word for sentence in test_source_text.split('\n') for word in sentence.split()],
                         itertools.chain.from_iterable(test_source_id_seq))
                     if source_vocab_to_int[word] != word_id]
    assert not source_bad_id,\
        'Source word incorrectly converted from {} to id {}.'.format(source_bad_id[0][0], source_bad_id[0][1])

    # Same for target words (the trailing <EOS> id is skipped).
    target_bad_id = [(word, word_id)
                     for word, word_id in zip(
                         [word for sentence in test_target_text.split('\n') for word in sentence.split()],
                         [word_id for word_ids in test_target_id_seq for word_id in word_ids[:-1]])
                     if target_vocab_to_int[word] != word_id]
    assert not target_bad_id,\
        'Target word incorrectly converted from {} to id {}.'.format(target_bad_id[0][0], target_bad_id[0][1])

    _print_success_message()
def test_model_inputs(model_inputs):
    """Check the four placeholders returned by model_inputs: op type, tensor
    names, and static ranks."""
    with tf.Graph().as_default():
        input_data, targets, lr, keep_prob = model_inputs()

        # Check type
        assert input_data.op.type == 'Placeholder',\
            'Input is not a Placeholder.'
        assert targets.op.type == 'Placeholder',\
            'Targets is not a Placeholder.'
        assert lr.op.type == 'Placeholder',\
            'Learning Rate is not a Placeholder.'
        assert keep_prob.op.type == 'Placeholder', \
            'Keep Probability is not a Placeholder.'

        # Check name
        assert input_data.name == 'input:0',\
            'Input has bad name. Found name {}'.format(input_data.name)
        assert keep_prob.name == 'keep_prob:0', \
            'Keep Probability has bad name. Found name {}'.format(keep_prob.name)

        # NOTE(review): tf.assert_rank returns an op, so a bare `assert` on
        # its return value only checks the op's truthiness; assert_rank does
        # raise at graph-construction time when a static rank mismatch is
        # detectable, which is what these lines effectively rely on.
        assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
        assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
        assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
        assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')

    _print_success_message()


def test_encoding_layer(encoding_layer):
    """Check that encoding_layer returns one LSTMStateTuple per layer, each
    state tensor shaped [batch, rnn_size]."""
    rnn_size = 512
    batch_size = 64
    num_layers = 3

    with tf.Graph().as_default():
        rnn_inputs = tf.placeholder(tf.float32, [batch_size, 22, 1000])
        keep_prob = tf.placeholder(tf.float32)
        states = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob)

        assert len(states) == num_layers,\
            'Found {} state(s). It should be {} states.'.format(len(states), num_layers)

        bad_types = [type(state) for state in states if not isinstance(state, tf.contrib.rnn.LSTMStateTuple)]
        assert not bad_types,\
            'Found wrong type: {}'.format(bad_types[0])

        # Accept either a statically-unknown batch dim or the exact batch size.
        bad_shapes = [state_tensor.get_shape()
                      for state in states
                      for state_tensor in state
                      if state_tensor.get_shape().as_list() not in [[None, rnn_size], [batch_size, rnn_size]]]
        assert not bad_shapes,\
            'Found wrong shape: {}'.format(bad_shapes[0])

    _print_success_message()
def test_decoding_layer(decoding_layer):
    """Check that decoding_layer returns training and inference logits
    tensors of the expected shapes."""
    batch_size = 64
    vocab_size = 1000
    embedding_size = 200
    sequence_length = 22
    rnn_size = 512
    num_layers = 3
    target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}

    with tf.Graph().as_default():
        dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size])
        dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
        keep_prob = tf.placeholder(tf.float32)
        # One LSTM state tuple reused for all three layers of the mock
        # encoder state.
        state = tf.contrib.rnn.LSTMStateTuple(
            tf.placeholder(tf.float32, [None, rnn_size]),
            tf.placeholder(tf.float32, [None, rnn_size]))
        encoder_state = (state, state, state)

        train_output, inf_output = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size,
                                                  sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)

        assert isinstance(train_output, tf.Tensor),\
            'Train Logits is wrong type: {}'.format(type(train_output))
        assert isinstance(inf_output, tf.Tensor), \
            'Inference Logits is wrong type: {}'.format(type(inf_output))

        assert train_output.get_shape().as_list() == [batch_size, None, vocab_size],\
            'Train Logits is the wrong shape: {}'.format(train_output.get_shape())
        assert inf_output.get_shape().as_list() == [None, None, vocab_size], \
            'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape())

    _print_success_message()


def test_seq2seq_model(seq2seq_model):
    """Check that the end-to-end seq2seq model returns training and inference
    logits of the expected shapes."""
    batch_size = 64
    target_vocab_size = 300
    sequence_length = 22
    rnn_size = 512
    num_layers = 3
    target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}

    with tf.Graph().as_default():
        input_data = tf.placeholder(tf.int32, [64, 22])
        target_data = tf.placeholder(tf.int32, [64, 22])
        keep_prob = tf.placeholder(tf.float32)
        # NOTE(review): the bare literals 200, 64, 80 are undocumented
        # positional hyperparameters of seq2seq_model -- confirm their
        # meaning against the notebook's function signature.
        train_output, inf_output = seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length,
                                                 200, target_vocab_size, 64, 80, rnn_size, num_layers, target_vocab_to_int)

        assert isinstance(train_output, tf.Tensor),\
            'Train Logits is wrong type: {}'.format(type(train_output))
        assert isinstance(inf_output, tf.Tensor), \
            'Inference Logits is wrong type: {}'.format(type(inf_output))

        assert train_output.get_shape().as_list() == [batch_size, None, target_vocab_size],\
            'Train Logits is the wrong shape: {}'.format(train_output.get_shape())
        assert inf_output.get_shape().as_list() == [None, None, target_vocab_size], \
            'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape())

    _print_success_message()
def test_sentence_to_seq(sentence_to_seq):
    """Check that sentence_to_seq maps each word to its vocabulary id and
    falls back to the <UNK> id for out-of-vocabulary words."""
    sentence = 'this is a test sentence'
    vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}

    seq = sentence_to_seq(sentence, vocab_to_int)

    assert len(seq) == 5, \
        'Wrong length. Found a length of {}'.format(len(seq))
    # 'test' is not in the vocabulary, so position 3 must be the <UNK> id.
    assert seq[3] == 2, \
        'Missing <UNK> id.'
    assert np.array_equal(seq, [3, 6, 5, 2, 4]), \
        'Incorrect ouput. Found {}'.format(seq)

    _print_success_message()
def test_process_decoding_input(process_decoding_input):
    """Check that target batches are shifted right with a leading <GO> id and
    that the output shape matches the input shape."""
    batch_size = 2
    seq_length = 3
    target_vocab_to_int = {'<GO>': 3}
    with tf.Graph().as_default():
        target_data = tf.placeholder(tf.int32, [batch_size, seq_length])
        dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)

        assert dec_input.get_shape() == (batch_size, seq_length),\
            'Wrong shape returned. Found {}'.format(dec_input.get_shape())

        # Run the op on concrete data to check the <GO> prepend behavior.
        test_target_data = [[10, 20, 30], [40, 18, 23]]
        with tf.Session() as sess:
            test_dec_input = sess.run(dec_input, {target_data: test_target_data})

        # Every row of the processed batch must start with the <GO> id.
        assert test_dec_input[0][0] == target_vocab_to_int['<GO>'] and\
               test_dec_input[1][0] == target_vocab_to_int['<GO>'],\
            'Missing GO Id.'

    _print_success_message()
def test_decoding_layer_train(decoding_layer_train):
    """Check the static shape of the training-time decoder logits."""
    batch_size = 64
    vocab_size = 1000
    embedding_size = 200
    sequence_length = 22
    rnn_size = 512
    num_layers = 3

    with tf.Graph().as_default():
        with tf.variable_scope("decoding") as decoding_scope:
            # NOTE(review): [cell] * num_layers reuses one cell object for
            # every layer -- confirm this matches the notebook's intended
            # TF1/contrib setup.
            dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
            output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
            dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size])
            keep_prob = tf.placeholder(tf.float32)
            state = tf.contrib.rnn.LSTMStateTuple(
                tf.placeholder(tf.float32, [None, rnn_size]),
                tf.placeholder(tf.float32, [None, rnn_size]))
            encoder_state = (state, state, state)

            train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length,
                                                decoding_scope, output_fn, keep_prob)

            assert train_logits.get_shape().as_list() == [batch_size, None, vocab_size], \
                'Wrong shape returned. Found {}'.format(train_logits.get_shape())

    _print_success_message()


def test_decoding_layer_infer(decoding_layer_infer):
    """Check the static shape of the inference-time decoder logits."""
    vocab_size = 1000
    sequence_length = 22
    embedding_size = 200
    rnn_size = 512
    num_layers = 3

    with tf.Graph().as_default():
        with tf.variable_scope("decoding") as decoding_scope:
            dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
            output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
            dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
            keep_prob = tf.placeholder(tf.float32)
            state = tf.contrib.rnn.LSTMStateTuple(
                tf.placeholder(tf.float32, [None, rnn_size]),
                tf.placeholder(tf.float32, [None, rnn_size]))
            encoder_state = (state, state, state)

            # NOTE(review): the literals 10 and 20 are undocumented positional
            # arguments (presumably start/end-of-sequence ids) -- confirm
            # against decoding_layer_infer's signature in the notebook.
            infer_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, 10, 20,
                                                sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)

            assert infer_logits.get_shape().as_list() == [None, None, vocab_size], \
                'Wrong shape returned. Found {}'.format(infer_logits.get_shape())

    _print_success_message()
| mit |
twobob/buildroot-kindle | output/build/host-python-2.7.2/Lib/test/test_md5.py | 194 | 1790 | # Testing md5 module
import warnings
warnings.filterwarnings("ignore", "the md5 module is deprecated.*",
DeprecationWarning)
import unittest
from md5 import md5
from test import test_support
def hexstr(s):
    """Return the lowercase hexadecimal encoding of byte string *s*."""
    return ''.join('%02x' % ord(ch) for ch in s)
class MD5_Test(unittest.TestCase):
    """Tests for the deprecated Python 2 ``md5`` module: RFC 1321 test
    vectors, hexdigest consistency, and incremental updates."""

    def md5test(self, s, expected):
        # Both digest()+hexstr and hexdigest() must agree with the vector.
        self.assertEqual(hexstr(md5(s).digest()), expected)
        self.assertEqual(md5(s).hexdigest(), expected)

    def test_basics(self):
        # Test vectors from RFC 1321, appendix A.5.
        eq = self.md5test
        eq('', 'd41d8cd98f00b204e9800998ecf8427e')
        eq('a', '0cc175b9c0f1b6a831c399e269772661')
        eq('abc', '900150983cd24fb0d6963f7d28e17f72')
        eq('message digest', 'f96b697d7cb7938d525a2f31aaf161d0')
        eq('abcdefghijklmnopqrstuvwxyz', 'c3fcd3d76192e4007dfb496cca67e13b')
        eq('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
           'd174ab98d277d9f5a5611c2c9f419d9f')
        eq('12345678901234567890123456789012345678901234567890123456789012345678901234567890',
           '57edf4a22be3c955ac49da2e2107b67a')

    def test_hexdigest(self):
        # hexdigest is new with Python 2.0
        m = md5('testing the hexdigest method')
        h = m.hexdigest()
        self.assertEqual(hexstr(m.digest()), h)

    def test_large_update(self):
        # Feeding the data in chunks must yield the same digest as one call.
        aas = 'a' * 64
        bees = 'b' * 64
        cees = 'c' * 64

        m1 = md5()
        m1.update(aas)
        m1.update(bees)
        m1.update(cees)

        m2 = md5()
        m2.update(aas + bees + cees)
        self.assertEqual(m1.digest(), m2.digest())
def test_main():
    """Entry point used by the regrtest framework to run the md5 tests."""
    test_support.run_unittest(MD5_Test)


if __name__ == '__main__':
    test_main()
| gpl-2.0 |
plastboks/Pulpy | pulpy/scripts/initializedb.py | 1 | 1151 | import os
import sys
import transaction
from getpass import getpass
from sqlalchemy import engine_from_config
from cryptacular.bcrypt import BCRYPTPasswordManager as BPM
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pulpy.models.meta import (
DBSession,
Base,
)
from pulpy.models import User
def usage(argv):
    """Print a usage banner for *argv* and exit with status 1."""
    cmd = os.path.basename(argv[0])
    message = ('usage: %s <config_uri>\n'
               '(example: "%s development.ini")' % (cmd, cmd))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Create the database schema and interactively seed an admin user.

    Expects exactly one argument: the path to a Pyramid .ini config file.
    """
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Create all tables declared on the shared declarative Base.
    Base.metadata.create_all(engine)
    # bcrypt password manager for hashing the admin password.
    m = BPM()
    # NOTE(review): raw_input is Python 2 only -- under Python 3 this must
    # become input().
    a_email = raw_input('Enter email for admin account: ')
    a_pw = getpass('Enter password for admin account: ')
    a_hashed = m.encode(a_pw)
    # transaction.manager commits the session on successful exit.
    with transaction.manager:
        admin = User(
            email=a_email,
            password=a_hashed,
        )
        DBSession.add(admin)
| mit |
elingg/tensorflow | tensorflow/contrib/learn/python/learn/learn_runner.py | 12 | 5127 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs an Experiment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.python.platform import tf_logging as logging
def run(experiment_fn, output_dir, schedule=None):
    """Make and run an experiment.

    It creates an Experiment by calling `experiment_fn`. Then it calls the
    function named as `schedule` of the Experiment.

    If schedule is not provided, then the default schedule for the current
    task type is used. The defaults are as follows:

     * 'ps' maps to 'run_std_server'
     * 'worker' maps to 'train'
     * 'master' maps to 'train_and_evaluate'

    (Doc fix: the defaults above now match `_get_default_schedule`; the old
    docstring listed 'serve' and 'local_run', which the code never returns.)

    If the experiment's config does not include a task type, then an
    exception is raised.

    Args:
      experiment_fn: A function that creates an `Experiment`. It should
        accept an argument `output_dir` which should be used to create the
        `Estimator` (passed as `model_dir` to its constructor). It must
        return an `Experiment`.
      output_dir: Base output directory.
      schedule: The name of the method in the `Experiment` to run.

    Returns:
      The return value of function `schedule`.

    Raises:
      ValueError: If `output_dir` is empty, `schedule` is None but no task
        type is set in the built experiment's config, the task type has no
        default, or `schedule` doesn't reference a member of `Experiment`.
      TypeError: `schedule` references non-callable member.
    """
    if not output_dir:
        raise ValueError('Must specify an output directory')
    if not callable(experiment_fn):
        raise TypeError('Experiment builder "%s" is not callable.' %
                        experiment_fn)

    # Call the builder
    experiment = experiment_fn(output_dir=output_dir)
    if not isinstance(experiment, Experiment):
        raise TypeError('Experiment builder did not return an Experiment '
                        'instance, got %s instead.' % type(experiment))

    # Get the schedule
    config = experiment.estimator.config
    schedule = schedule or _get_default_schedule(config)

    # Execute the schedule
    if not hasattr(experiment, schedule):
        logging.error('Schedule references non-existent task %s', schedule)
        valid_tasks = [x for x in experiment.__dict__
                       if callable(getattr(experiment, x))]
        logging.error('Allowed values for this experiment are: %s', valid_tasks)
        # Bug fix: the message must be %-formatted here; passing `schedule`
        # as a second positional argument made the exception render as a
        # tuple instead of an interpolated message.
        raise ValueError('Schedule references non-existent task %s' % schedule)

    task = getattr(experiment, schedule)
    if not callable(task):
        logging.error('Schedule references non-callable member %s', schedule)
        valid_tasks = [
            x for x in experiment.__dict__
            if callable(getattr(experiment, x)) and not x.startswith('_')
        ]
        logging.error('Allowed values for this experiment are: %s', valid_tasks)
        # Bug fix: same %-formatting issue as the ValueError above.
        raise TypeError('Schedule references non-callable member %s' % schedule)

    return task()
def _is_distributed(config):
"""Returns true if this is a distributed job."""
if not config.cluster_spec:
return False
# This is considered a distributed job if there is more than one task
# in the cluster spec.
task_count = 0
for job in config.cluster_spec.jobs:
for _ in config.cluster_spec.job_tasks(job):
task_count += 1
return task_count > 1
def _get_default_schedule(config):
"""Returns the default schedule for the provided RunConfig."""
if not config or not _is_distributed(config):
return 'train_and_evaluate'
if not config.task_type:
raise ValueError('Must specify a schedule')
if config.task_type == run_config.TaskType.MASTER:
# TODO(rhaertel): handle the case where there is more than one master
# or explicitly disallow such a case.
return 'train_and_evaluate'
elif config.task_type == run_config.TaskType.PS:
return 'run_std_server'
elif config.task_type == run_config.TaskType.WORKER:
return 'train'
raise ValueError('No default schedule for task type: %s' % (config.task_type))
| apache-2.0 |
jiangxb1987/spark | examples/src/main/python/ml/logistic_regression_with_elastic_net.py | 123 | 2029 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import LogisticRegression
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("LogisticRegressionWithElasticNet")\
.getOrCreate()
# $example on$
# Load training data
training = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
# Fit the model
lrModel = lr.fit(training)
# Print the coefficients and intercept for logistic regression
print("Coefficients: " + str(lrModel.coefficients))
print("Intercept: " + str(lrModel.intercept))
# We can also use the multinomial family for binary classification
mlr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8, family="multinomial")
# Fit the model
mlrModel = mlr.fit(training)
# Print the coefficients and intercepts for logistic regression with multinomial family
print("Multinomial coefficients: " + str(mlrModel.coefficientMatrix))
print("Multinomial intercepts: " + str(mlrModel.interceptVector))
# $example off$
spark.stop()
| apache-2.0 |
asrie/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/ports_mock.py | 121 | 2482 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockPort(object):
    """Stub port that returns canned command lines for webkitpy unit tests."""

    def name(self):
        return 'MockPort'

    def check_webkit_style_command(self):
        return ['mock-check-webkit-style']

    def update_webkit_command(self, non_interactive=False):
        # The flag is accepted for interface compatibility but has no effect.
        return ['mock-update-webkit']

    def build_webkit_command(self, build_style=None):
        # build_style is likewise ignored by the mock.
        return ['mock-build-webkit']

    def prepare_changelog_command(self):
        return ['mock-prepare-ChangeLog']

    def run_python_unittests_command(self):
        return ['mock-test-webkitpy']

    def run_perl_unittests_command(self):
        return ['mock-test-webkitperl']

    def run_javascriptcore_tests_command(self):
        return ['mock-run-javacriptcore-tests']

    def run_webkit_unit_tests_command(self):
        return ['mock-run-webkit-unit-tests']

    def run_webkit_tests_command(self):
        return ['mock-run-webkit-tests']

    def run_bindings_tests_command(self):
        return ['mock-run-bindings-tests']
| bsd-3-clause |
rafaeltomesouza/frontend-class1 | aula2/a12/linkedin/client/.gradle/yarn/node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)


def comment_replace(matchobj):
    """Pad out matched comment text with '#' so the line length is kept."""
    code, marker, comment = matchobj.group(1, 2, 3)
    return code + marker + '#' * len(comment)


def mask_comments(input):
    """Mask the comment text so we skip braces inside comments."""
    search_re = re.compile(r'(.*?)(#)(.*)')
    return [search_re.sub(comment_replace, line) for line in input]


def quote_replace(matchobj):
    """Pad out the body of a matched quoted string with 'x' characters."""
    prefix, quote, body = matchobj.group(1, 2, 3)
    return prefix + quote + 'x' * len(body) + quote


def mask_quotes(input):
    """Mask the quoted strings so we skip braces inside quoted strings."""
    search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
    return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
    """Split lines wherever *search_re* matches on the masked copy.

    A literal two-character '\\n' marker is inserted at each split point in
    both the real and masked lines, then both are broken on that marker so
    they stay in lockstep.  Returns (output_lines, masked_output_lines).
    """
    output = []
    mask_output = []
    for line, masked_line in zip(input, masked_input):
        while True:
            m = search_re.match(masked_line)
            if not m:
                break
            split_at = len(m.group(1))
            line = line[:split_at] + r'\n' + line[split_at:]
            masked_line = masked_line[:split_at] + r'\n' + masked_line[split_at:]
        output.extend(line.split(r'\n'))
        mask_output.extend(masked_line.split(r'\n'))
    return (output, mask_output)
def split_double_braces(input):
    """Split lines that open or close more than one brace.

    Quotes and comments are masked out first so braces inside them are
    ignored; lines that match the double-brace patterns are then split
    before indenting, so that closing braces make a nice diagonal line when
    everything is laid out.
    """
    double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
    double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

    masked = mask_quotes(input)
    masked = mask_comments(masked)

    output, masked = do_split(input, masked, double_open_brace_re)
    output, masked = do_split(output, masked, double_close_brace_re)
    return output
def count_braces(line):
  """Return (count, after) describing *line*'s brace nesting change.

  count is the net brace balance of the line: +1 for each opening
  bracket/brace/paren and -1 for each closing one, ignoring braces that
  appear inside comments or quoted strings.

  after is True when the indentation change should be applied after the
  line is printed (a net opener, or a closing brace preceded by other
  content), and False when it applies before printing.
  """
  open_braces = '[({'
  close_braces = '])}'
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  # Mask comments and quoted strings so their braces don't count.
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  # Membership tests replace the original O(chars * braces) nested loops.
  cnt = sum((char in open_braces) - (char in close_braces)
            for char in stripline)
  after = cnt > 0
  # This catches the special case of a closing brace having something
  # other than just whitespace ahead of it -- we don't want to unindent
  # that until after this line is printed so it stays with the previous
  # indentation level.
  if cnt < 0 and closing_prefix_re.match(stripline):
    after = True
  return (cnt, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
  """Read a gyp file (argv[1], or stdin) and pretty-print it to stdout.

  Returns 0 so the result can be passed straight to sys.exit().
  """
  if len(sys.argv) > 1:
    # Close the input file deterministically instead of leaking the
    # handle until garbage collection (the original never closed it).
    with open(sys.argv[1]) as infile:
      data = infile.read().splitlines()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)
  # Indent and print the output.
  prettyprint_input(lines)
  return 0
# Allow use both as an importable module and as a command-line filter.
if __name__ == '__main__':
  sys.exit(main())
| mit |
allanino/nupic | tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py | 32 | 13701 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# Base model configuration consumed by the OPF experiment runner; parts of
# it may be overridden below by a sub-experiment via
# updateConfigFromSubConfig().
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    # NOTE: every period field is 0 here, so record aggregation is disabled.
    'aggregationInfo': {'days': 0,
                        'fields': [(u'timestamp', 'first'),
                                   (u'consumption', 'sum'),
                                   ],
                        'hours': 0,
                        'microseconds': 0,
                        'milliseconds': 0,
                        'minutes': 0,
                        'months': 0,
                        'seconds': 0,
                        'weeks': 0,
                        'years': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity': 0,

            # Example:
            #   'encoders': {'field1': {'fieldname': 'field1', 'n':100,
            #                           'name': 'field1', 'type': 'AdaptiveScalarEncoder',
            #                           'w': 21}}
            #
            'encoders': {
                'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'n': 100,
                    'name': u'consumption',
                    'type': 'AdaptiveScalarEncoder',
                    'w': 21},

                'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'type': 'DateEncoder',
                    'timeOfDay': (21, 1)},

                'timestamp_dayOfWeek': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_dayOfWeek',
                    'type': 'DateEncoder',
                    'dayOfWeek': (21, 1)},

                # Dedicated encoding of the predicted field, fed only to the
                # classifier (classifierOnly=True), not to the SP/TP.
                '_classifierInput': {
                    'name': u'_classifierInput',
                    'fieldname': u'consumption',
                    'classifierOnly': True,
                    'type': 'AdaptiveScalarEncoder',
                    'clipInput': True,
                    'n': 100,
                    'w': 21},
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys are the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset': {u'days': 0, u'hours': 0},
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity': 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable': True,

        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec': 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName': 'CLAClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'clVerbosity': 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'anomalyParams': {u'anomalyCacheRecords': None,
                          u'autoDetectThreshold': None,
                          u'autoDetectWaitRecords': None},

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    # Number of aggregated periods that fit inside the requested horizon.
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    assert (predictionSteps >= 1)
    # The classifier expects its step count as a string.
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# Experiment control section: dataset, iteration bounds, inference arguments
# and the metrics to compute/log while the model above runs.
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',

    # Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
    #
    'dataset': {
        u'info': u'test_hotgym',
        u'streams': [{u'columns': [u'*'],
                      u'info': u'test data',
                      u'source': u'file://swarming/test_data.csv'}],
        u'version': 1},

    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount': -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {u'predictedField': u'consumption', u'predictionSteps': [1]},

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'consumption', metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}

# Entry point consumed by the OPF experiment runner: wraps the model config
# and control sections into a single experiment description.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| agpl-3.0 |
JFriel/honours_project | venv/lib/python2.7/site-packages/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for EUC-KR (Korean) encoded byte streams.

    Pairs the EUC-KR coding state machine (byte-sequence legality) with
    the EUC-KR character-distribution analyser (frequency plausibility);
    the probing logic itself lives in MultiByteCharSetProber.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()  # put the freshly wired machine/analyser in a clean state

    def get_charset_name(self):
        """Return the name of the charset this prober detects."""
        return "EUC-KR"
| gpl-3.0 |
SripriyaSeetharam/tacker | tacker/vm/mgmt_drivers/rpc/rpc.py | 1 | 3910 | # Copyright 2014 Intel Corporation.
# Copyright 2014 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Isaku Yamahata, Intel Corporation.
from tacker.common import rpc_compat
from tacker.common import topics
from tacker.vm.mgmt_drivers import abstract_driver
from tacker.vm.mgmt_drivers import constants
class ServiceVMAgentRpcApi(rpc_compat.RpcProxy):
    """Client-side RPC API for sending casts to a ServiceVM agent."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic=topics.SERVICEVM_AGENT):
        super(ServiceVMAgentRpcApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

    def rpc_cast(self, context, method, kwargs, topic):
        # Fire-and-forget: cast (not call), so no result is awaited.
        self.cast(context, self.make_msg(method, **kwargs), topic=topic)
# TODO(yamahata): port this to oslo.messaging
#                 address format needs be changed to
#                 oslo.messaging.target.Target
class AgentRpcMGMTDriver(abstract_driver.DeviceMGMTAbstractDriver):
    """Management driver that drives ServiceVM agents via RPC casts.

    Management URLs are topic-based addresses of the form
    '<topic>-<server>.<server>'; one RPC proxy per topic is cached on the
    class so all driver instances share it.
    """

    _TOPIC = topics.SERVICEVM_AGENT  # can be overridden by subclass
    _RPC_API = {}  # topic -> ServiceVMAgentRpcApi, shared across instances

    @property
    def _rpc_api(self):
        """Return the proxy for our topic, creating it on first use."""
        topic = self._TOPIC
        try:
            return self._RPC_API[topic]
        except KeyError:
            proxy = ServiceVMAgentRpcApi(topic=topic)
            # setdefault keeps the first proxy registered if two callers
            # race here, matching the original read-then-insert behavior.
            return self._RPC_API.setdefault(topic, proxy)

    def get_type(self):
        return 'agent-rpc'

    def get_name(self):
        return 'agent-rpc'

    def get_description(self):
        return 'agent-rpc'

    def mgmt_get_config(self, plugin, context, device):
        """Render the agent-side ini file pointing the agent at our topic."""
        contents = ('[servicevm]\n'
                    'topic = %s\n'
                    'device_id = %s\n'
                    % (self._TOPIC, device['id']))
        return {'/etc/tacker/servicevm-agent.ini': contents}

    @staticmethod
    def _address(topic, server):
        return '%s.%s' % (topic, server)

    def _mgmt_server(self, device):
        return device['id']

    def _mgmt_topic(self, device):
        return '%s-%s' % (self._TOPIC, self._mgmt_server(device))

    def mgmt_url(self, plugin, context, device):
        """Build the per-device management address."""
        return self._address(self._mgmt_topic(device),
                             self._mgmt_server(device))

    def mgmt_call(self, plugin, context, device, kwargs):
        """Cast a management action at the device's own topic."""
        action = kwargs[constants.KEY_ACTION]
        action_kwargs = kwargs[constants.KEY_KWARGS]
        self._rpc_api.rpc_cast(context, action, action_kwargs,
                               device['mgmt_url'])

    def _mgmt_service_server(self, device, service_instance):
        return '%s-%s' % (device['id'], service_instance['id'])

    def _mgmt_service_topic(self, device, service_instance):
        return '%s-%s' % (self._TOPIC,
                          self._mgmt_service_server(device, service_instance))

    def mgmt_service_address(self, plugin, context, device, service_instance):
        """Build the per-service-instance management address."""
        return self._address(
            self._mgmt_service_topic(device, service_instance),
            self._mgmt_service_server(device, service_instance))

    def mgmt_service_call(self, plugin, context, device,
                          service_instance, kwargs):
        """Cast a management action at the service instance's topic."""
        action = kwargs[constants.KEY_ACTION]
        action_kwargs = kwargs[constants.KEY_KWARGS]
        self._rpc_api.rpc_cast(context, action, action_kwargs,
                               service_instance['mgmt_url'])
| apache-2.0 |
faux123/Galaxy_Note_2 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that draws scheduler trace rectangles and handles
    scrolling, zooming, and mouse/keyboard interaction for perf's
    sched-migration script."""

    Y_OFFSET = 100            # pixels above the first row of rectangles
    RECT_HEIGHT = 100         # height of one trace row
    RECT_SPACE = 50           # vertical gap between rows
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip atop a row

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5                 # pixels per microsecond scale factor
        self.scroll_scale = 20          # scrollbar units are 20px each
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        """Convert a microsecond offset to a pixel offset at current zoom."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Convert a pixel offset back to microseconds at current zoom."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin in pixels (not scrollbar units)."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the horizontal scroll origin in microseconds."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one colored rectangle for row *nr* spanning [start, end]
        (timestamps); an optional top_color strip marks events."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # shrink the main rectangle so the event strip stays visible
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to repaint everything in [start, end] (relative
        offsets converted here to absolute timestamps)."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """Repaint only the currently visible time window."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to a row index, or -1 when the pixel falls outside
        any row (above the first, below the last, or in the gap)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        """Replace the summary text shown under the trace area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """Forward a click to the tracer as (row, timestamp)."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Virtual canvas width follows the traced interval and zoom level.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        """Re-derive scrollbars after a zoom change, keeping timestamp *x*
        at the left edge of the view."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """Keyboard handling: +/- zoom, arrow keys scroll one unit."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
eyohansa/django | tests/messages_tests/base.py | 319 | 14243 | from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import modify_settings, override_settings
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
    """
    Adds 6 messages from different levels (including a custom one) to a storage
    instance.
    """
    entries = (
        (constants.INFO, 'A generic info message', {}),
        (29, 'Some custom level', {}),
        (constants.DEBUG, 'A debugging message', {'extra_tags': 'extra-tag'}),
        (constants.WARNING, 'A warning', {}),
        (constants.ERROR, 'An error', {}),
        (constants.SUCCESS, 'This was a triumph.', {}),
    )
    for level, text, extra in entries:
        storage.add(level, text, **extra)
class override_settings_tags(override_settings):
    """override_settings variant that also refreshes the module-level
    LEVEL_TAGS constant, which is derived from settings.MESSAGE_TAGS at
    import time and would otherwise go stale when that setting changes."""

    def enable(self):
        super(override_settings_tags, self).enable()
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, we need to update that constant too.
        self.old_level_tags = base.LEVEL_TAGS
        base.LEVEL_TAGS = utils.get_level_tags()

    def disable(self):
        super(override_settings_tags, self).disable()
        # Restore the constant saved by enable().
        base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
    """
    Reading the existing storage does not cause the data to be lost.
    """
    storage = self.get_existing_storage()
    self.assertFalse(storage.used)
    # Iterating the storage engine directly flips the used flag ...
    first_pass = list(storage)
    self.assertTrue(storage.used)
    # ... but the stored data survives a second iteration unchanged.
    self.assertEqual(first_pass, list(storage))
def test_existing_add(self):
    # Adding to a storage that already holds messages must flip added_new.
    storage = self.get_existing_storage()
    self.assertFalse(storage.added_new)
    storage.add(constants.INFO, 'Test message 3')
    self.assertTrue(storage.added_new)
def test_default_level(self):
    """Without an explicit level, everything defaults to INFO."""
    # get_level works even with no storage on the request.
    req = self.get_request()
    self.assertEqual(get_level(req), constants.INFO)

    # With a storage attached but no level set, the default still applies.
    store = self.get_storage()
    req._messages = store
    self.assertEqual(get_level(req), constants.INFO)

    # Only messages of sufficient level get recorded.
    add_level_messages(store)
    self.assertEqual(len(store), 5)
def test_low_level(self):
    """A very low level lets every message through."""
    req = self.get_request()
    store = self.storage_class(req)
    req._messages = store

    self.assertTrue(set_level(req, 5))
    self.assertEqual(get_level(req), 5)

    add_level_messages(store)
    self.assertEqual(len(store), 6)
def test_high_level(self):
    """A high level filters out the lower-severity messages."""
    req = self.get_request()
    store = self.storage_class(req)
    req._messages = store

    self.assertTrue(set_level(req, 30))
    self.assertEqual(get_level(req), 30)

    add_level_messages(store)
    self.assertEqual(len(store), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
    """The MESSAGE_LEVEL setting drives which messages are recorded."""
    req = self.get_request()
    store = self.storage_class(req)

    self.assertEqual(get_level(req), 29)

    add_level_messages(store)
    self.assertEqual(len(store), 3)
def test_tags(self):
    """Each message's combined tags include level tag plus extra tags."""
    store = self.get_storage()
    store.level = 0
    add_level_messages(store)

    actual = [message.tags for message in store]
    expected = ['info', '', 'extra-tag debug', 'warning', 'error', 'success']
    self.assertEqual(actual, expected)
def test_level_tag(self):
    """level_tag exposes only the per-level tag, without extra tags."""
    store = self.get_storage()
    store.level = 0
    add_level_messages(store)

    actual = [message.level_tag for message in store]
    expected = ['info', '', 'debug', 'warning', 'error', 'success']
    self.assertEqual(actual, expected)
@override_settings_tags(MESSAGE_TAGS={
    constants.INFO: 'info',
    constants.DEBUG: '',
    constants.WARNING: '',
    constants.ERROR: 'bad',
    29: 'custom',
})
def test_custom_tags(self):
    """MESSAGE_TAGS overrides replace the default per-level tag strings."""
    store = self.get_storage()
    store.level = 0
    add_level_messages(store)

    actual = [message.tags for message in store]
    self.assertEqual(actual,
                     ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/deps2git/deps2submodules.py | 8 | 6032 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Read .DEPS.git and use the information to update git submodules"""
import optparse
import os
import re
import subprocess
import sys
from deps_utils import GetDepsContent
# Full 40-hex-digit sha1; used to validate pinned submodule revisions.
SHA1_RE = re.compile('[0-9a-fA-F]{40}')


def SanitizeDeps(submods):
    """
    Look for conflicts (primarily nested submodules) in submodule data. In the
    case of a conflict, the higher-level (shallower) submodule takes precedence.
    Modifies the submods argument in-place.
    """
    # NOTE: Python 2 — submods.keys() returns a list copy, so popping
    # entries from submods while iterating here is safe.
    for submod_name in submods.keys():
        parts = submod_name.split('/')[:-1]
        # Walk each ancestor path of this submodule, deepest first.
        while parts:
            may_conflict = '/'.join(parts)
            if may_conflict in submods:
                msg = ('Warning: dropping submodule "%s", because '
                       'it is nested in submodule "%s".' % (submod_name, may_conflict))
                print >> sys.stderr, msg
                submods.pop(submod_name)
                break
            parts.pop()
    return submods
def CollateDeps(deps_content):
    """
    Take the output of deps_utils.GetDepsContent and return a hash of:
    { submod_name : [ [ submod_os, ... ], submod_url, submod_sha1 ], ... }
    """
    # Strip the leading 'src/' so submodule paths are repo-relative.
    fixdep = lambda x: x[4:] if x.startswith('src/') else x
    # Split 'url@sha1' into [url, sha1]; a falsy url yields [None, None].
    spliturl = lambda x: list(x.partition('@')[0::2]) if x else [None, None]
    submods = {}
    # Non-OS-specific DEPS always override OS-specific deps. This is an interim
    # hack until there is a better way to handle OS-specific DEPS.
    for (deps_os, val) in deps_content[1].iteritems():
        for (dep, url) in val.iteritems():
            # setdefault accumulates every OS that mentions this dep.
            submod_data = submods.setdefault(fixdep(dep), [[]] + spliturl(url))
            submod_data[0].append(deps_os)
    for (dep, url) in deps_content[0].iteritems():
        # Written last, so the generic entry wins over OS-specific ones.
        submods[fixdep(dep)] = [['all']] + spliturl(url)
    return submods
def WriteGitmodules(submods, gitless=False, rewrite_rules=None):
    """
    Take the output of CollateDeps, use it to write a .gitmodules file and
    return a map of submodule name -> sha1 to be added to the git index.

    With gitless=True no git commands are run (presubmit mode); only the
    .gitmodules file is written and an empty adds map is returned.
    """
    adds = {}
    if not rewrite_rules:
        rewrite_rules = []

    def _rewrite(url):
        # Apply the first matching (old_prefix, new_prefix) rule to url.
        if not url:
            return url
        for rule in rewrite_rules:
            if url.startswith(rule[0]):
                return rule[1] + url[len(rule[0]):]
        return url

    fh = open('.gitmodules', 'w')
    for submod in sorted(submods.keys()):
        [submod_os, submod_url, submod_sha1] = submods[submod]
        submod_url = _rewrite(submod_url)
        print >> fh, '[submodule "%s"]' % submod
        print >> fh, '\tpath = %s' % submod
        print >> fh, '\turl = %s' % (submod_url if submod_url else '')
        print >> fh, '\tos = %s' % ','.join(submod_os)
        if submod_sha1 and not SHA1_RE.match(submod_sha1):
            raise RuntimeError('sha1 hash "%s" for submodule "%s" is malformed' %
                               (submod_sha1, submod))
        if gitless or not submod_url:
            continue
        if not submod_sha1:
            # We don't know what sha1 to register, so we have to infer it from the
            # submodule's origin/master.
            if not os.path.exists(os.path.join(submod, '.git')):
                # Not cloned yet
                subprocess.check_call(['git', 'clone', '-n', submod_url, submod])
            else:
                # Already cloned; let's fetch
                subprocess.check_call(['git', 'fetch', 'origin'], cwd=submod)
            sub = subprocess.Popen(['git', 'rev-list', 'origin/HEAD^!'],
                                   cwd=submod, stdout=subprocess.PIPE)
            submod_sha1 = sub.communicate()[0].rstrip()
        adds[submod] = submod_sha1
    fh.close()
    if not gitless:
        subprocess.check_call(['git', 'add', '.gitmodules'])
    return adds
def RemoveObsoleteSubmodules():
    """
    Delete from the git repository any submodules which aren't in .gitmodules.
    """
    lsfiles_proc = subprocess.Popen(['git', 'ls-files', '-s'],
                                    stdout=subprocess.PIPE)
    # Index entries with mode 160000 are gitlinks, i.e. registered submodules.
    grep_proc = subprocess.Popen(['grep', '^160000'],
                                 stdin=lsfiles_proc.stdout,
                                 stdout=subprocess.PIPE)
    (grep_out, _) = grep_proc.communicate() or ('', '')
    lsfiles_proc.communicate()
    with open(os.devnull, 'w') as nullpipe:
        for line in grep_out.splitlines():
            # ls-files -s line format: "<mode> <sha1> <stage>\t<path>"
            [_, _, _, path] = line.split()
            # If .gitmodules has no submodule whose path matches, the gitlink
            # is stale and is removed from the index.
            cmd = ['git', 'config', '-f', '.gitmodules',
                   '--get-regexp', 'submodule\..*\.path', '^%s$' % path]
            try:
                subprocess.check_call(cmd, stdout=nullpipe)
            except subprocess.CalledProcessError:
                subprocess.check_call(['git', 'update-index', '--force-remove', path])
def main():
    """Parse options, read the DEPS file and sync git submodules to it."""
    parser = optparse.OptionParser()
    parser.add_option('--gitless', action='store_true',
                      help='Skip all actions that assume a git working copy '
                      '(to support presubmit checks)')
    parser.add_option('--rewrite-url', action='append', metavar='OLD_URL=NEW_URL',
                      default=[], help='Translate urls according to this rule')
    options, args = parser.parse_args()
    if args:
        deps_file = args[0]
    else:
        deps_file = '.DEPS.git'

    # Each --rewrite-url value is OLD_URL=NEW_URL; split on the first '='
    # only, since the new url may itself contain '='.
    rewrite_rules = []
    for rule in options.rewrite_url:
        (old_url, new_url) = rule.split('=', 1)
        if not old_url or not new_url:
            print 'Bad url rewrite rule: "%s"' % rule
            parser.print_help()
            return 1
        rewrite_rules.append((old_url, new_url))

    # 9/18/2012 -- HACK to fix try bots without restarting
    hack_deps_file = os.path.join('src', '.DEPS.git')
    if not os.path.exists(deps_file) and os.path.exists(hack_deps_file):
        deps_file = hack_deps_file

    adds = WriteGitmodules(SanitizeDeps(CollateDeps(GetDepsContent(deps_file))),
                           rewrite_rules=rewrite_rules, gitless=options.gitless)
    if not options.gitless:
        RemoveObsoleteSubmodules()
        for submod_path, submod_sha1 in adds.iteritems():
            # Register each submodule gitlink (mode 160000) in the index.
            subprocess.check_call(['git', 'update-index', '--add',
                                   '--cacheinfo', '160000', submod_sha1, submod_path])
    return 0
| bsd-3-clause |
basepi/hubble | hubblestack/splunklogging.py | 1 | 7572 | '''
Hubblestack python log handler for splunk
Uses the same configuration as the rest of the splunk returners, returns to
the same destination but with an alternate sourcetype (``hubble_log`` by
default)
.. code-block:: yaml
hubblestack:
returner:
splunk:
- token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
indexer: splunk-indexer.domain.tld
index: hubble
sourcetype_log: hubble_log
You can also add an `custom_fields` argument which is a list of keys to add to events
with using the results of config.get(<custom_field>). These new keys will be prefixed
with 'custom_' to prevent conflicts. The values of these keys should be
strings or lists (will be sent as CSV string), do not choose grains or pillar values with complex values or they will
be skipped:
.. code-block:: yaml
hubblestack:
returner:
splunk:
- token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
indexer: splunk-indexer.domain.tld
index: hubble
sourcetype_log: hubble_log
custom_fields:
- site
- product_group
'''
import socket
# Imports for http event forwarder
import requests
import json
import time
import copy
from hubblestack.hec import http_event_collector, get_splunk_options, make_hec_args
import logging
class SplunkHandler(logging.Handler):
    '''
    Log handler for splunk.

    At construction time one HTTP event collector plus event/payload
    templates is built per configured splunk endpoint; emit() then sends
    each log record to every endpoint.
    '''

    def __init__(self):
        super(SplunkHandler, self).__init__()

        self.opts_list = get_splunk_options()
        # One (hec, event_template, payload_template) triple per endpoint.
        self.endpoint_list = []

        # Cloud details are merged into every emitted event.
        cloud_details = __grains__.get('cloud_details', {})

        for opts in self.opts_list:
            custom_fields = opts['custom_fields']

            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(__opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass

            # Set up the collector
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)

            minion_id = __grains__['id']
            master = __grains__['master']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                # Loopback is useless for correlation; prefer a routable ipv4.
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break

            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn

            # Per-host metadata template copied onto every emitted event.
            event = {}
            event.update({'master': master})
            event.update({'minion_id': minion_id})
            event.update({'dest_host': fqdn})
            event.update({'dest_ip': fqdn_ip4})
            event.update({'system_uuid': __grains__.get('system_uuid')})
            event.update(cloud_details)

            for custom_field in custom_fields:
                custom_field_name = 'custom_' + custom_field
                custom_field_value = __salt__['config.get'](custom_field, '')
                # NOTE(review): `unicode` implies Python 2 here — confirm
                # before running this handler under Python 3.
                if isinstance(custom_field_value, (str, unicode)):
                    event.update({custom_field_name: custom_field_value})
                elif isinstance(custom_field_value, list):
                    # Lists are flattened to a CSV string for splunk.
                    custom_field_value = ','.join(custom_field_value)
                    event.update({custom_field_name: custom_field_value})

            payload = {}
            payload.update({'host': fqdn})
            payload.update({'index': opts['index']})
            payload.update({'sourcetype': opts['sourcetype']})

            # Potentially add metadata fields:
            fields = {}
            for item in index_extracted_fields:
                if item in event and not isinstance(event[item], (list, dict, tuple)):
                    fields["meta_%s" % item] = str(event[item])
            if fields:
                payload.update({'fields': fields})

            self.endpoint_list.append((hec, event, payload))

    def emit(self, record):
        '''
        Emit a single record using the hec/event template/payload template
        generated in __init__()
        '''
        # NOTE: poor man's filtering ... goal: prevent logging loops and
        # various objects from logging to splunk in an infinite spiral of spam.
        # This might be more stylish as a logging.Filter, but that would need
        # to be re-added everywhere SplunkHandler is added to the logging tree.
        # Also, we don't wish to filter the logging, only to filter it from
        # splunk; so any logging.Filter would need to be very carefully added
        # to work right.
        rpn = getattr(record, 'pathname', '')
        filtered = ('hubblestack/splunklogging', 'hubblestack/hec/', 'urllib3/connectionpool')
        for i in filtered:
            if i in rpn:
                return
        log_entry = self.format_record(record)
        for hec, event, payload in self.endpoint_list:
            # Deep-copy the templates so per-record data never leaks between
            # events or endpoints.
            event = copy.deepcopy(event)
            payload = copy.deepcopy(payload)
            event.update(log_entry)
            payload['event'] = event
            # no_queue tells the hec never to queue the data to disk
            hec.batchEvent(payload, eventtime=time.time(), no_queue=True)
            hec.flushBatch()
        return True

    def emit_data(self, data):
        '''
        Add the given data (in dict format!) to the event template and emit as
        usual
        '''
        for hec, event, payload in self.endpoint_list:
            event = copy.deepcopy(event)
            payload = copy.deepcopy(payload)
            event.update(data)
            payload['event'] = event
            # no_queue tells the hec never to queue the data to disk
            hec.batchEvent(payload, eventtime=time.time(), no_queue=True)
            hec.flushBatch()
        return True

    def format_record(self, record):
        '''
        Format the log record into a dictionary for easy insertion into a
        splunk event dictionary
        '''
        try:
            # record.message only exists once a Formatter has run on the
            # record; fall back to the raw msg below when it hasn't.
            log_entry = {'message': record.message,
                         'level': record.levelname,
                         'timestamp': int(time.time()),
                         'loggername': record.name,
                         }
        except AttributeError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only AttributeError is expected.
            log_entry = {'message': record.msg,
                         'level': record.levelname,
                         'loggername': record.name,
                         'timestamp': int(time.time()),
                         }
        return log_entry
| apache-2.0 |
jesusfcr/airflow | tests/configuration.py | 52 | 1889 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
from airflow import configuration
from airflow.configuration import conf
class ConfTest(unittest.TestCase):
    """Tests for airflow.configuration and the global ``conf`` object."""

    def setUp(self):
        # BUG FIX: this method was named ``setup``, which unittest never
        # invokes automatically — so the test config was never loaded
        # before each test. The correct hook name is ``setUp``.
        configuration.load_test_config()

    # Backwards-compatible alias for any caller using the old name.
    setup = setUp

    def test_env_var_config(self):
        """Values supplied via environment variables are visible in conf."""
        opt = conf.get('testsection', 'testkey')
        self.assertEqual(opt, 'testvalue')

    def test_conf_as_dict(self):
        """conf.as_dict honours display_source / display_sensitive flags."""
        cfg_dict = conf.as_dict()

        # test that configs are picked up
        self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True')

        # test env vars: sensitive values are masked by default
        self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >')

        # test display_source: each value becomes a (value, source) tuple
        cfg_dict = conf.as_dict(display_source=True)
        self.assertEqual(
            cfg_dict['core']['load_examples'][1], 'airflow config')
        self.assertEqual(
            cfg_dict['testsection']['testkey'], ('< hidden >', 'env var'))

        # test display_sensitive: masking is turned off
        cfg_dict = conf.as_dict(display_sensitive=True)
        self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')

        # test display_source and display_sensitive combined
        cfg_dict = conf.as_dict(display_sensitive=True, display_source=True)
        self.assertEqual(
            cfg_dict['testsection']['testkey'], ('testvalue', 'env var'))
| apache-2.0 |
pinotlytics/pinot | pinot-dashboard/pinotui/config.py | 17 | 2368 | #!/usr/bin/env python2.6
#
# Copyright (C) 2015 LinkedIn Corp. (pinot-core@linkedin.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from exceptions import PinotException
class ConfigManager(object):
    """Loads config.yml and exposes typed accessors for the pinot dashboard.

    All fabric-specific getters raise PinotException (after logging) when
    the requested key is missing from the loaded configuration.
    """

    def __init__(self, logger):
        self.path = 'config.yml'       # config file, relative to the CWD
        self.logger = logger
        self.config = {}               # parsed YAML; empty until load()

    def load(self):
        """Read and parse the YAML config file; failures are logged, not raised."""
        try:
            with open(self.path, 'r') as h:
                contents = h.read()
        except IOError:
            self.logger.exception('Failed reading config file')
            return

        try:
            # SECURITY: safe_load instead of yaml.load — plain load() can
            # construct arbitrary Python objects from the YAML document.
            self.config = yaml.safe_load(contents)
        except yaml.YAMLError:
            self.logger.exception('Failed parsing config yaml')

    def update(self, conf):
        """Merge a dict of overrides into the loaded configuration."""
        self.config.update(conf)

    def _fabric_setting(self, fabric, key, error):
        """Shared lookup of config['fabrics'][fabric][key].

        Logs and raises PinotException(error) if any level is missing.
        """
        try:
            return self.config['fabrics'][fabric][key]
        except KeyError:
            self.logger.exception(error)
            raise PinotException(error)

    def get_controller_url(self, fabric):
        return self._fabric_setting(fabric, 'controller_url',
                                    'Failed getting controller url from config')

    def get_zk_host(self, fabric):
        return self._fabric_setting(fabric, 'zk_host',
                                    'Failed getting zookeeper host from config')

    def get_zk_root(self, fabric):
        return self._fabric_setting(fabric, 'zk_root',
                                    'Failed getting zookeeper root from config')

    def get_fabrics(self):
        """Return the configured fabric names."""
        try:
            return self.config['fabrics'].keys()
        except KeyError:
            error = 'Failed getting list of fabrics from config'
            self.logger.exception(error)
            raise PinotException(error)

    def get_flask_port(self):
        """Return listen_port as an int.

        NOTE(review): on a missing/non-numeric port this logs and falls
        through, returning None (original behavior, kept deliberately).
        """
        try:
            return int(self.config['listen_port'])
        except (KeyError, ValueError):
            self.logger.exception('Failed getting flask port from config')
| apache-2.0 |
eceglov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/server_process_mock.py | 129 | 2993 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockServerProcess(object):
    """Test double for a server process.

    Records everything written to it, replays canned stdout lines, and
    tracks start/stop state without spawning a real subprocess.
    """

    def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None, crashed=False):
        self.timed_out = False
        self.lines = lines or []
        self.crashed = crashed
        self.writes = []
        self.cmd = cmd
        self.env = env
        self.started = False
        self.stopped = False

    def write(self, bytes):
        self.writes.append(bytes)

    def has_crashed(self):
        return self.crashed

    def read_stdout_line(self, deadline):
        # Consume the next canned line; a trailing newline is synthesized.
        return self.lines.pop(0) + "\n"

    def read_stdout(self, deadline, size):
        head = self.lines[0]
        if size <= len(head):
            # Partial read: take `size` characters off the current line.
            self.lines[0] = head[size:]
            return head[:size]
        # Whole-line read; the synthesized newline counts as one character.
        self.lines.pop(0)
        leftover = size - len(head) - 1
        if leftover:
            return head + "\n" + self.read_stdout(deadline, leftover)
        return head + "\n"

    def pop_all_buffered_stderr(self):
        return ''

    def read_either_stdout_or_stderr_line(self, deadline):
        # FIXME: We should have tests which intermix stderr and stdout lines.
        return self.read_stdout_line(deadline), None

    def start(self):
        self.started = True

    def stop(self, kill_directly=False):
        self.stopped = True
        return

    def kill(self):
        return
| bsd-3-clause |
sestrella/ansible | lib/ansible/modules/network/illumos/dladm_linkprop.py | 52 | 7820 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_linkprop
short_description: Manage link properties on Solaris/illumos systems.
description:
- Set / reset link properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
link:
description:
- Link interface name.
required: true
aliases: [ "nic", "interface" ]
property:
description:
- Specifies the name of the property we want to manage.
required: true
aliases: [ "name" ]
value:
description:
- Specifies the value we want to set for the link property.
required: false
temporary:
description:
- Specifies that lin property configuration is temporary. Temporary
link property configuration does not persist across reboots.
required: false
type: bool
default: false
state:
description:
- Set or reset the property value.
required: false
default: "present"
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
- name: Set 'maxbw' to 100M on e1000g1
dladm_linkprop: name=e1000g1 property=maxbw value=100M state=present
- name: Set 'mtu' to 9000 on e1000g1
dladm_linkprop: name=e1000g1 property=mtu value=9000
- name: Reset 'mtu' property on e1000g1
dladm_linkprop: name=e1000g1 property=mtu state=reset
'''
RETURN = '''
property:
description: property name
returned: always
type: str
sample: mtu
state:
description: state of the target
returned: always
type: str
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: bool
sample: True
link:
description: link name
returned: always
type: str
sample: e100g0
value:
description: property value
returned: always
type: str
sample: 9000
'''
from ansible.module_utils.basic import AnsibleModule
class LinkProp(object):
    """Wraps dladm(1M) link-property queries and mutations.

    All state comes from the Ansible module parameters; every method
    shells out to ``dladm`` via ``module.run_command``.
    """

    def __init__(self, module):
        self.module = module

        self.link = module.params['link']
        self.property = module.params['property']
        self.value = module.params['value']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

        self.dladm_bin = self.module.get_bin_path('dladm', True)

    def _show_linkprop(self, fields=None):
        """Run ``dladm show-linkprop`` for this property/link.

        fields: optional comma-separated output columns; when given, the
        machine-parseable (-c) format is requested.
        Returns the (rc, stdout, stderr) tuple from run_command.
        """
        cmd = [self.dladm_bin, 'show-linkprop']
        if fields is not None:
            cmd.append('-c')
            cmd.append('-o')
            cmd.append(fields)
        cmd.append('-p')
        cmd.append(self.property)
        cmd.append(self.link)
        return self.module.run_command(cmd)

    def _change_linkprop(self, subcommand, prop_spec):
        """Run a mutating dladm subcommand (set-linkprop / reset-linkprop)."""
        cmd = [self.dladm_bin, subcommand]
        if self.temporary:
            cmd.append('-t')
        cmd.append('-p')
        cmd.append(prop_spec)
        cmd.append(self.link)
        return self.module.run_command(cmd)

    def property_exists(self):
        """Return True if dladm knows the property; fail the module otherwise."""
        (rc, _, _) = self._show_linkprop()

        if rc == 0:
            return True
        else:
            self.module.fail_json(msg='Unknown property "%s" on link %s' %
                                  (self.property, self.link),
                                  property=self.property,
                                  link=self.link)

    def property_is_modified(self):
        """Return True when the current value equals the default value."""
        (rc, out, _) = self._show_linkprop('value,default')

        out = out.rstrip()
        # BUG FIX: a plain two-way unpack of out.split(':') raised
        # ValueError whenever the value itself contained ':'.
        # partition() always yields exactly one split, at the first colon.
        (value, _sep, default) = out.partition(':')

        if rc == 0 and value == default:
            return True
        else:
            return False

    def property_is_readonly(self):
        """Return True when the property's permissions are read-only ('r-')."""
        (rc, out, _) = self._show_linkprop('perm')

        out = out.rstrip()

        if rc == 0 and out == 'r-':
            return True
        else:
            return False

    def property_is_set(self):
        """Return True when the current value already equals the target value."""
        (rc, out, _) = self._show_linkprop('value')

        out = out.rstrip()

        if rc == 0 and self.value == out:
            return True
        else:
            return False

    def set_property(self):
        """Set the property to self.value; returns (rc, out, err)."""
        return self._change_linkprop('set-linkprop', self.property + '=' + self.value)

    def reset_property(self):
        """Reset the property to its default value; returns (rc, out, err)."""
        return self._change_linkprop('reset-linkprop', self.property)
def main():
    """Ansible entry point: converge one dladm link property to `state`."""
    module = AnsibleModule(
        argument_spec=dict(
            link=dict(required=True, default=None, type='str', aliases=['nic', 'interface']),
            property=dict(required=True, type='str', aliases=['name']),
            value=dict(required=False, type='str'),
            temporary=dict(default=False, type='bool'),
            state=dict(
                default='present', choices=['absent', 'present', 'reset']),
        ),
        required_if=[
            ['state', 'present', ['value']],
        ],
        supports_check_mode=True
    )

    linkprop = LinkProp(module)

    # rc stays None unless a dladm mutation actually runs; it doubles as
    # the "changed" indicator at the bottom.
    rc = None
    out = ''
    err = ''
    result = {}
    result['property'] = linkprop.property
    result['link'] = linkprop.link
    result['state'] = linkprop.state
    if linkprop.value:
        result['value'] = linkprop.value

    if linkprop.state == 'absent' or linkprop.state == 'reset':
        if linkprop.property_exists():
            # property_is_modified() is True when value == default, i.e.
            # nothing to reset.
            if not linkprop.property_is_modified():
                if module.check_mode:
                    module.exit_json(changed=True)

                (rc, out, err) = linkprop.reset_property()
                if rc != 0:
                    module.fail_json(property=linkprop.property,
                                     link=linkprop.link,
                                     msg=err,
                                     rc=rc)
    elif linkprop.state == 'present':
        if linkprop.property_exists():
            if not linkprop.property_is_readonly():
                if not linkprop.property_is_set():
                    if module.check_mode:
                        module.exit_json(changed=True)

                    (rc, out, err) = linkprop.set_property()
                    if rc != 0:
                        module.fail_json(property=linkprop.property,
                                         link=linkprop.link,
                                         msg=err,
                                         rc=rc)
            else:
                module.fail_json(msg='Property "%s" is read-only!' % (linkprop.property),
                                 property=linkprop.property,
                                 link=linkprop.link)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)
| gpl-3.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/site-packages/requests/packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
    """Detect the character encoding of a raw byte buffer.

    Rejects already-decoded text (py2 unicode / py3 str): charset
    detection is only meaningful on undecoded bytes.
    """
    if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
            (version_info >= (3, 0) and not isinstance(aBuf, bytes))):
        raise ValueError('Expected a bytes object, not a unicode object')

    from . import universaldetector
    # Run the whole buffer through a fresh detector and return its verdict
    # (result is a dict produced by UniversalDetector.close()).
    u = universaldetector.UniversalDetector()
    u.reset()
    u.feed(aBuf)
    u.close()
    return u.result
| mit |
psi4/mongo_qcdb | qcfractal/server.py | 1 | 25510 | """
The FractalServer class
"""
import asyncio
import datetime
import logging
import ssl
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Union
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
from .extras import get_information
from .interface import FractalClient
from .queue import QueueManager, QueueManagerHandler, ServiceQueueHandler, TaskQueueHandler, ComputeManagerHandler
from .services import construct_service
from .storage_sockets import ViewHandler, storage_socket_factory
from .storage_sockets.api_logger import API_AccessLogger
from .web_handlers import (
CollectionHandler,
InformationHandler,
KeywordHandler,
KVStoreHandler,
MoleculeHandler,
OptimizationHandler,
ProcedureHandler,
ResultHandler,
WavefunctionStoreHandler,
)
myFormatter = logging.Formatter("[%(asctime)s] %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p")
def _build_ssl():
    """Generate a throwaway self-signed certificate for this host.

    The certificate is valid for both the local hostname and its resolved
    IPv4 address, for ten years, and is signed by itself (CA-style basic
    constraints).

    Returns
    -------
    Tuple[bytes, bytes]
        ``(cert_pem, key_pem)`` — PEM-serialized certificate and
        unencrypted private key.
    """
    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    import sys
    import socket
    import ipaddress
    import random

    hostname = socket.gethostname()
    public_ip = ipaddress.ip_address(socket.gethostbyname(hostname))

    # SECURITY FIX: was key_size=1024 — 1024-bit RSA is deprecated and is
    # rejected outright by modern TLS clients; 2048 is the accepted floor.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())

    # The cert must cover both the DNS name and the resolved address.
    alt_name_list = [x509.DNSName(hostname), x509.IPAddress(ipaddress.ip_address(public_ip))]
    alt_names = x509.SubjectAlternativeName(alt_name_list)

    # Basic data
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
    basic_contraints = x509.BasicConstraints(ca=True, path_length=0)
    now = datetime.datetime.utcnow()

    # Build cert
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .public_key(key.public_key())
        # NOTE(review): x509.random_serial_number() would be the standard
        # way to pick a serial; kept as-is to avoid changing behavior.
        .serial_number(int(random.random() * sys.maxsize))
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=10 * 365))
        .add_extension(basic_contraints, False)
        .add_extension(alt_names, False)
        .sign(key, hashes.SHA256(), default_backend())
    )  # yapf: disable

    # Build and return keys
    cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
    key_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )  # yapf: disable

    return cert_pem, key_pem
class FractalServer:
    """Tornado-based REST server for QCFractal.

    Owns the storage socket, the HTTP endpoint routing, periodic service /
    heartbeat / logging callbacks, and (optionally) an in-process compute
    QueueManager for testing and interactive sessions.
    """
    def __init__(
        self,
        # Server info options
        name: str = "QCFractal Server",
        port: int = 7777,
        loop: "IOLoop" = None,
        compress_response: bool = True,
        # Security
        security: Optional[str] = None,
        allow_read: bool = False,
        ssl_options: Union[bool, Dict[str, str]] = True,
        # Database options
        storage_uri: str = "postgresql://localhost:5432",
        storage_project_name: str = "qcfractal_default",
        query_limit: int = 1000,
        # View options
        view_enabled: bool = False,
        view_path: Optional[str] = None,
        # Log options
        logfile_prefix: str = None,
        log_apis: bool = False,
        geo_file_path: str = None,
        # Queue options
        queue_socket: "BaseAdapter" = None,
        heartbeat_frequency: float = 1800,
        # Service options
        max_active_services: int = 20,
        service_frequency: float = 60,
        # Testing functions
        skip_storage_version_check=True,
    ):
        """QCFractal initialization
        Parameters
        ----------
        name : str, optional
            The name of the server itself, provided when users query information
        port : int, optional
            The port the server will listen on.
        loop : IOLoop, optional
            Provide an IOLoop to use for the server
        compress_response : bool, optional
            Automatic compression of responses, turn on unless behind a proxy that
            provides this capability.
        security : Optional[str], optional
            The security options for the server {None, "local"}. The local security
            option uses the database to cache users.
        allow_read : bool, optional
            Allow unregistered to perform GET operations on Molecule/KeywordSets/KVStore/Results/Procedures
        ssl_options : Optional[Dict[str, str]], optional
            True, automatically creates self-signed SSL certificates. False, turns off SSL entirely. A user can also supply a dictionary of valid certificates.
        storage_uri : str, optional
            The database URI that the underlying storage socket will connect to.
        storage_project_name : str, optional
            The project name to use on the database.
        query_limit : int, optional
            The maximum number of entries a query will return.
        logfile_prefix : str, optional
            The logfile to use for logging.
        queue_socket : BaseAdapter, optional
            An optional Adapter to provide for server to have limited local compute.
            Should only be used for testing and interactive sessions.
        heartbeat_frequency : float, optional
            The time (in seconds) of the heartbeat manager frequency.
        max_active_services : int, optional
            The maximum number of active Services that can be running at any given time.
        service_frequency : float, optional
            The time (in seconds) before checking and updating services.
        """
        # Save local options
        self.name = name
        self.port = port
        # The advertised address scheme follows the SSL configuration.
        if ssl_options is False:
            self._address = "http://localhost:" + str(self.port) + "/"
        else:
            self._address = "https://localhost:" + str(self.port) + "/"
        self.max_active_services = max_active_services
        self.service_frequency = service_frequency
        self.heartbeat_frequency = heartbeat_frequency
        # Setup logging.
        if logfile_prefix is not None:
            tornado.options.options["log_file_prefix"] = logfile_prefix
        tornado.log.enable_pretty_logging()
        self.logger = logging.getLogger("tornado.application")
        # Create the API access logger if enabled.
        if log_apis:
            self.api_logger = API_AccessLogger(geo_file_path=geo_file_path)
        else:
            self.api_logger = None
        # Build security layers
        if security is None:
            storage_bypass_security = True
        elif security == "local":
            storage_bypass_security = False
        else:
            raise KeyError("Security option '{}' not recognized.".format(security))
        # Handle SSL
        ssl_ctx = None
        self.client_verify = True
        if ssl_options is True:
            # Auto-generate a self-signed certificate; clients cannot verify it.
            self.logger.warning("No SSL files passed in, generating self-signed SSL certificate.")
            self.logger.warning("Clients must use `verify=False` when connecting.\n")
            cert, key = _build_ssl()
            # Add quick names
            ssl_name = name.lower().replace(" ", "_")
            cert_name = ssl_name + "_ssl.crt"
            key_name = ssl_name + "_ssl.key"
            ssl_options = {"crt": cert_name, "key": key_name}
            with open(cert_name, "wb") as handle:
                handle.write(cert)
            with open(key_name, "wb") as handle:
                handle.write(key)
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])
            # Destroy keyfiles upon close
            import atexit
            import os
            atexit.register(os.remove, cert_name)
            atexit.register(os.remove, key_name)
            self.client_verify = False
        elif ssl_options is False:
            ssl_ctx = None
        elif isinstance(ssl_options, dict):
            if ("crt" not in ssl_options) or ("key" not in ssl_options):
                raise KeyError("'crt' (SSL Certificate) and 'key' (SSL Key) fields are required for `ssl_options`.")
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])
        else:
            raise KeyError("ssl_options not understood")
        # Setup the database connection
        self.storage_database = storage_project_name
        self.storage_uri = storage_uri
        self.storage = storage_socket_factory(
            storage_uri,
            project_name=storage_project_name,
            bypass_security=storage_bypass_security,
            allow_read=allow_read,
            max_limit=query_limit,
            skip_version_check=skip_storage_version_check,
        )
        if view_enabled:
            self.view_handler = ViewHandler(view_path)
        else:
            self.view_handler = None
        # Pull the current loop if we need it
        self.loop = loop or tornado.ioloop.IOLoop.current()
        # Shared state handed to every request handler.
        self.objects = {
            "storage_socket": self.storage,
            "logger": self.logger,
            "api_logger": self.api_logger,
            "view_handler": self.view_handler,
        }
        # Public information
        self.objects["public_information"] = {
            "name": self.name,
            "heartbeat_frequency": self.heartbeat_frequency,
            "version": get_information("version"),
            "query_limit": self.storage.get_limit(1.0e9),
            "client_lower_version_limit": "0.12.1", # Must be XX.YY.ZZ
            "client_upper_version_limit": "0.13.99", # Must be XX.YY.ZZ
        }
        self.update_public_information()
        endpoints = [
            # Generic web handlers
            (r"/information", InformationHandler, self.objects),
            (r"/kvstore", KVStoreHandler, self.objects),
            (r"/molecule", MoleculeHandler, self.objects),
            (r"/keyword", KeywordHandler, self.objects),
            (r"/collection(?:/([0-9]+)(?:/(value|entry|list|molecule))?)?", CollectionHandler, self.objects),
            (r"/result", ResultHandler, self.objects),
            (r"/wavefunctionstore", WavefunctionStoreHandler, self.objects),
            (r"/procedure/?", ProcedureHandler, self.objects),
            (r"/optimization/(.*)/?", OptimizationHandler, self.objects),
            # Queue Schedulers
            (r"/task_queue", TaskQueueHandler, self.objects),
            (r"/service_queue", ServiceQueueHandler, self.objects),
            (r"/queue_manager", QueueManagerHandler, self.objects),
            (r"/manager", ComputeManagerHandler, self.objects),
        ]
        # Build the app
        app_settings = {"compress_response": compress_response}
        self.app = tornado.web.Application(endpoints, **app_settings)
        # Endpoint names (leading slash stripped) used by get_address() validation.
        self.endpoints = set([v[0].replace("/", "", 1) for v in endpoints])
        self.http_server = tornado.httpserver.HTTPServer(self.app, ssl_options=ssl_ctx)
        self.http_server.listen(self.port)
        # Add periodic callback holders
        self.periodic = {}
        # Exit callbacks
        self.exit_callbacks = []
        self.logger.info("FractalServer:")
        self.logger.info(" Name: {}".format(self.name))
        self.logger.info(" Version: {}".format(get_information("version")))
        self.logger.info(" Address: {}".format(self._address))
        self.logger.info(" Database URI: {}".format(storage_uri))
        self.logger.info(" Database Name: {}".format(storage_project_name))
        self.logger.info(" Query Limit: {}\n".format(self.storage.get_limit(1.0e9)))
        self.loop_active = False
        # Create an executor for background processes
        self.executor = ThreadPoolExecutor(max_workers=2)
        self.futures = {}
        # Queue manager if direct build
        self.queue_socket = queue_socket
        if self.queue_socket is not None:
            if security == "local":
                raise ValueError("Cannot yet use local security with a internal QueueManager")
            def _build_manager():
                # Connect a self-client and attach an in-process QueueManager to it.
                client = FractalClient(self, username="qcfractal_server")
                self.objects["queue_manager"] = QueueManager(
                    client,
                    self.queue_socket,
                    logger=self.logger,
                    manager_name="FractalServer",
                    cores_per_task=1,
                    memory_per_task=1,
                    verbose=False,
                )
            # Build the queue manager, will not run until loop starts
            self.futures["queue_manager_future"] = self._run_in_thread(_build_manager)
    def __repr__(self):
        # Debug-friendly representation: name plus the advertised URI.
        return f"FractalServer(name='{self.name}' uri='{self._address}')"
    def _run_in_thread(self, func, timeout=5):
        """
        Runs a function in a background thread
        """
        # NOTE(review): `timeout` is accepted but never used -- confirm before removing.
        if self.executor is None:
            raise AttributeError("No Executor was created, but run_in_thread was called.")
        fut = self.loop.run_in_executor(self.executor, func)
        return fut
    ## Start/stop functionality
    def start(self, start_loop: bool = True, start_periodics: bool = True) -> None:
        """
        Starts up the IOLoop and periodic calls.
        Parameters
        ----------
        start_loop : bool, optional
            If False, does not start the IOLoop
        start_periodics : bool, optional
            If False, does not start the server periodic updates such as
            Service iterations and Manager heartbeat checking.
        """
        if "queue_manager_future" in self.futures:
            def start_manager():
                # Wait for the manager built in __init__, then start it.
                self._check_manager("manager_build")
                self.objects["queue_manager"].start()
            # Call this after the loop has started
            self._run_in_thread(start_manager)
        # Add services callback
        if start_periodics:
            nanny_services = tornado.ioloop.PeriodicCallback(self.update_services, self.service_frequency * 1000)
            nanny_services.start()
            self.periodic["update_services"] = nanny_services
            # Check Manager heartbeats, 5x heartbeat frequency
            heartbeats = tornado.ioloop.PeriodicCallback(
                self.check_manager_heartbeats, self.heartbeat_frequency * 1000 * 0.2
            )
            heartbeats.start()
            self.periodic["heartbeats"] = heartbeats
            # Log can take some time, update in thread
            def run_log_update_in_thread():
                self._run_in_thread(self.update_server_log)
            server_log = tornado.ioloop.PeriodicCallback(run_log_update_in_thread, self.heartbeat_frequency * 1000)
            server_log.start()
            self.periodic["server_log"] = server_log
        # Build callbacks which are always required
        public_info = tornado.ioloop.PeriodicCallback(self.update_public_information, self.heartbeat_frequency * 1000)
        public_info.start()
        self.periodic["public_info"] = public_info
        # Soft quit with a keyboard interrupt
        self.logger.info("FractalServer successfully started.\n")
        if start_loop:
            self.loop_active = True
            self.loop.start()
    def stop(self, stop_loop: bool = True) -> None:
        """
        Shuts down the IOLoop and periodic updates.
        Parameters
        ----------
        stop_loop : bool, optional
            If False, does not shut down the IOLoop. Useful if the IOLoop is externally managed.
        """
        # Shut down queue manager
        if "queue_manager" in self.objects:
            self._run_in_thread(self.objects["queue_manager"].stop)
        # Close down periodics
        for cb in self.periodic.values():
            cb.stop()
        # Call exit callbacks
        for func, args, kwargs in self.exit_callbacks:
            func(*args, **kwargs)
        # Shutdown executor and futures
        for k, v in self.futures.items():
            v.cancel()
        if self.executor is not None:
            self.executor.shutdown()
        # Shutdown IOLoop if needed; only stop when a loop is actually running.
        if (asyncio.get_event_loop().is_running()) and stop_loop:
            self.loop.stop()
        self.loop_active = False
        # Final shutdown
        if stop_loop:
            self.loop.close(all_fds=True)
        self.logger.info("FractalServer stopping gracefully. Stopped IOLoop.\n")
    def add_exit_callback(self, callback, *args, **kwargs):
        """Adds additional callbacks to perform when closing down the server.
        Parameters
        ----------
        callback : callable
            The function to call at exit
        *args
            Arguments to call with the function.
        **kwargs
            Kwargs to call with the function.
        """
        self.exit_callbacks.append((callback, args, kwargs))
    ## Helpers
    def get_address(self, endpoint: Optional[str] = None) -> str:
        """Obtains the full URI for a given function on the FractalServer.
        Parameters
        ----------
        endpoint : Optional[str], optional
            Specifies a endpoint to provide the URI for. If None returns the server address.
        Returns
        -------
        str
            The endpoint URI
        """
        if endpoint and (endpoint not in self.endpoints):
            raise AttributeError("Endpoint '{}' not found.".format(endpoint))
        if endpoint:
            return self._address + endpoint
        else:
            return self._address
    ## Updates
    def update_services(self) -> int:
        """Runs through all active services and examines their current status.

        Returns the number of services still running after this pass.
        """
        # Grab current services
        current_services = self.storage.get_services(status="RUNNING")["data"]
        # Grab new services if we have open slots
        open_slots = max(0, self.max_active_services - len(current_services))
        if open_slots > 0:
            new_services = self.storage.get_services(status="WAITING", limit=open_slots)["data"]
            current_services.extend(new_services)
            if len(new_services):
                self.logger.info(f"Starting {len(new_services)} new services.")
        self.logger.debug(f"Updating {len(current_services)} services.")
        # Loop over the services and iterate
        running_services = 0
        completed_services = []
        for data in current_services:
            # Attempt to build and iterate the service, recording any error.
            try:
                service = construct_service(self.storage, self.logger, data)
                finished = service.iterate()
            except Exception:
                error_message = "FractalServer Service Build and Iterate Error:\n{}".format(traceback.format_exc())
                self.logger.error(error_message)
                # NOTE(review): if construct_service() itself raised, `service`
                # is unbound here and the next line raises NameError -- confirm.
                service.status = "ERROR"
                service.error = {"error_type": "iteration_error", "error_message": error_message}
                finished = False
            self.storage.update_services([service])
            # Mark procedure and service as error
            if service.status == "ERROR":
                self.storage.update_service_status("ERROR", id=service.id)
            if finished is not False:
                # Add results to procedures, remove complete_ids
                completed_services.append(service)
            else:
                running_services += 1
        if len(completed_services):
            self.logger.info(f"Completed {len(completed_services)} services.")
        # Add new procedures and services
        self.storage.services_completed(completed_services)
        return running_services
    def update_server_log(self) -> Dict[str, Any]:
        """
        Updates the servers internal log
        """
        return self.storage.log_server_stats()
    def update_public_information(self) -> None:
        """
        Updates the public information data
        """
        data = self.storage.get_server_stats_log(limit=1)["data"]
        counts = {"collection": 0, "molecule": 0, "result": 0, "kvstore": 0}
        if len(data):
            counts["collection"] = data[0].get("collection_count", 0)
            counts["molecule"] = data[0].get("molecule_count", 0)
            counts["result"] = data[0].get("result_count", 0)
            counts["kvstore"] = data[0].get("kvstore_count", 0)
        update = {"counts": counts}
        self.objects["public_information"].update(update)
    def check_manager_heartbeats(self) -> None:
        """
        Checks the heartbeats and kills off managers that have not been heard from.
        """
        # Any ACTIVE manager not modified within one heartbeat period is stale.
        dt = datetime.datetime.utcnow() - datetime.timedelta(seconds=self.heartbeat_frequency)
        ret = self.storage.get_managers(status="ACTIVE", modified_before=dt)
        for blob in ret["data"]:
            # Requeue the stale manager's running tasks, then deactivate it.
            nshutdown = self.storage.queue_reset_status(manager=blob["name"], reset_running=True)
            self.storage.manager_update(blob["name"], returned=nshutdown, status="INACTIVE")
            self.logger.info(
                "Hearbeat missing from {}. Shutting down, recycling {} incomplete tasks.".format(
                    blob["name"], nshutdown
                )
            )
    def list_managers(self, status: Optional[str] = None, name: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Provides a list of managers associated with the server both active and inactive.
        Parameters
        ----------
        status : Optional[str], optional
            Filters managers by status.
        name : Optional[str], optional
            Filters managers by name
        Returns
        -------
        List[Dict[str, Any]]
            The requested Manager data.
        """
        return self.storage.get_managers(status=status, name=name)["data"]
    def client(self):
        """
        Builds a client from this server.
        """
        return FractalClient(self)
    ### Functions only available if using a local queue_adapter
    def _check_manager(self, func_name: str) -> None:
        # Guard for methods that require the optional in-process queue manager.
        if self.queue_socket is None:
            raise AttributeError(
                "{} is only available if the server was initialized with a queue manager.".format(func_name)
            )
        # Wait up to two seconds (20 x 0.1s) for the queue manager to build
        if "queue_manager" not in self.objects:
            self.logger.info("Waiting on queue_manager to build.")
            for x in range(20):
                time.sleep(0.1)
                if "queue_manager" in self.objects:
                    break
            if "queue_manager" not in self.objects:
                raise AttributeError("QueueManager never constructed.")
    def update_tasks(self) -> bool:
        """Pulls tasks from the queue_adapter, inserts them into the database,
        and fills the queue_adapter with new tasks.
        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("update_tasks")
        if self.loop_active:
            # Drop this in a thread so that we are not blocking each other
            self._run_in_thread(self.objects["queue_manager"].update)
        else:
            self.objects["queue_manager"].update()
        return True
    def await_results(self) -> bool:
        """A synchronous method for testing or small launches
        that awaits task completion before adding all queued results
        to the database and returning.
        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("await_results")
        self.logger.info("Updating tasks")
        return self.objects["queue_manager"].await_results()
    def await_services(self, max_iter: int = 10) -> bool:
        """A synchronous method that awaits the completion of all services
        before returning.
        Parameters
        ----------
        max_iter : int, optional
            The maximum number of service iterations the server will run through. Will
            terminate early if all services have completed.
        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self._check_manager("await_services")
        self.await_results()
        for x in range(1, max_iter + 1):
            self.logger.info("\nAwait services: Iteration {}\n".format(x))
            running_services = self.update_services()
            self.await_results()
            if running_services == 0:
                break
        return True
    def list_current_tasks(self) -> List[Any]:
        """Provides a list of tasks currently in the queue along
        with the associated keys.
        Returns
        -------
        ret : list of tuples
            All tasks currently still in the database
        """
        self._check_manager("list_current_tasks")
        return self.objects["queue_manager"].list_current_tasks()
| bsd-3-clause |
qenter/vlc-android | toolchains/arm/lib/python2.7/test/test_pkgimport.py | 126 | 2881 | import os, sys, string, random, tempfile, unittest
from test.test_support import run_unittest
class TestImport(unittest.TestCase):
def __init__(self, *args, **kw):
self.package_name = 'PACKAGE_'
while self.package_name in sys.modules:
self.package_name += random.choose(string.letters)
self.module_name = self.package_name + '.foo'
unittest.TestCase.__init__(self, *args, **kw)
def remove_modules(self):
for module_name in (self.package_name, self.module_name):
if module_name in sys.modules:
del sys.modules[module_name]
def setUp(self):
self.test_dir = tempfile.mkdtemp()
sys.path.append(self.test_dir)
self.package_dir = os.path.join(self.test_dir,
self.package_name)
os.mkdir(self.package_dir)
open(os.path.join(
self.package_dir, '__init__'+os.extsep+'py'), 'w').close()
self.module_path = os.path.join(self.package_dir, 'foo'+os.extsep+'py')
def tearDown(self):
for file in os.listdir(self.package_dir):
os.remove(os.path.join(self.package_dir, file))
os.rmdir(self.package_dir)
os.rmdir(self.test_dir)
self.assertNotEqual(sys.path.count(self.test_dir), 0)
sys.path.remove(self.test_dir)
self.remove_modules()
def rewrite_file(self, contents):
for extension in "co":
compiled_path = self.module_path + extension
if os.path.exists(compiled_path):
os.remove(compiled_path)
f = open(self.module_path, 'w')
f.write(contents)
f.close()
def test_package_import__semantics(self):
# Generate a couple of broken modules to try importing.
# ...try loading the module when there's a SyntaxError
self.rewrite_file('for')
try: __import__(self.module_name)
except SyntaxError: pass
else: raise RuntimeError, 'Failed to induce SyntaxError'
self.assertNotIn(self.module_name, sys.modules)
self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))
# ...make up a variable name that isn't bound in __builtins__
var = 'a'
while var in dir(__builtins__):
var += random.choose(string.letters)
# ...make a module that just contains that
self.rewrite_file(var)
try: __import__(self.module_name)
except NameError: pass
else: raise RuntimeError, 'Failed to induce NameError.'
# ...now change the module so that the NameError doesn't
# happen
self.rewrite_file('%s = 1' % var)
module = __import__(self.module_name).foo
self.assertEqual(getattr(module, var), 1)
def test_main():
    # Standard entry point used by the regrtest driver.
    run_unittest(TestImport)
if __name__ == "__main__":
    test_main()
| gpl-2.0 |
sinergatis/pathagar | books/utils.py | 2 | 3484 | from collections import namedtuple
from language_tags import tags
from pycountry import languages
# (code, description) pair returned by standardize_language().
LanguageTuple = namedtuple('languagetuple', ['code', 'description'])
# ISO-639/2 bibliographic (B) codes mapped to their terminological (T)
# synonyms.  A handful of languages have two ISO-639/2 codes; pycountry only
# resolves the T form, so B codes must be translated before lookup.
# https://en.wikipedia.org/wiki/ISO_639-2#B_and_T_codes
ISO_6639_2_B = {'alb': 'sqi',
                'arm': 'hye',
                'baq': 'eus',
                'tib': 'bod',
                'bur': 'mya',
                'cze': 'ces',
                'chi': 'zho',
                'wel': 'cym',
                'ger': 'deu',
                'dut': 'nld',
                'per': 'fas',
                'fre': 'fra',
                'geo': 'kat',
                'gre': 'ell',
                'ice': 'isl',
                'mac': 'mkd',
                'mao': 'mri',
                'may': 'msa',
                # Bug fix: Romanian's T code is 'ron'; 'rom' is Romany.
                'rum': 'ron',
                'slo': 'slk',
                }
# Utility functions for django-taggit
# https://github.com/alex/django-taggit/blob/develop/docs/custom_tagging.txt
def comma_joiner(tags):
    """Render an iterable of Tag objects as one comma-separated string."""
    names = [tag.name for tag in tags]
    return ', '.join(names)
def comma_splitter(tag_string):
    """Split a comma-separated string into lower-cased tag names.

    Blank segments (e.g. from doubled commas or trailing commas) are dropped.
    """
    stripped = (chunk.strip() for chunk in tag_string.split(','))
    return [chunk.lower() for chunk in stripped if chunk]
def fix_authors(authors):
    """Normalize an author string scraped from book metadata.

    Strips academic/professional titles (PhD, Ph.D., M.D., MD) and, for
    comma-separated "Last, First" strings, reorders each pair into
    "First Last".

    :param authors: raw author string.
    :returns: the cleaned string, or a list of "First Last" strings when the
        input contained comma-separated name pairs.  (The mixed return type
        mirrors the original behaviour that callers rely on.)
    """
    # One data-driven pass instead of a dozen chained .replace() calls.
    # Order matters: dotted forms must be removed before their prefixes.
    titles = (', PhD', ' PhD', ', PH.D.', ' PH.D.', ', Ph.D.', ' Ph.D.',
              ', Ph.D', ' Ph.D', ', M.D.', ' M.D.', ', MD', ' MD')
    authors = authors.strip()
    for title in titles:
        authors = authors.replace(title, '')
    if ',' in authors:
        # "Last, First[, Last, First, ...]" -> ["First Last", ...].
        # Generational suffixes such as "Jr." would be mangled by the
        # pairing, so those strings pass through untouched.  A trailing
        # unpaired chunk is dropped, matching the original behaviour.
        if ', Jr.' not in authors:
            chunks = authors.split(',')
            authors = [chunks[i + 1].strip() + ' ' + chunks[i].strip()
                       for i in range(0, len(chunks) - 1, 2)]
    return authors
def standardize_language(code):
    """Match `code` to a standard RFC5646 or RFC3066 language. The following
    approaches are tried in order:
    * Match a RFC5646 language string.
    * Match a RFC3066 language string.
    * Use a ISO-6639/2 bibliographic synonym, and match a RFC3066 language
    string for the ISO-6639/2 terminological code.
    If no results are found, `None` is returned.
    http://www.idpf.org/epub/30/spec/epub30-publications.html#sec-opf-dclanguage
    http://www.idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.12
    :param code: string with a language code ('en-GB', ...)
    :returns: `LanguageTuple` with the RFC5646 code and the list of description
    tags, or `None` if the language could not be identified.
    """
    if not code:
        return None
    # Try RFC5646 (for EPUB 3).
    if tags.check(code):
        return LanguageTuple(code=code.lower(),
                             description=tags.description(code))
    # Try RFC3066 (for EPUB 2): map the ISO639-2 code to its ISO639-1 code.
    # NOTE(review): this is the old pycountry API -- newer releases raise
    # AttributeError/LookupError rather than KeyError; confirm the pinned
    # pycountry version before upgrading.
    try:
        lang = languages.get(iso639_2T_code=code)
        new_code = lang.iso639_1_code
    except KeyError:
        # Fall back through the bibliographic (B) -> terminological (T) table.
        if code in ISO_6639_2_B:
            try:
                lang = languages.get(iso639_2T_code=ISO_6639_2_B[code])
                new_code = lang.iso639_1_code
            except KeyError:
                return None
        else:
            return None
    # Try RFC5646 for the ISO639-1 code.
    if tags.check(new_code):
        return LanguageTuple(code=new_code.lower(),
                             description=tags.description(new_code))
    return None
| gpl-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/virtual_network_gateway_connection_list_entity.py | 1 | 7717 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
    """A common class for general resource information.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: The reference to virtual network gateway
    resource.
    :type virtual_network_gateway1:
    ~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
    :param virtual_network_gateway2: The reference to virtual network gateway
    resource.
    :type virtual_network_gateway2:
    ~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
    :param local_network_gateway2: The reference to local network gateway
    resource.
    :type local_network_gateway2:
    ~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
    :param connection_type: Gateway connection type. Possible values are:
    'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values
    include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
    :type connection_type: str or
    ~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnectionType
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual network Gateway connection status.
    Possible values are 'Unknown', 'Connecting', 'Connected' and
    'NotConnected'. Possible values include: 'Unknown', 'Connecting',
    'Connected', 'NotConnected'
    :vartype connection_status: str or
    ~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection
    health status.
    :vartype tunnel_connection_status:
    list[~azure.mgmt.network.v2017_08_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this
    connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this
    connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2017_08_01.models.SubResource
    :param enable_bgp: EnableBgp flag
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic
    selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this
    connection.
    :type ipsec_policies:
    list[~azure.mgmt.network.v2017_08_01.models.IpsecPolicy]
    :param resource_guid: The resource GUID property of the
    VirtualNetworkGatewayConnection resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the
    VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
    'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param etag: Gets a unique read-only string that changes whenever the
    resource is updated.
    :type etag: str
    """
    # AutoRest-generated constraint metadata: required inputs plus fields the
    # service populates (readonly) that must not be sent on requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'virtual_network_gateway1': {'required': True},
        'connection_type': {'required': True},
        'connection_status': {'readonly': True},
        'tunnel_connection_status': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'peer': {'key': 'properties.peer', 'type': 'SubResource'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, virtual_network_gateway1, connection_type, id=None, location=None, tags=None, authorization_key=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight=None, shared_key=None, peer=None, enable_bgp=None, use_policy_based_traffic_selectors=None, ipsec_policies=None, resource_guid=None, etag=None):
        super(VirtualNetworkGatewayConnectionListEntity, self).__init__(id=id, location=location, tags=tags)
        self.authorization_key = authorization_key
        self.virtual_network_gateway1 = virtual_network_gateway1
        self.virtual_network_gateway2 = virtual_network_gateway2
        self.local_network_gateway2 = local_network_gateway2
        self.connection_type = connection_type
        self.routing_weight = routing_weight
        self.shared_key = shared_key
        # Server-populated (readonly) fields always start as None.
        self.connection_status = None
        self.tunnel_connection_status = None
        self.egress_bytes_transferred = None
        self.ingress_bytes_transferred = None
        self.peer = peer
        self.enable_bgp = enable_bgp
        self.use_policy_based_traffic_selectors = use_policy_based_traffic_selectors
        self.ipsec_policies = ipsec_policies
        self.resource_guid = resource_guid
        self.provisioning_state = None
        self.etag = etag
| mit |
rcbops/python-django-buildpackage | tests/regressiontests/generic_views/models.py | 52 | 1031 | from django.db import models
class Artist(models.Model):
    # Minimal model with custom verbose names and a permalink-based URL,
    # used to exercise the generic views.
    name = models.CharField(max_length=100)
    class Meta:
        ordering = ['name']
        verbose_name = 'professional artist'
        verbose_name_plural = 'professional artists'
    def __unicode__(self):
        return self.name
    @models.permalink
    def get_absolute_url(self):
        # Reverses the 'artist_detail' URL pattern with this object's pk.
        return ('artist_detail', (), {'pk': self.id})
class Author(models.Model):
    # Author with a slug field for slug-based generic view lookups.
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    class Meta:
        ordering = ['name']
    def __unicode__(self):
        return self.name
class Book(models.Model):
    # Book with many-to-many authors; default ordering is newest first.
    name = models.CharField(max_length=300)
    slug = models.SlugField()
    pages = models.IntegerField()
    authors = models.ManyToManyField(Author)
    pubdate = models.DateField()
    class Meta:
        ordering = ['-pubdate']
    def __unicode__(self):
        return self.name
class Page(models.Model):
    # Flat-page style model: raw content rendered through a named template.
    content = models.TextField()
    template = models.CharField(max_length=300)
| bsd-3-clause |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/uu.py | 251 | 6555 | #! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
opened_files = []
try:
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
opened_files.append(in_file)
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
out_file = open(out_file, 'wb')
opened_files.append(out_file)
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(' \nend\n')
finally:
for f in opened_files:
f.close()
def decode(in_file, out_file=None, mode=None, quiet=0):
    """Decode uuencoded file.

    in_file may be '-' (stdin), a pathname, or an open file object.
    out_file and mode default to the values found on the 'begin' line.
    Set quiet to a true value to suppress warnings about malformed
    input lines.  Raises Error on missing/invalid begin line, existing
    output file, or truncated input.
    """
    #
    # Open the input file, if needed.
    #
    opened_files = []
    if in_file == '-':
        in_file = sys.stdin
    elif isinstance(in_file, basestring):
        in_file = open(in_file)
        opened_files.append(in_file)
    try:
        #
        # Read until a begin is encountered or we've exhausted the file
        #
        while True:
            hdr = in_file.readline()
            if not hdr:
                raise Error('No valid begin line found in input file')
            if not hdr.startswith('begin'):
                continue
            hdrfields = hdr.split(' ', 2)
            if len(hdrfields) == 3 and hdrfields[0] == 'begin':
                try:
                    # The mode field must be valid octal for the header
                    # to count as a real begin line.
                    int(hdrfields[1], 8)
                    break
                except ValueError:
                    pass
        if out_file is None:
            out_file = hdrfields[2].rstrip()
            if os.path.exists(out_file):
                raise Error('Cannot overwrite existing file: %s' % out_file)
        if mode is None:
            mode = int(hdrfields[1], 8)
        #
        # Open the output file
        #
        if out_file == '-':
            out_file = sys.stdout
        elif isinstance(out_file, basestring):
            fp = open(out_file, 'wb')
            try:
                # Bug fix: this used to call os.path.chmod(), which does
                # not exist -- the AttributeError was always swallowed and
                # the decoded file never received the requested mode.
                # os.chmod() is the correct call; the guard is kept for
                # platforms that might lack chmod entirely.
                os.chmod(out_file, mode)
            except AttributeError:
                pass
            out_file = fp
            opened_files.append(out_file)
        #
        # Main decoding loop
        #
        s = in_file.readline()
        while s and s.strip() != 'end':
            try:
                data = binascii.a2b_uu(s)
            except binascii.Error as v:
                # Workaround for broken uuencoders by /Fredrik Lundh:
                # recompute the payload length from the count character
                # and decode only that many characters.
                nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
                data = binascii.a2b_uu(s[:nbytes])
                if not quiet:
                    sys.stderr.write("Warning: %s\n" % v)
            out_file.write(data)
            s = in_file.readline()
        if not s:
            raise Error('Truncated input file')
    finally:
        for f in opened_files:
            f.close()
def test():
    """uuencode/uudecode main program"""
    import optparse
    parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
    parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
    parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')

    (options, args) = parser.parse_args()
    if len(args) > 2:
        # NOTE(review): parser.error() already exits (status 2), so the
        # sys.exit(1) below looks unreachable -- confirm before relying on it.
        parser.error('incorrect number of arguments')
        sys.exit(1)

    # Default to the standard streams; positional arguments override.
    input = sys.stdin
    output = sys.stdout
    if len(args) > 0:
        input = args[0]
    if len(args) > 1:
        output = args[1]

    if options.decode:
        if options.text:
            if isinstance(output, basestring):
                # Text mode so decoded newlines stay unix-compatible.
                output = open(output, 'w')
            else:
                print sys.argv[0], ': cannot do -t to stdout'
                sys.exit(1)
        decode(input, output)
    else:
        if options.text:
            if isinstance(input, basestring):
                # Text mode: read the input with newline translation.
                input = open(input, 'r')
            else:
                print sys.argv[0], ': cannot do -t from stdin'
                sys.exit(1)
        encode(input, output)

if __name__ == '__main__':
    test()
| gpl-2.0 |
semirook/sphinxit | sphinxit/tests/test_processor.py | 4 | 7295 | # coding=utf-8
from __future__ import unicode_literals
import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
from sphinxit.core.nodes import Count, OR, RawAttr
from sphinxit.core.processor import Search, Snippet
from sphinxit.core.helpers import unix_timestamp, BaseSearchConfig
class SearchConfig(BaseSearchConfig):
    # Disable the SHOW META status query in tests so only the main
    # SphinxQL statement is generated and asserted on.
    WITH_STATUS = False
class TestSearch(unittest.TestCase):
    """Asserts the exact SphinxQL text produced by the Search builder."""

    def test_simple(self):
        search = Search(indexes=['company'], config=SearchConfig)
        search = search.match('Yandex')
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company WHERE MATCH('Yandex')"
        )

    def test_mutability(self):
        # match() returns a *new* object; calling it without reassignment
        # must leave the original query untouched.
        search = Search(indexes=['company'], config=SearchConfig)
        search.match('Yandex')
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company"
        )
        search = search.match('Yandex')
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company WHERE MATCH('Yandex')"
        )

    def test_with_select(self):
        search = Search(indexes=['company'], config=SearchConfig)
        search = search.select('id', 'date_created')
        search = search.match('Yandex')
        self.assertEqual(
            search.lex(),
            "SELECT id, date_created FROM company WHERE MATCH('Yandex')"
        )

    def test_with_or_filters(self):
        # OR() is built from a dict, so both operand orders are acceptable.
        correct_qls = [
            "SELECT *, (id>=100 OR id=1) AS cnd FROM company WHERE MATCH('Yandex') AND cnd>0",
            "SELECT *, (id=1 OR id>=100) AS cnd FROM company WHERE MATCH('Yandex') AND cnd>0",
        ]
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').filter(OR(id__gte=100, id__eq=1))
        self.assertIn(search.lex(), correct_qls)

    def test_with_or_filters_and_fields(self):
        # NOTE(review): the second variant says 'id>=0' while the filter is
        # id__gte=100 -- looks like a typo in the expected data; it only
        # passes because assertIn needs just one variant to match. Confirm.
        correct_qls = [
            "SELECT id, (id>=100 OR id=1) AS cnd FROM company WHERE MATCH('Yandex') AND cnd>0",
            "SELECT id, (id=1 OR id>=0) AS cnd FROM company WHERE MATCH('Yandex') AND cnd>0",
        ]
        search = Search(['company'], config=SearchConfig).select('id')
        search = search.match('Yandex').filter(OR(id__gte=100, id__eq=1))
        self.assertIn(search.lex(), correct_qls)

    def test_with_params(self):
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').limit(0, 100).order_by('name', 'desc')
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company WHERE MATCH('Yandex') ORDER BY name DESC LIMIT 0,100"
        )

    def test_with_options(self):
        search = Search(['company'], config=SearchConfig)
        search = (
            search
            .match('Yandex')
            .select('id', 'name')
            .options(
                ranker='proximity',
                max_matches=100,
                field_weights={'name': 100},
            )
            .order_by('name', 'desc')
        )
        # OPTION clauses come from a dict, so every permutation is valid.
        correct_options_qls = [
            "max_matches=100, ranker=proximity, field_weights=(name=100)",
            "max_matches=100, field_weights=(name=100), ranker=proximity",
            "ranker=proximity, field_weights=(name=100), max_matches=100",
            "ranker=proximity, max_matches=100, field_weights=(name=100)",
            "field_weights=(name=100), max_matches=100, ranker=proximity",
            "field_weights=(name=100), ranker=proximity, max_matches=100",
        ]
        correct_qls = [
            " ".join((
                "SELECT id, name FROM company WHERE MATCH('Yandex') ORDER BY name DESC OPTION",
                opt
            ))
            for opt in correct_options_qls
        ]
        self.assertIn(search.lex(), correct_qls)

    def test_with_double_match(self):
        # Consecutive match() calls concatenate their terms.
        search = Search(['company'], config=SearchConfig)
        search = search.match('ОАО').match('ТНК')
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company WHERE MATCH('ОАО ТНК')"
        )

    def test_with_time_filter(self):
        # Dates are rendered as unix timestamps in the WHERE clause.
        # NOTE(review): today() is read twice (in the filter and here); a run
        # straddling midnight could make these differ.
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').filter(date_created__lte=datetime.date.today())
        today = datetime.date.today()
        sxql = (
            "SELECT * FROM company WHERE MATCH('Yandex') "
            "AND date_created<=%s" % unix_timestamp(today)
        )
        self.assertEqual(search.lex(), sxql)

    def test_with_raw_attr(self):
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').select(RawAttr('@weight*10', 'skey'))
        self.assertEqual(
            search.lex(),
            "SELECT @weight*10 AS skey FROM company WHERE MATCH('Yandex')"
        )

    def test_update_syntax(self):
        # update() switches the statement from SELECT to UPDATE.
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').update(products=(5,2)).filter(id__gt=1)
        self.assertEqual(
            search.lex(),
            "UPDATE company SET products=(5,2) WHERE MATCH('Yandex') AND id>1"
        )

    def test_with_grouping(self):
        search = Search(['company'], config=SearchConfig)
        search = search.match('Yandex').select(Count()).group_by('date_created')
        self.assertEqual(
            search.lex(),
            "SELECT COUNT(*) AS num FROM company WHERE MATCH('Yandex') GROUP BY date_created"
        )

    def test_with_multiindex(self):
        search = Search(
            indexes=['company', 'company_delta'],
            config=SearchConfig
        )
        search = search.filter(id__gte=100)
        self.assertEqual(
            search.lex(),
            "SELECT * FROM company, company_delta WHERE id>=100"
        )
class TestSnippets(unittest.TestCase):
    """Asserts the exact CALL SNIPPETS statements built by Snippet."""

    def test_simple(self):
        # A single data chunk renders as a bare quoted string.
        snippet = Snippet(index='company', config=SearchConfig)
        snippet = snippet.for_query("Me amore").from_data("amore")
        self.assertEqual(
            snippet.lex(),
            "CALL SNIPPETS ('amore', 'company', 'Me amore')"
        )

    def test_extended_1(self):
        # Several chunks passed to one from_data() call become a tuple.
        snippet = Snippet(index='company', config=SearchConfig)
        snippet = snippet.for_query("Me amore")
        snippet = snippet.from_data("amore", "amore mia")
        self.assertEqual(
            snippet.lex(),
            "CALL SNIPPETS (('amore', 'amore mia'), 'company', 'Me amore')"
        )

    def test_extended_2(self):
        # Repeated from_data() calls accumulate chunks the same way.
        snippet = Snippet(index='company', config=SearchConfig)
        snippet = snippet.for_query("Me amore")
        snippet = snippet.from_data("amore").from_data("me amore")
        self.assertEqual(
            snippet.lex(),
            "CALL SNIPPETS (('amore', 'me amore'), 'company', 'Me amore')"
        )

    def test_with_options(self):
        # options() appends trailing "'value' AS name" clauses.
        snippet = Snippet(index='company', config=SearchConfig)
        snippet = snippet.for_query("Me amore").from_data("amore mia")
        snippet = snippet.options(before_match='<strong>', after_match='</strong>')
        self.assertEqual(
            snippet.lex(), (
                "CALL SNIPPETS ('amore mia', 'company', 'Me amore', "
                "'<strong>' AS before_match, '</strong>' AS after_match)"
            )
        )
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.5/Lib/test/test_htmllib.py | 43 | 1937 | import formatter
import htmllib
import unittest
from test import test_support
class AnchorCollector(htmllib.HTMLParser):
    """HTMLParser subclass recording the argument tuple of every anchor
    start tag the base parser reports."""

    def __init__(self, *args, **kw):
        # Accumulator must exist before the base class can invoke callbacks.
        self._collected = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_anchor_info(self):
        return self._collected

    def anchor_bgn(self, *args):
        # Callback invoked by htmllib for each <a ...> start tag.
        self._collected.append(args)
class DeclCollector(htmllib.HTMLParser):
    """HTMLParser subclass recording the text of every declaration the
    base parser does not recognize (e.g. '<![if ...]>' markup)."""

    def __init__(self, *args, **kw):
        # Accumulator must exist before the base class can invoke callbacks.
        self._collected = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_decl_info(self):
        return self._collected

    def unknown_decl(self, data):
        # Callback invoked by htmllib for each unrecognized declaration.
        self._collected.append(data)
class HTMLParserTestCase(unittest.TestCase):
    """Regression tests for htmllib anchor and declaration callbacks."""

    def test_anchor_collection(self):
        # See SF bug #467059.
        # Each anchor yields an (href, name, type) triple; missing
        # attributes come through as empty strings.
        parser = AnchorCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<a href='http://foo.org/' name='splat'> </a>
            <a href='http://www.python.org/'> </a>
            <a name='frob'> </a>
            """)
        parser.close()
        self.assertEquals(parser.get_anchor_info(),
                          [('http://foo.org/', 'splat', ''),
                           ('http://www.python.org/', '', ''),
                           ('', 'frob', ''),
                           ])

    def test_decl_collection(self):
        # See SF patch #545300
        # Conditional-comment style declarations must reach unknown_decl().
        parser = DeclCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<html>
            <body>
            hallo
            <![if !supportEmptyParas]> <![endif]>
            </body>
            </html>
            """)
        parser.close()
        self.assertEquals(parser.get_decl_info(),
                          ["if !supportEmptyParas",
                           "endif"
                           ])
def test_main():
    # Entry point used by CPython's regrtest machinery.
    test_support.run_unittest(HTMLParserTestCase)

if __name__ == "__main__":
    test_main()
| mit |
saymonpires/Iskrillex | core/tests.py | 4 | 1114 | """Copyright (C) <2014> <Saymon Pires da Silva>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA"""
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        total = 1 + 1
        self.assertEqual(total, 2)
| gpl-3.0 |
tribeiro/chimera | src/chimera/util/tests/test_position.py | 3 | 2292 |
from nose.tools import assert_raises
from chimera.util.position import Position, Epoch
from chimera.util.coord import Coord
import ephem
from datetime import datetime as dt
from dateutil import tz
def equal (a, b, e=0.0001):
    """Approximate equality: true when a and b differ by at most e."""
    difference = abs(a - b)
    return difference <= e
class TestPosition (object):
    """Tests for chimera's Position: parsing of the various coordinate
    systems, alt/az <-> ra/dec round-trips, and angular separation."""

    def test_ra_dec (self):
        # RA "10:00:00" is 10h = 150 degrees in decimal form.
        p = Position.fromRaDec("10:00:00", "20 00 00")
        assert p.dd() == (150, 20)
        assert_raises(ValueError, Position.fromRaDec, "xyz", "abc")

    def test_alt_az (self):
        p = Position.fromAltAz("60", "200")
        assert p.dd() == (60, 200)
        assert_raises(ValueError, Position.fromAltAz, "xyz", "abc")

    def test_long_lat (self):
        p = Position.fromLongLat("-27 30", "-48 00")
        assert p.dd() == (-27.5, -48.0)
        assert_raises(ValueError, Position.fromLongLat, "xyz", "abc")

    def test_galactic (self):
        p = Position.fromGalactic("-27 30", "-48 00")
        assert p.dd() == (-27.5, -48.0)
        assert_raises(ValueError, Position.fromGalactic, "xyz", "abc")

    def test_ecliptic (self):
        p = Position.fromEcliptic("-27 30", "-48 00")
        assert p.dd() == (-27.5, -48.0)
        assert_raises(ValueError, Position.fromEcliptic, "xyz", "abc")

    def test_altAzRaDec(self):
        # Round-trip alt/az -> ra/dec -> alt/az for an observer at the
        # equator on the Greenwich meridian, using the current LST.
        altAz = Position.fromAltAz('20:30:40', '222:11:00')
        lat = Coord.fromD(0)
        o = ephem.Observer()
        o.lat = '0:0:0'
        o.long = '0:0:0'
        o.date = dt.now(tz.tzutc())
        lst = float(o.sidereal_time())
        raDec = Position.altAzToRaDec(altAz, lat, lst)
        altAz2 = Position.raDecToAltAz(raDec, lat, lst)
        # NOTE(review): '&' is bitwise, not logical 'and'; it works here
        # because equal() returns bools, but 'and' would be clearer.
        assert equal(altAz.alt.toR(),altAz2.alt.toR()) & equal(altAz.az.toR(),altAz2.az.toR())

    def test_distances(self):
        # Two points on the equator 2h apart are 30 degrees apart.
        p1 = Position.fromRaDec("10:00:00", "0:0:0")
        p2 = Position.fromRaDec("12:00:00", "0:0:0")
        # NOTE(review): d is computed but never asserted on.
        d = p1.angsep(p2)
        assert p1.within(p2, Coord.fromD(29.99)) == False
        assert p1.within(p2, Coord.fromD(30.01)) == True

    def test_changeEpoch(self):
        # Smoke test only: converts Sirius' J2000 coordinates to the
        # current epoch and prints both; no assertion is made.
        sirius_j2000 = Position.fromRaDec("06 45 08.9173", "-16 42 58.017")
        sirius_now = sirius_j2000.toEpoch(epoch=Epoch.NOW)
        print
        print sirius_j2000
        print sirius_now
| gpl-2.0 |
roadmapper/ansible | test/units/modules/network/fortios/test_fortios_system_replacemsg_traffic_quota.py | 21 | 8643 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_replacemsg_traffic_quota
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Autouse fixture: patches the Connection class inside the module under
    # test for every test in this file, so no real device is contacted.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_replacemsg_traffic_quota.Connection')
    return connection_class_mock

# NOTE(review): at import time this passes the fixture *function* itself
# (not a mock instance) to FortiOSHandler. The tests below patch set/delete
# on the handler class, so the bogus connection is never exercised -- but
# confirm before reusing fos_instance elsewhere.
fos_instance = FortiOSHandler(connection_mock)
def test_system_replacemsg_traffic_quota_creation(mocker):
    """A successful 'present' run must call set() with underscores mapped
    to hyphens and report changed=True."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_reply = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_reply)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_traffic_quota': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    err_flag, did_change, reply = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(module_args, fos_instance)

    # Keys are translated from snake_case to the API's hyphenated names.
    forwarded_body = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_mock.assert_called_with('system.replacemsg', 'traffic-quota', data=forwarded_body, vdom='root')
    schema_mock.assert_not_called()
    assert not err_flag
    assert did_change
    assert reply['status'] == 'success'
    assert reply['http_status'] == 200
def test_system_replacemsg_traffic_quota_creation_fails(mocker):
    """A set() returning HTTP 500 must surface is_error and changed=False."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_traffic_quota': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(input_data, fos_instance)

    # Keys are translated from snake_case to the API's hyphenated names.
    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'traffic-quota', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_replacemsg_traffic_quota_removal(mocker):
    """state=absent must call delete() on the endpoint and report changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_replacemsg_traffic_quota': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(input_data, fos_instance)

    # mkey (the record key) is derived internally, so any value is accepted.
    delete_method_mock.assert_called_with('system.replacemsg', 'traffic-quota', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_replacemsg_traffic_quota_deletion_fails(mocker):
    """A delete() returning HTTP 500 must surface is_error and changed=False."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_replacemsg_traffic_quota': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(input_data, fos_instance)

    # mkey (the record key) is derived internally, so any value is accepted.
    delete_method_mock.assert_called_with('system.replacemsg', 'traffic-quota', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_replacemsg_traffic_quota_idempotent(mocker):
    """A 404 from the device means 'already in desired state': no error,
    no change reported."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_traffic_quota': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(input_data, fos_instance)

    # Keys are translated from snake_case to the API's hyphenated names.
    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'traffic-quota', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_replacemsg_traffic_quota_filter_foreign_attributes(mocker):
    """Attributes not declared in the module schema must be stripped before
    the payload is forwarded to set()."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_traffic_quota': {
            'random_attribute_not_valid': 'tag',
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_traffic_quota.fortios_system_replacemsg(input_data, fos_instance)

    # 'random_attribute_not_valid' must not appear in the forwarded body.
    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'traffic-quota', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
Bloodyaugust/sugarlabcppboilerplate | lib/boost/tools/build/test/expansion.py | 6 | 1766 | #!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("a.cpp", """
#ifdef CF_IS_OFF
int main() {}
#endif
""")
t.write("b.cpp", """
#ifdef CF_1
int main() {}
#endif
""")
t.write("c.cpp", """
#ifdef FOO
int main() {}
#endif
""")
t.write("jamfile.jam", """
# See if default value of composite feature 'cf' will be expanded to
# <define>CF_IS_OFF.
exe a : a.cpp ;
# See if subfeature in requirements in expanded.
exe b : b.cpp : <cf>on-1 ;
# See if conditional requirements are recursively expanded.
exe c : c.cpp : <toolset>$toolset:<variant>release <variant>release:<define>FOO
;
""")
t.write("jamroot.jam", """
import feature ;
feature.feature cf : off on : composite incidental ;
feature.compose <cf>off : <define>CF_IS_OFF ;
feature.subfeature cf on : version : 1 2 : composite optional incidental ;
feature.compose <cf-on:version>1 : <define>CF_1 ;
""")
t.expand_toolset("jamfile.jam")
t.run_build_system()
t.expect_addition(["bin/$toolset/debug/a.exe",
"bin/$toolset/debug/b.exe",
"bin/$toolset/release/c.exe"])
t.rm("bin")
# Test for issue BB60.
t.write("test.cpp", """
#include "header.h"
int main() {}
""")
t.write("jamfile.jam", """
project : requirements <toolset>$toolset:<include>foo ;
exe test : test.cpp : <toolset>$toolset ;
""")
t.expand_toolset("jamfile.jam")
t.write("foo/header.h", "\n")
t.write("jamroot.jam", "")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/test.exe")
t.cleanup()
| gpl-2.0 |
boisvert42/npr-puzzle-python | 2016/0417_musicals.py | 1 | 2370 | '''
NPR Puzzle 2016-04-17
http://www.npr.org/2016/04/17/474509250/what-do-you-get-when-a-city-slicker-drops-his-vowels
Here's a tricky challenge from Sandy Weisz of Chicago.
Take the name of a famous musical. Write it in upper-
and lowercase letters, as you usually would.
Now turn one of the characters upside-down
and move it to another place in the title.
The result will be the last name of a well-known
stage performer. What is the musical,
and who is the performer?
'''
import sys
sys.path.append('..')
from nprcommontools import wikipedia_category_members
from collections import defaultdict
import re
#%%
def flip_letter(c):
    """
    Return the character *c* turned upside-down, or None when the
    character has no recognizable upside-down counterpart.
    """
    # Visual rotate-by-180 lookalikes (lowercase except a few capitals
    # and '!', which reads as a dotted 'i' when flipped).
    flip_dict = {'a': 'e', 'b': 'q', 'd': 'p',
                 'e': 'a', 'h': 'y', 'm': 'w', 'n': 'u', 'p': 'd',
                 'q': 'b', 'r': 'J', 'u': 'n', 'w': 'm', 'y': 'h',
                 'J': 'r', 'M': 'W', 'P': 'd', 'W': 'M', '!': 'i'}
    # dict.get() already yields None for unknown keys -- no need for the
    # original try/except KeyError dance.
    return flip_dict.get(c)
# Get a list of musicals from Wikipedia
musicals = wikipedia_category_members('Broadway_musicals')
#musicals = musicals.union(wikipedia_category_members('Off-Broadway_musicals'))
#musicals = musicals.union(wikipedia_category_members('American musical films'))

# Get a list of famous names: last-name -> list of full names, keeping only
# entries with a fame score of at least 90.
names = defaultdict(list)
with open(r'../wordlists/FamousNames.txt','rb') as fid:
    for line in fid.readlines():
        line = line.strip()
        name,score = line.split('\t')
        score = int(score)
        if score >= 90:# and ' ' in name:
            names[name.lower().split(' ')[-1]].append(name)

# Frozen set of last names for O(1) membership tests in the loop below.
name_set = frozenset(names.keys())

#%%
# Go through musicals and look for names that fit: remove one flippable
# character, re-insert its flipped form at every position, and check whether
# the result is a famous last name.
for musical in musicals:
    # Keep only letters and '!' (the one flippable punctuation mark).
    musical_nospace = re.sub(r'[^A-Za-z!]+','',musical)
    for i in range(len(musical_nospace)):
        letter = musical_nospace[i]
        if flip_letter(letter) is not None:
            l2 = flip_letter(letter).lower()
            musical_letter_missing = musical_nospace[:i]+musical_nospace[i+1:]
            musical_letter_missing = musical_letter_missing.lower()
            for j in range(len(musical_nospace)):
                name = musical_letter_missing[:j] + l2 + musical_letter_missing[j:]
                #print name
                if name in name_set:
                    print '{0} -> {1} ({2})'.format(musical,name,', '.join(names[name]))
qk4l/Flexget | flexget/api/core/server.py | 3 | 19094 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import copy
import base64
import os
import json
import sys
import logging
import threading
import traceback
from time import sleep
import binascii
import cherrypy
import yaml
from flask import Response, jsonify, request
from flask_restplus import inputs
from flexget.utils.tools import get_latest_flexget_version_number
from pyparsing import Word, Keyword, Group, Forward, Suppress, OneOrMore, oneOf, White, restOfLine, ParseException, \
Combine
from pyparsing import nums, alphanums, printables
from yaml.error import YAMLError
from flexget._version import __version__
from flexget.api import api, APIResource
from flexget.api.app import __version__ as __api_version__, APIError, BadRequest, base_message, success_response, \
base_message_schema, \
empty_response, etag
log = logging.getLogger('api.server')
server_api = api.namespace('server', description='Manage Daemon')
class ObjectsContainer(object):
    # JSON-schema fragments used to register the API models below.

    # YAML syntax errors: base message plus the error's position and reason.
    yaml_error_response = copy.deepcopy(base_message)
    yaml_error_response['properties']['column'] = {'type': 'integer'}
    yaml_error_response['properties']['line'] = {'type': 'integer'}
    yaml_error_response['properties']['reason'] = {'type': 'string'}

    # Config validation errors: base message plus the failing config path.
    config_validation_error = copy.deepcopy(base_message)
    config_validation_error['properties']['error'] = {'type': 'string'}
    config_validation_error['properties']['config_path'] = {'type': 'string'}

    pid_object = {
        'type': 'object',
        'properties': {
            'pid': {'type': 'integer'}
        }
    }

    # raw_config carries the YAML file as a base64-encoded string.
    raw_config_object = {
        'type': 'object',
        'properties': {
            'raw_config': {'type': 'string'}
        }
    }

    # latest_version is null when the remote version lookup fails.
    version_object = {
        'type': 'object',
        'properties': {
            'flexget_version': {'type': 'string'},
            'api_version': {'type': 'string'},
            'latest_version': {'type': ['string', 'null']}
        }
    }

    # One entry per live thread: name, id and its stack-trace lines.
    dump_threads_object = {
        'type': 'object',
        'properties': {
            'threads': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'name': {'type': 'string'},
                        'id': {'type': 'string'},
                        'dump': {
                            'type': 'array',
                            'items': {'type': 'string'}
                        }
                    },
                },
            }
        }
    }

    # Request body for /server/manage/: reload or (optionally forced) shutdown.
    server_manage = {
        'type': 'object',
        'properties': {
            'operation': {'type': 'string', 'enum': ['reload', 'shutdown']},
            'force': {'type': 'boolean'}
        },
        'required': ['operation'],
        'additionalProperties': False
    }
# Register the raw JSON-schema dicts above as named API models so the
# endpoints can reference them in @api.response/@api.validate decorators.
yaml_error_schema = api.schema_model('yaml_error_schema', ObjectsContainer.yaml_error_response)
config_validation_schema = api.schema_model('config_validation_schema', ObjectsContainer.config_validation_error)
pid_schema = api.schema_model('server.pid', ObjectsContainer.pid_object)
raw_config_schema = api.schema_model('raw_config', ObjectsContainer.raw_config_object)
version_schema = api.schema_model('server.version', ObjectsContainer.version_object)
dump_threads_schema = api.schema_model('server.dump_threads', ObjectsContainer.dump_threads_object)
server_manage_schema = api.schema_model('server.manage', ObjectsContainer.server_manage)
@server_api.route('/manage/')
class ServerReloadAPI(APIResource):

    @api.validate(server_manage_schema)
    @api.response(501, model=yaml_error_schema, description='YAML syntax error')
    @api.response(502, model=config_validation_schema, description='Config validation error')
    @api.response(200, model=base_message_schema)
    def post(self, session=None):
        """ Manage server operations """
        data = request.json
        if data['operation'] == 'reload':
            # Re-read the config file from disk; surface YAML syntax and
            # schema validation problems as structured API errors.
            try:
                self.manager.load_config(output_to_console=False)
            except YAMLError as e:
                if hasattr(e, 'problem') and hasattr(e, 'context_mark') and hasattr(e, 'problem_mark'):
                    # Collect whatever position info the YAML error carries.
                    error = {}
                    if e.problem is not None:
                        error.update({'reason': e.problem})
                    if e.context_mark is not None:
                        error.update({'line': e.context_mark.line, 'column': e.context_mark.column})
                    if e.problem_mark is not None:
                        error.update({'line': e.problem_mark.line, 'column': e.problem_mark.column})
                    raise APIError(message='Invalid YAML syntax', payload=error)
            except ValueError as e:
                # NOTE(review): assumes the ValueError raised by load_config
                # carries an 'errors' list of schema errors (flexget's config
                # errors do); a plain ValueError would AttributeError here.
                errors = []
                for er in e.errors:
                    errors.append({'error': er.message,
                                   'config_path': er.json_pointer})
                raise APIError('Error loading config: %s' % e.args[0], payload={'errors': errors})
            response = 'Config successfully reloaded from disk'
        else:
            # Schema restricts 'operation' to reload/shutdown, so this
            # branch is shutdown; 'force' is optional.
            self.manager.shutdown(data.get('force'))
            response = 'Shutdown requested'
        return success_response(response)
@server_api.route('/pid/')
class ServerPIDAPI(APIResource):
    @api.response(200, description='Reloaded config', model=pid_schema)
    def get(self, session=None):
        """ Get server PID """
        # Process id of the running daemon, wrapped in the pid_schema shape.
        current_pid = os.getpid()
        return jsonify({'pid': current_pid})
@server_api.route('/config/')
class ServerConfigAPI(APIResource):
    @etag
    @api.response(200, description='Flexget config', model=empty_response)
    def get(self, session=None):
        """ Get Flexget Config in JSON form"""
        # The manager already holds the parsed config; serialize it as-is.
        active_config = self.manager.config
        return jsonify(active_config)
@server_api.route('/raw_config/')
class ServerRawConfigAPI(APIResource):
    @etag
    @api.doc(description='Return config file encoded in Base64')
    @api.response(200, model=raw_config_schema, description='Flexget raw YAML config file encoded in Base64')
    def get(self, session=None):
        """ Get raw YAML config file """
        # Read the on-disk YAML and ship it base64-encoded so arbitrary
        # YAML content survives the JSON transport unmodified.
        with open(self.manager.config_path, 'r', encoding='utf-8') as f:
            raw_config = base64.b64encode(f.read().encode("utf-8"))
        return jsonify(raw_config=raw_config.decode('utf-8'))

    @api.validate(raw_config_schema)
    @api.response(200, model=base_message_schema, description='Successfully updated config')
    @api.response(BadRequest)
    @api.response(APIError)
    @api.doc(description='Config file must be base64 encoded. A backup will be created, and if successful config will'
                         ' be loaded and saved to original file.')
    def post(self, session=None):
        """ Update config """
        data = request.json
        # Step 1: decode the base64 payload.
        try:
            raw_config = base64.b64decode(data['raw_config'])
        except (TypeError, binascii.Error):
            raise BadRequest(message='payload was not a valid base64 encoded string')
        # Step 2: parse it as YAML, reporting position info on failure.
        try:
            config = yaml.safe_load(raw_config)
        except YAMLError as e:
            if hasattr(e, 'problem') and hasattr(e, 'context_mark') and hasattr(e, 'problem_mark'):
                error = {}
                if e.problem is not None:
                    error.update({'reason': e.problem})
                if e.context_mark is not None:
                    error.update({'line': e.context_mark.line, 'column': e.context_mark.column})
                if e.problem_mark is not None:
                    error.update({'line': e.problem_mark.line, 'column': e.problem_mark.column})
                raise BadRequest(message='Invalid YAML syntax', payload=error)
        # Step 3: validate and apply the new config in memory.
        try:
            backup_path = self.manager.update_config(config)
        except ValueError as e:
            # NOTE(review): assumes e.errors is a list of schema errors, as
            # raised by flexget's config validation.
            errors = []
            for er in e.errors:
                errors.append({'error': er.message,
                               'config_path': er.json_pointer})
            raise BadRequest(message='Error loading config: %s' % e.args[0], payload={'errors': errors})
        # Step 4: back up the old file before touching it on disk.
        try:
            self.manager.backup_config()
        except Exception as e:
            raise APIError(message='Failed to create config backup, config updated but NOT written to file',
                           payload={'reason': str(e)})
        # Step 5: write the new YAML, normalizing CRLF line endings.
        try:
            with open(self.manager.config_path, 'w', encoding='utf-8') as f:
                f.write(raw_config.decode('utf-8').replace('\r\n', '\n'))
        except Exception as e:
            raise APIError(message='Failed to write new config to file, please load from backup',
                           payload={'reason': str(e), 'backup_path': backup_path})
        return success_response('Config was loaded and successfully updated to file')
@server_api.route('/version/')
@api.doc(description='In case of a request error when fetching latest flexget version, that value will return as null')
class ServerVersionAPI(APIResource):
    # Exposes the running FlexGet version, the API version, and the latest released version.

    @api.response(200, description='Flexget version', model=version_schema)
    def get(self, session=None):
        """ Flexget Version """
        # `latest` is None when the upstream version lookup fails (see route doc)
        latest = get_latest_flexget_version_number()
        return jsonify({'flexget_version': __version__,
                        'api_version': __api_version__,
                        'latest_version': latest})
@server_api.route('/dump_threads/', doc=False)
class ServerDumpThreads(APIResource):
    """Debug endpoint: dump a stack trace for every live interpreter thread."""

    @api.response(200, description='Flexget threads dump', model=dump_threads_schema)
    def get(self, session=None):
        """ Dump Server threads for debugging """
        names_by_ident = {thread.ident: thread.name for thread in threading.enumerate()}
        thread_dumps = []
        for ident, frame in sys._current_frames().items():
            trace_lines = []
            for frame_file, frame_lineno, func_name, source_line in traceback.extract_stack(frame):
                trace_lines.append('File: "%s", line %d, in %s' % (frame_file, frame_lineno, func_name))
                if source_line:
                    trace_lines.append(source_line.strip())
            thread_dumps.append({
                'name': names_by_ident.get(ident),
                'id': ident,
                'dump': trace_lines
            })
        return jsonify(threads=thread_dumps)
# Request arguments for the log-streaming endpoint (ServerLogAPI below).
server_log_parser = api.parser()
server_log_parser.add_argument('lines', type=int, default=200, help='How many lines to find before streaming')
server_log_parser.add_argument('search', help='Search filter support google like syntax')
def reverse_readline(fh, start_byte=0, buf_size=8192):
    """A generator that yields the lines of a binary file in reverse order.

    :param fh: file object opened in binary mode
    :param start_byte: only consider the first ``start_byte`` bytes of the
        file (0 means the whole file)
    :param buf_size: number of bytes to read per chunk
    """
    segment = None  # possibly-incomplete first line of the previously read chunk
    offset = 0
    if start_byte:
        fh.seek(start_byte)
    else:
        fh.seek(0, os.SEEK_END)
    total_size = remaining_size = fh.tell()
    while remaining_size > 0:
        offset = min(total_size, offset + buf_size)
        # bugfix: seek relative to the logical end (`total_size`) instead of
        # the physical end of file, so a non-zero start_byte reads the
        # intended region rather than the file's tail.
        fh.seek(total_size - offset)
        buf = fh.read(min(remaining_size, buf_size))
        remaining_size -= buf_size
        lines = buf.decode(sys.getfilesystemencoding()).split('\n')
        # The first line of the buffer is probably not a complete line, so
        # save it and append it to the last line of the next chunk we read.
        if segment is not None:
            # If this chunk ends exactly on a line boundary, do not
            # concatenate the saved segment to the chunk's last line;
            # yield the segment on its own instead.
            # (bugfix: `buf[-1] is not '\n'` compared a bytes element --
            # an int on Python 3 -- against a str by identity, which was
            # always true; compare byte slices with != instead.)
            if buf[-1:] != b'\n':
                lines[-1] += segment
            else:
                yield segment
        segment = lines[0]
        for index in range(len(lines) - 1, 0, -1):
            if len(lines[index]):
                yield lines[index]
    # bugfix: an empty file used to yield a spurious None
    if segment is not None:
        yield segment
def file_inode(filename):
    """Return the inode number of ``filename``, or 0 if it cannot be opened.

    Used by the log streamer to detect log rotation.
    """
    try:
        fd = os.open(filename, os.O_RDONLY)
    except OSError:
        # missing or unreadable file
        return 0
    # bugfix: the old version referenced `fd` in a finally block even when
    # os.open itself raised (UnboundLocalError), and its `if fd:` guard
    # skipped closing a valid descriptor 0.
    try:
        return os.fstat(fd).st_ino
    finally:
        os.close(fd)
@server_api.route('/log/')
class ServerLogAPI(APIResource):
    # Streams the FlexGet log as line-delimited JSON: first a backlog of
    # matching lines, then a live tail of the active log file.

    @api.doc(parser=server_log_parser)
    @api.response(200, description='Streams as line delimited JSON')
    def get(self, session=None):
        """ Stream Flexget log Streams as line delimited JSON """
        args = server_log_parser.parse_args()

        def follow(lines, search):
            # Generator: replay up to `lines` matching log lines from the
            # (possibly rotated) log files, then tail the live file until
            # the server shuts down.
            log_parser = LogParser(search)
            stream_from_byte = 0
            lines_found = []

            if os.path.isabs(self.manager.options.logfile):
                base_log_file = self.manager.options.logfile
            else:
                base_log_file = os.path.join(self.manager.config_base, self.manager.options.logfile)

            yield '{"stream": ['  # Start of the json stream

            # Read back in the logs until we find enough lines
            for i in range(0, 9):
                log_file = ('%s.%s' % (base_log_file, i)).rstrip('.0')  # 1st log file has no number
                if not os.path.isfile(log_file):
                    break
                with open(log_file, 'rb') as fh:
                    fh.seek(0, 2)  # Seek to bottom of file
                    end_byte = fh.tell()
                    if i == 0:
                        stream_from_byte = end_byte  # Stream from this point later on
                    if len(lines_found) >= lines:
                        break
                    # Read in reverse for efficiency
                    for line in reverse_readline(fh, start_byte=end_byte):
                        if len(lines_found) >= lines:
                            break
                        if log_parser.matches(line):
                            lines_found.append(log_parser.json_string(line))
            # emit the backlog oldest-first
            for l in reversed(lines_found):
                yield l + ',\n'

            # We need to track the inode in case the log file is rotated
            current_inode = file_inode(base_log_file)
            while True:
                # If the server is shutting down then end the stream nicely
                if cherrypy.engine.state != cherrypy.engine.states.STARTED:
                    break
                new_inode = file_inode(base_log_file)
                if current_inode != new_inode:
                    # File updated/rotated. Read from beginning
                    stream_from_byte = 0
                    current_inode = new_inode
                try:
                    with open(base_log_file, 'rb') as fh:
                        fh.seek(stream_from_byte)
                        line = fh.readline().decode(sys.getfilesystemencoding())
                        stream_from_byte = fh.tell()
                except IOError:
                    # keep the stream alive with an empty JSON object
                    yield '{}'
                    continue
                # If a valid line is found and does not pass the filter then set it to none
                line = log_parser.json_string(line) if log_parser.matches(line) else '{}'
                if line == '{}':
                    # If no match then delay to prevent many read hits on the file
                    sleep(2)
                yield line + ',\n'

            yield '{}]}'  # End of stream

        return Response(follow(args['lines'], args['search']), mimetype='text/event-stream')
class LogParser(object):
    """
    Filter log file.
    Supports
    * 'and', 'or' and implicit 'and' operators;
    * parentheses;
    * quoted strings;
    """

    def __init__(self, query):
        # Dispatch table: pyparsing results-name -> evaluator method.
        self._methods = {
            'and': self.evaluate_and,
            'or': self.evaluate_or,
            'not': self.evaluate_not,
            'parenthesis': self.evaluate_parenthesis,
            'quotes': self.evaluate_quotes,
            'word': self.evaluate_word,
        }

        self.line = ''
        self.query = query.lower() if query else ''

        if self.query:
            # TODO: Cleanup
            # Recursive-descent grammar for the google-like query language.
            # Precedence, tightest first: quotes/parens, not, and
            # (explicit or implicit adjacency), or.
            operator_or = Forward()
            operator_word = Group(Word(alphanums)).setResultsName('word')

            operator_quotes_content = Forward()
            operator_quotes_content << (
                (operator_word + operator_quotes_content) | operator_word
            )

            operator_quotes = Group(
                Suppress('"') + operator_quotes_content + Suppress('"')
            ).setResultsName('quotes') | operator_word

            operator_parenthesis = Group(
                (Suppress('(') + operator_or + Suppress(")"))
            ).setResultsName('parenthesis') | operator_quotes

            operator_not = Forward()
            # NOTE(review): the keyword is 'no' although the results name
            # (and evaluator) is 'not' -- looks like a typo for 'not';
            # confirm the intended negation keyword before changing.
            operator_not << (Group(
                Suppress(Keyword('no', caseless=True)) + operator_not
            ).setResultsName('not') | operator_parenthesis)

            operator_and = Forward()
            operator_and << (Group(
                operator_not + Suppress(Keyword('and', caseless=True)) + operator_and
            ).setResultsName('and') | Group(
                operator_not + OneOrMore(~oneOf('and or') + operator_and)
            ).setResultsName('and') | operator_not)

            operator_or << (Group(
                operator_and + Suppress(Keyword('or', caseless=True)) + operator_or
            ).setResultsName('or') | operator_and)

            self._query_parser = operator_or.parseString(self.query)[0]
        else:
            self._query_parser = False

        # Grammar for one log line: "YY-MM-DD HH:MM level plugin task message".
        time_cmpnt = Word(nums).setParseAction(lambda t: t[0].zfill(2))
        date = Combine((time_cmpnt + '-' + time_cmpnt + '-' + time_cmpnt) + ' ' + time_cmpnt + ':' + time_cmpnt)
        word = Word(printables)

        self._log_parser = (
            date.setResultsName('timestamp') +
            word.setResultsName('log_level') +
            word.setResultsName('plugin') +
            (
                # a wide whitespace run means the task column is blank
                White(min=16).setParseAction(lambda s, l, t: [t[0].strip()]).setResultsName('task') |
                (White(min=1).suppress() & word.setResultsName('task'))
            ) +
            restOfLine.setResultsName('message')
        )

    def evaluate_and(self, argument):
        # both sub-expressions must match
        return self.evaluate(argument[0]) and self.evaluate(argument[1])

    def evaluate_or(self, argument):
        return self.evaluate(argument[0]) or self.evaluate(argument[1])

    def evaluate_not(self, argument):
        return not self.evaluate(argument[0])

    def evaluate_parenthesis(self, argument):
        return self.evaluate(argument[0])

    def evaluate_quotes(self, argument):
        # match the quoted words as one contiguous, whitespace-normalized phrase
        search_terms = [term[0] for term in argument]
        return ' '.join(search_terms) in ' '.join(self.line.split())

    def evaluate_word(self, argument):
        return argument[0] in self.line

    def evaluate(self, argument):
        # dispatch on this parse node's results name
        return self._methods[argument.getName()](argument)

    def matches(self, line):
        """Return True if `line` satisfies the query (always True with no query)."""
        if not line:
            return False
        self.line = line.lower()

        if not self._query_parser:
            return True
        else:
            return self.evaluate(self._query_parser)

    def json_string(self, line):
        """Parse one log line into its fields as a JSON object string ('{}' on failure)."""
        try:
            # NOTE(review): `self._log_parser()` relies on
            # ParserElement.__call__ returning a copy of the expression;
            # confirm the extra call is intentional and not a stray pair
            # of parentheses.
            return json.dumps(self._log_parser().parseString(line).asDict())
        except ParseException:
            return '{}'
| mit |
Guzi219/Python | com/github/crawlspider/eng-website/englishDownloadWithBrowserUI.py | 1 | 15021 | # -*- coding: utf-8 -*-
import datetime
import os
import string
import sys
import thread
import time
import urllib
import urllib2
import requests
from BeautifulSoup import BeautifulSoup
from requests.exceptions import MissingSchema, ReadTimeout
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from DriverSingleton import Driver
from com.github.utils.IniFileUtil import INIFILE
# ----------- 加载处理糗事百科 -----------
class Spider_Model:
    def __init__(self):
        """Initialize crawler state, the download directory and search options."""
        self.page = 1       # next result page to fetch
        self.pages = []     # prefetched pages (each entry is a list of items)
        self.enable = False  # main-loop run flag
        self.canLoad = True  # sub thread can run?
        self.allDone = False
        self.store_dir = None
        self.init_work_dir()
        self.isFirst = True  # run only once
        self.unload_page_num = 0  # page to be loaded
        self.proFileName = 'english_download.ini'
        self.defaultSearch = {'wd':'2018 高一 英语 试题 试卷'}
        self.filterWords = [u'英语', u'高一', u'试', '2018']
        # main domain of the target site
        self.domainName = 'http://www.well1000.cn'
        # selenium driver wrapper simulating a browser
        self.webDriverPage = Driver()
        # counter for the first browser window
        self.windowOpenNum = 0
        # windows are closed starting from the second one
        #

    def setDefaultCharset(self):
        """Force the process default encoding to utf-8 (Python 2 only hack)."""
        reload(sys)
        sys.setdefaultencoding('utf-8')
    # init the storage dir = 'tmp /'
    def init_work_dir(self):
        """Create (if needed) and remember the ./tmp download directory."""
        retval = os.getcwd()
        print '#current dir is : ' + retval
        # directory where all downloads are stored
        store_dir = retval + os.sep + 'tmp'
        print '#all imgs are going to be stored in dir :' + store_dir
        if not os.path.exists(store_dir):
            print '#tmp dir does not exist, attemp to mkdir'
            os.mkdir(store_dir)
            print '#mkdir sucessfully'
        else:
            print '#tmp dir is already exist'
        self.store_dir = store_dir
        # print '#now change current dir to tmp'
        # os.chdir(store_dir) #no neccessary
        # print os.getcwd()

    def print_commet(self):
        """Print a visual separator line."""
        print '==================================='

    # get the current time
    def now_date(self):
        """Return the current local time formatted as YYYYmmddHHMMSS."""
        now = datetime.datetime.now()  # -> datetime object
        # convert to the requested compact format
        formateDate = now.strftime("%Y%m%d%H%M%S")
        return formateDate

    # file extension of a url
    def file_extension(self, url):
        """Return the extension (including the dot) of the file named by `url`."""
        # get filename
        filename = os.path.basename(url)
        ext = os.path.splitext(filename)[1]
        return ext
    # save a downloaded image
    def saveFile(self, url, page, idx):
        """Download `url` and store it under the work dir with a timestamped name."""
        user_define_name = self.now_date() + '_p_' + str(page) + '_' + string.zfill(idx, 2)  # zero-pad index to 2 digits
        file_ext = self.file_extension(url)  # remote file extension
        save_file_name = user_define_name + "_" + file_ext
        # urllib.urlretrieve failed to save the file, so use requests + open
        # urllib.urlretrieve(item[0], self.save_path + save_file_name)
        # make sure the url carries a scheme before requesting it
        url = self.CheckUrlValidate(url)
        try:
            pic = requests.get(url, timeout=10)
            f = open(self.store_dir + os.sep + save_file_name, 'wb')
            f.write(pic.content)
            f.close()
            print '\ndone save file ' + save_file_name
        except ReadTimeout:
            print 'save file %s failed. cause by timeout(10)' % (save_file_name)
        except MissingSchema:
            print 'invalid url %s' % (url)
        except Exception, e:
            print e

    # save a downloaded document
    def saveDocFile(self, doc, url, fileName):
        """Write an already-fetched response `doc` to `fileName` in the work dir."""
        try:
            f = open(self.store_dir + os.sep + fileName, 'wb')
            f.write(doc.content)
            f.close()
            print '\ndone save file ' + fileName
        except ReadTimeout:
            print 'save file %s failed. cause by timeout(10)' % (fileName)
        except MissingSchema:
            print 'invalid url %s' % (url)
        except Exception, e:
            print e

    # ensure the url includes the http: scheme
    def CheckUrlValidate(self, url):
        """Prefix scheme-relative urls (//host/...) with 'http:' and return the url."""
        print url
        if not url.startswith('http') and url.startswith("//"):
            url = "http:" + url
        return url
    # open one article page in the simulated browser and trigger its download
    def GetFileDownloadPath(self, pageUrl):
        """Open `pageUrl` (relative to the site domain) and click its download link."""
        pageUrl = self.domainName + pageUrl
        # open the page in the selenium-driven browser
        self.webDriverPage.start(pageUrl)
        self.retryClick(pageUrl)
        time.sleep(1.22)
        # (commented experiments: opening new tabs / closing windows)
        # self.webDriverPage.find_element(By.TAG_NAME, 'body').send_keys(Keys.COMMAND + 't')
        # self.webDriverPage.find_element(By.TAG_NAME, 'body').send_keys(Keys.COMMAND + 't')
        # self.webDriverPage.close()

    # works around: StaleElementReferenceException: Message: stale element
    # reference: element is not attached to the page document
    def retryClick(self, pageUrl):
        """Try up to 3 times to scroll to and click the download link on `pageUrl`."""
        result = False
        attempts = 0
        while attempts < 3:
            try:
                # wait a while, then start over
                time.sleep(3.333)
                if attempts > 0:
                    print 'reload ' + pageUrl
                    self.webDriverPage.start(pageUrl)
                # scroll the download link into view
                self.webDriverPage.driver.execute_script("window.scrollTo(0,1050)")
                time.sleep(1.111)
                link = self.webDriverPage.find_element(By.CSS_SELECTOR, 'div#biao a')
                # click the link to start the download
                link.click()
                print('downloading ' + link.text)
                result = True
                break
            except StaleElementReferenceException, e:
                # print self.webDriverPage.driver.page_source
                print e.message
            except Exception, e:
                print e.message
            attempts += 1
        return result
# 将所有的段子都扣出来,添加到列表中并且返回列表
def GetPage(self, page):
site_url = 'http://www.well1000.cn/so/search_well.aspx?' + urllib.urlencode(self.defaultSearch)
myUrl = site_url + '&pg=' + page
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
req = urllib2.Request(myUrl, headers=headers)
print u'\n----background----now loading: page/' + page
myResponse = urllib2.urlopen(req, data=None, timeout=10)
# 检测网页编码
# print chardet.detect(myResponse.read())
# 将获取的字符串strTxt做decode时,指明ignore,会忽略非gb2312编码的字符
myPage = myResponse.read()
# .decode('gb2312','ignore').encode('utf-8')
# encode的作用是将unicode编码转换成其他编码的字符串
# decode的作用是将其他编码的字符串转换成unicode编码
unicodePage = myPage.decode("utf-8")
# remove emoji[when this page get emoji inside, it'll run error:unichr() arg not in range(0x10000) (narrow Python build)]
# unicodePage= emojiutil.remove_define_emoji(unicodePage)
# only do it once
if self.isFirst:
self.isFirst = False
print '====================get the max page number===================='
self.GetTotalPage(unicodePage)
# if self.unload_page_num > int(page):
# print 'already done load all new page, stop thread now.'
# self.enable = False
# os._exit(0)
link_soups = BeautifulSoup(unicodePage)
# print(link_soups)
# 格式如下(每页20个)
# <a href="/stdown/yingyu/122541.html" target="_blank"><font color="#FF0000">2018</font>年<font color="#FF0000">中考</font><font color="#FF0000">英语</font>一轮教材复习精练(<font color="#FF0000">九年级</font>上module11~12)有答案</a>
articleUrls = link_soups.findAll('a', attrs={'target': '_blank'})
myItems = [] # list: store the tup(src, alt)
for oneArticleUrl in articleUrls:
try:
articleText = oneArticleUrl.text
# 过滤掉非 英语 文章
for filterWord in self.filterWords:
flag = True # 是否通过
if articleText.find(filterWord) == -1:
print 'pass no english article: ' + articleText
flag = False
break
if flag:
tup1 = (oneArticleUrl['href'], articleText)
myItems.append(tup1)
except KeyError, e: # if u can't get key attr
print 'KeyError', e
except Exception, e:
print 'Other Error', e
return myItems
    # get the max page number
    def GetTotalPage(self, html):
        """Extract the total result/page counts from the first result page."""
        # create the BeautifulSoup
        some_soup = BeautifulSoup(html)
        # get the pagination cell
        ele_a = some_soup.find('td', attrs={'class': 'linear'})
        # last <em> holds the last page number
        last_a = ele_a.findAll('em')[-1]
        # second <em> holds the total result count
        total_num = ele_a.findAll('em')[1]
        print ('共计查询结果:' + total_num.text.encode('utf-8'))
        # substr 0:.html
        pagenum = last_a.text  # .get('href')[:-5]
        print ('共计页码 : ' + pagenum.encode('utf-8'))
        # print type(last_a)
        self.SaveTotalPageToFile(pagenum)

    # store the max page number to totalpage.ini
    # new_page_num: new max page num
    def SaveTotalPageToFile(self, new_page_num):
        """Persist the newest page count and compute how many pages remain to load."""
        print '====================save the totalpage to totalpage.ini===================='
        iniFile = INIFILE(self.proFileName)
        if not iniFile.initflag:
            print 'class initializing failed. check the [%s] file first.' % (self.proFileName)
            os._exit(0)
        loaded_page_num = iniFile.GetValue('Main', 'loadedpage')
        print '====================the loaded_page_num is [%s], the new_page_num is [%s]====================' % (loaded_page_num, new_page_num)
        iniFile.SetValue('SUM', 'totalpage', new_page_num).EndWrite().UnInit()
        if int(new_page_num) >= int(loaded_page_num):  # if there is new page
            self.unload_page_num = int(new_page_num) - int(loaded_page_num)
            if self.unload_page_num == 0:  # page count unchanged, but the page may hold new items
                self.unload_page_num = 1
            elif self.unload_page_num > 0:  # new pages appeared; old last page may still hold undownloaded items
                self.unload_page_num += 1
            print 'since we start at page %s, we still got (%s) pages to load.' % (
                self.page, self.unload_page_num)
        else:  # nothing new, stop main thread
            print 'Oops! Nothing new. exit main thread now.'
            os._exit(0)  # terminal sub thread
            # NOTE(review): os._exit() never returns, so the next line is dead code.
            self.enable = False  # terminal main thread
    # background loader: keeps self.pages topped up
    def LoadPage(self):
        """Worker-thread loop that prefetches result pages into self.pages."""
        # keep running until told to stop
        while self.canLoad:
            # keep at most 2 pages prefetched
            # print '\n----background----self.pages length: ' + str(len(self.pages))
            if len(self.pages) < 2:
                try:
                    # fetch the next page
                    myPage = self.GetPage(str(self.page))
                    self.page += 1
                    self.pages.append(myPage)
                    # print 'self.pages ' + str(len(self.pages))
                    self.unload_page_num = self.unload_page_num - 1
                    print '====================%s============%s' % (self.unload_page_num, self.page)
                    if self.unload_page_num < 1:
                        print 'already load all new page, stop sub thread now.'
                        self.canLoad = False  # let this thread do nothing
                        self.allDone = True
                except Exception, e:
                    print e
            else:
                time.sleep(1)
                # print '\n----background----pause and wait.'
            # do not hammer the server between requests
            time.sleep(2)  # sleep 2s for test

    # show one page after press enter button.
    def ShowOnePage(self, now_page_items, page):
        """Open every article of one page, then record `page` as loaded in the ini file."""
        for idx, item in enumerate(now_page_items):
            print "\nopen " + item[0]
            time.sleep(1.666)
            self.GetFileDownloadPath(item[0])
        # print '========one page done.================='
        print '========Please hit the Enter.================='
        if self.allDone:
            print 'Nothing left. Now close this application.'
            # self.enable = False #let the main thread know it's time to quit
            os._exit(0)  # can teminal main thread.
        # remember the last fully processed page number
        iniFile = INIFILE(self.proFileName)
        if iniFile.initflag:
            iniFile.SetValue('Main', 'loadedpage', page).EndWrite().UnInit()
        # short pause after finishing a page
        time.sleep(1)
        print 'take a snap for 1s.'
        # manual mode (disabled):
        # myInput = raw_input()
        # if myInput == ":q":
        #     self.CleanRepeatImage() #if break manually, must clean work dir.
        #     self.enable = False
    # decide which page to start from (resume support)
    def whichPage2Start(self):
        """Resume from the page number stored in the ini file, if readable."""
        file = INIFILE(self.proFileName)
        if file.initflag:
            startPage = file.GetValue('Main', 'loadedpage', 1)
            self.page = int(startPage)

    def Start(self):
        """Main loop: spawn the loader thread and process pages as they arrive."""
        self.enable = True
        page = self.page
        print u'正在加载中请稍候......'
        # spawn the background loader thread
        thread.start_new_thread(self.LoadPage, ())
        time.sleep(2)  # wait the sub thread to be done.
        while self.enable:
            # process the oldest prefetched page, if any
            if len(self.pages) > 0:
                now_page_items = self.pages[0]
                # drop it from the prefetch queue
                del self.pages[0]
                print '---main thred --', page
                self.ShowOnePage(now_page_items, page)
                page += 1
        print self.enable  # ----------- program entry point follows -----------
# banner shown at startup
print u"""
---------------------------------------
程序:英语网站--爬虫
版本:2.7
作者:guzi
日期:2018年5月26日
语言:Python 2.7
操作:输入:q退出
功能:按下回车依次浏览
---------------------------------------
"""
# build the crawler, resume from the last loaded page, then start crawling
myModel = Spider_Model()
print u'请按下回车浏览英语下载网站:'
myModel.whichPage2Start()
raw_input(' ')
# myModel.page=913 #start from which page, default 1
myModel.Start()
# myModel.CleanRepeatImage()
| mit |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/ctypes/test/test_stringptr.py | 66 | 2504 | import unittest
from ctypes import *
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class StringPtrTestCase(unittest.TestCase):
    """Exercise POINTER(c_char) vs c_char_p structure fields (Python 2 era test)."""

    def test__POINTER_c_char(self):
        """POINTER(c_char) fields accept mutable buffers, never Python strings."""
        class X(Structure):
            _fields_ = [("str", POINTER(c_char))]
        x = X()

        # NULL pointer access
        self.assertRaises(ValueError, getattr, x.str, "contents")
        b = c_buffer("Hello, World")
        from sys import getrefcount as grc
        self.failUnlessEqual(grc(b), 2)
        x.str = b
        # assigning the buffer keeps one extra reference alive
        self.failUnlessEqual(grc(b), 3)

        # POINTER(c_char) and Python string is NOT compatible
        # POINTER(c_char) and c_buffer() is compatible
        for i in range(len(b)):
            self.failUnlessEqual(b[i], x.str[i])
        self.assertRaises(TypeError, setattr, x, "str", "Hello, World")

    def test__c_char_p(self):
        """c_char_p fields accept Python strings, never mutable buffers."""
        class X(Structure):
            _fields_ = [("str", c_char_p)]
        x = X()

        # c_char_p and Python string is compatible
        # c_char_p and c_buffer is NOT compatible
        self.failUnlessEqual(x.str, None)
        x.str = "Hello, World"
        self.failUnlessEqual(x.str, "Hello, World")
        b = c_buffer("Hello, World")
        self.failUnlessRaises(TypeError, setattr, x, "str", b)

    def test_functions(self):
        """Check argtypes conversions for a C function taking char pointers."""
        strchr = lib.my_strchr
        strchr.restype = c_char_p

        # c_char_p and Python string is compatible
        # c_char_p and c_buffer are now compatible
        strchr.argtypes = c_char_p, c_char
        self.failUnlessEqual(strchr("abcdef", "c"), "cdef")
        self.failUnlessEqual(strchr(c_buffer("abcdef"), "c"), "cdef")

        # POINTER(c_char) and Python string is NOT compatible
        # POINTER(c_char) and c_buffer() is compatible
        strchr.argtypes = POINTER(c_char), c_char
        buf = c_buffer("abcdef")
        self.failUnlessEqual(strchr(buf, "c"), "cdef")
        self.failUnlessEqual(strchr("abcdef", "c"), "cdef")

        # XXX These calls are dangerous, because the first argument
        # to strchr is no longer valid after the function returns!
        # So we must keep a reference to buf separately
        strchr.restype = POINTER(c_char)
        buf = c_buffer("abcdef")
        r = strchr(buf, "c")
        x = r[0], r[1], r[2], r[3], r[4]
        self.failUnlessEqual(x, ("c", "d", "e", "f", "\000"))
        del buf
        # x1 will NOT be the same as x, usually:
        x1 = r[0], r[1], r[2], r[3], r[4]

if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
tiredtyrant/CloudBot | plugins/minecraft_ping.py | 4 | 2219 | import socket
from mcstatus import MinecraftServer
from cloudbot import hook
# Mapping of Minecraft section-sign formatting codes to IRC control codes.
mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'),
             ('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'),
             ('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'),
             ('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'),
             ('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', ''), ('\xa7m', '\x13'),
             ('\xa7r', '\x0f'), ('\xa7n', '\x15')]


def format_colors(description):
    """Translate Minecraft formatting codes in `description` to IRC codes."""
    result = description
    for mc_code, irc_code in mc_colors:
        result = result.replace(mc_code, irc_code)
    # '\xa7k' (obfuscated text) has no IRC equivalent, so strip it entirely
    return result.replace("\xa7k", "")
@hook.command("mcping", "mcp")
def mcping(text):
    """<server[:port]> - gets info about the Minecraft server at <server[:port]>"""
    try:
        server = MinecraftServer.lookup(text)
    except (IOError, ValueError) as e:
        return e

    try:
        s = server.status()
    except socket.gaierror:
        return "Invalid hostname"
    except socket.timeout:
        return "Request timed out"
    except ConnectionRefusedError:
        return "Connection refused"
    except ConnectionError:
        return "Connection error"
    except (IOError, ValueError) as e:
        return "Error pinging server: {}".format(e)

    # the MOTD/description may be a chat object (dict) or a plain string;
    # either way collapse whitespace and translate color codes for IRC
    if isinstance(s.description, dict):
        description = format_colors(" ".join(s.description["text"].split()))
    else:
        description = format_colors(" ".join(s.description.split()))

    # I really hate people for putting colors IN THE VERSION
    # WTF REALLY THIS IS A THING NOW?
    if s.latency:
        return "{}\x0f - \x02{}\x0f - \x02{:.1f}ms\x02" \
               " - \x02{}/{}\x02 players".format(description, s.version.name_clean, s.latency,
                                                 s.players.online, s.players.max).replace("\n", "\x0f - ")
    return "{}\x0f - \x02{}\x0f" \
           " - \x02{}/{}\x02 players".format(description, s.version.name_clean,
                                             s.players.online, s.players.max).replace("\n", "\x0f - ")
| gpl-3.0 |
DanMcInerney/clusterd | src/platform/jboss/deployers/dfs_deploy.py | 6 | 2476 | from src.platform.jboss.interfaces import JINTERFACES
from src.platform.jboss.authenticate import checkAuth
from src.module.deploy_utils import parse_war_path
from collections import OrderedDict
from os.path import abspath
from log import LOG
import utility
# this deployer targets the JMX console interface of classic JBoss versions
title = JINTERFACES.JMX
versions = ["3.2", "4.0", "4.2", "5.0", "5.1"]


def deploy(fingerengine, fingerprint):
    """ Exploits the DeploymentFileRepository bean to deploy
    a JSP to the remote server.  Note that this requires a JSP,
    not a packaged or exploded WAR.
    """

    war_file = abspath(fingerengine.options.deploy)
    war_name = parse_war_path(war_file)
    if '.war' in war_file:
        # a WAR was supplied, but this vector needs a bare JSP; offer the bundled shell
        tmp = utility.capture_input("This deployer requires a JSP, default to cmd.jsp? [Y/n]")
        if "n" in tmp.lower():
            return
        war_file = abspath("./src/lib/resources/cmd.jsp")
        war_name = "cmd"

    utility.Msg("Preparing to deploy {0}...".format(war_name))
    url = "http://{0}:{1}/jmx-console/HtmlAdaptor".format(
        fingerengine.options.ip, fingerprint.port)

    # NOTE(review): methodIndex 5 presumably selects the repository's
    # store() operation (folder, name, extension, data, noteUpload) --
    # confirm against the target's MBean view.
    data = OrderedDict([
        ('action', 'invokeOp'),
        ('name', 'jboss.admin:service=DeploymentFileRepository'),
        ('methodIndex', 5),
        ('arg0', war_file.replace('.jsp', '.war')),
        ('arg1', war_name),
        ('arg2', '.jsp'),
        ('arg3', open(war_file, 'r').read()),
        ('arg4', True)
    ])

    response = utility.requests_post(url, data=data)
    if response.status_code == 401:
        # the JMX console requires authentication; try known credentials
        utility.Msg("Host %s:%s requires auth for JMX, checking..." %
                    (fingerengine.options.ip, fingerprint.port), LOG.DEBUG)
        cookies = checkAuth(fingerengine.options.ip, fingerprint.port,
                            fingerprint.title, fingerprint.version)

        if cookies:
            response = utility.requests_post(url, data=data, cookies=cookies[0],
                                             auth=cookies[1])
        else:
            utility.Msg("Could not get auth for %s:%s" %
                        (fingerengine.options.ip, fingerprint.port), LOG.ERROR)
            return

    if response.status_code == 200:
        utility.Msg("Successfully deployed '/{0}/{1}'".format(war_name, war_name + '.jsp'), LOG.SUCCESS)
    else:
        utility.Msg("Failed to deploy (HTTP %d)" % response.status_code, LOG.ERROR)
| mit |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/service.py | 1 | 7404 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmc
import xbmcgui
import xbmcaddon
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import utils
from salts_lib import utils2
from salts_lib.constants import MODES
from salts_lib.constants import TRIG_DB_UPG
from salts_lib.db_utils import DB_Connection
MAX_ERRORS = 10  # consecutive monitor-loop errors tolerated before the service exits

log_utils.log('Service: Installed Version: %s' % (kodi.get_version()))
db_connection = DB_Connection()
# Initialise/upgrade the local database unless a remote DB is used with upgrades disabled.
if kodi.get_setting('use_remote_db') == 'false' or kodi.get_setting('enable_upgrade') == 'true':
    if TRIG_DB_UPG:
        db_version = db_connection.get_db_version()
    else:
        db_version = kodi.get_version()
    db_connection.init_database(db_version)
class Service(xbmc.Player):
    """Kodi player monitor tracking SALTS playback for resume bookmarks and trakt sync."""

    def __init__(self, *args, **kwargs):
        log_utils.log('Service: starting...')
        xbmc.Player.__init__(self, *args, **kwargs)
        # home window (id 10000) doubles as a cross-addon property store
        self.win = xbmcgui.Window(10000)
        self.reset()

    def reset(self):
        """Clear all playback-tracking state and the shared window properties."""
        log_utils.log('Service: Resetting...')
        self.win.clearProperty('salts.playing')
        self.win.clearProperty('salts.playing.trakt_id')
        self.win.clearProperty('salts.playing.season')
        self.win.clearProperty('salts.playing.episode')
        self.win.clearProperty('salts.playing.srt')
        self.win.clearProperty('salts.playing.trakt_resume')
        self.win.clearProperty('salts.playing.salts_resume')
        self.tracked = False
        self._totalTime = 999999  # sentinel: real duration not yet known
        self.trakt_id = None
        self.season = None
        self.episode = None
        self._lastPos = 0

    def onPlayBackStarted(self):
        """Player callback: begin tracking if this playback was started by SALTS."""
        log_utils.log('Service: Playback started')
        playing = self.win.getProperty('salts.playing') == 'True'
        self.trakt_id = self.win.getProperty('salts.playing.trakt_id')
        self.season = self.win.getProperty('salts.playing.season')
        self.episode = self.win.getProperty('salts.playing.episode')
        srt_path = self.win.getProperty('salts.playing.srt')
        trakt_resume = self.win.getProperty('salts.playing.trakt_resume')
        salts_resume = self.win.getProperty('salts.playing.salts_resume')
        if playing:  # Playback is ours
            log_utils.log('Service: tracking progress...')
            self.tracked = True
            if srt_path:
                log_utils.log('Service: Enabling subtitles: %s' % (srt_path))
                self.setSubtitles(srt_path)
            else:
                self.showSubtitles(False)

        # poll until Kodi reports a real duration (0 until the stream is open)
        self._totalTime = 0
        while self._totalTime == 0:
            try:
                self._totalTime = self.getTotalTime()
            except RuntimeError:
                self._totalTime = 0
                break
            xbmc.sleep(1000)

        if salts_resume:
            # local bookmark takes precedence; value is absolute seconds
            log_utils.log("Salts Local Resume: Resume Time: %s Total Time: %s" % (salts_resume, self._totalTime), log_utils.LOGDEBUG)
            self.seekTime(float(salts_resume))
        elif trakt_resume:
            # trakt stores the position as a percentage of the runtime
            resume_time = float(trakt_resume) * self._totalTime / 100
            log_utils.log("Salts Trakt Resume: Percent: %s, Resume Time: %s Total Time: %s" % (trakt_resume, resume_time, self._totalTime), log_utils.LOGDEBUG)
            self.seekTime(resume_time)

    def onPlayBackStopped(self):
        """Player callback: persist a bookmark and trigger trakt sync when appropriate."""
        log_utils.log('Service: Playback Stopped')
        if self.tracked:
            # clear the playlist if SALTS was playing and only one item in playlist to
            # use playlist to determine playback method in get_sources
            pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            plugin_url = 'plugin://%s/' % (kodi.get_id())
            if pl.size() == 1 and pl[0].getfilename().lower().startswith(plugin_url):
                log_utils.log('Service: Clearing Single Item SALTS Playlist', log_utils.LOGDEBUG)
                pl.clear()

            playedTime = float(self._lastPos)
            try: percent_played = int((playedTime / self._totalTime) * 100)
            except: percent_played = 0  # guard div by zero
            pTime = utils2.format_time(playedTime)
            tTime = utils2.format_time(self._totalTime)
            log_utils.log('Service: Played %s of %s total = %s%%' % (pTime, tTime, percent_played), log_utils.LOGDEBUG)
            if playedTime == 0 and self._totalTime == 999999:
                log_utils.log('Kodi silently failed to start playback', log_utils.LOGWARNING)
            elif playedTime >= 5:
                if percent_played <= 98:
                    # not finished: save the position for later resume
                    log_utils.log('Service: Setting bookmark on |%s|%s|%s| to %s seconds' % (self.trakt_id, self.season, self.episode, playedTime), log_utils.LOGDEBUG)
                    db_connection.set_bookmark(self.trakt_id, playedTime, self.season, self.episode)

                if percent_played >= 75:
                    # watched enough: let the trakt addon sync progress
                    if xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
                        run = 'RunScript(script.trakt, action=sync, silent=True)'
                        xbmc.executebuiltin(run)
            self.reset()

    def onPlayBackEnded(self):
        """Player callback: natural end of playback is handled like a stop."""
        log_utils.log('Service: Playback completed')
        self.onPlayBackStopped()
monitor = Service()
utils.do_startup_task(MODES.UPDATE_SUBS)
was_on = False  # tracks whether Super Favourites' global context menu was enabled by us


def disable_global_cx():
    """Toggle Super Favourites' global context menu off while SALTS is the active plugin."""
    global was_on
    if xbmc.getCondVisibility('System.HasAddon(plugin.program.super.favourites)'):
        active_plugin = xbmc.getInfoLabel('Container.PluginName')
        sf = xbmcaddon.Addon('plugin.program.super.favourites')
        if active_plugin == kodi.get_id():
            if sf.getSetting('CONTEXT') == 'true':
                log_utils.log('Disabling Global CX while SALTS is active', log_utils.LOGDEBUG)
                was_on = True
                sf.setSetting('CONTEXT', 'false')
        elif was_on:
            # restore the user's setting once SALTS is no longer active
            log_utils.log('Re-enabling Global CX while SALTS is not active', log_utils.LOGDEBUG)
            sf.setSetting('CONTEXT', 'true')
            was_on = False
# main service loop: run scheduled tasks and track playback position until Kodi aborts
errors = 0
while not xbmc.abortRequested:
    try:
        isPlaying = monitor.isPlaying()
        utils.do_scheduled_task(MODES.UPDATE_SUBS, isPlaying)
        if monitor.tracked and monitor.isPlayingVideo():
            # remember the last playback position for bookmarking on stop
            monitor._lastPos = monitor.getTime()
    except Exception as e:
        errors += 1
        if errors >= MAX_ERRORS:
            log_utils.log('Service: Error (%s) received..(%s/%s)...Ending Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
            break
        else:
            log_utils.log('Service: Error (%s) received..(%s/%s)...Continuing Service...' % (e, errors, MAX_ERRORS), log_utils.LOGERROR)
    else:
        errors = 0  # reset the error streak after a clean iteration
    xbmc.sleep(1000)
    disable_global_cx()
log_utils.log('Service: shutting down...')
| gpl-2.0 |
metacloud/python-neutronclient | neutronclient/tests/unit/test_cli20_floatingips.py | 7 | 5680 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.neutron.v2_0 import floatingip as fip
from neutronclient.tests.unit import test_cli20
class CLITestV20FloatingIpsJSON(test_cli20.CLITestV20Base):
    # CLI tests for the neutron floating-IP create/list commands (JSON client).

    def test_create_floatingip(self):
        """Create floatingip: fip1."""
        resource = 'floatingip'
        cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        name = 'fip1'
        myid = 'myid'
        args = [name]
        position_names = ['floating_network_id']
        position_values = [name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_floatingip_and_port(self):
        """Create floatingip: fip1."""
        resource = 'floatingip'
        cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        name = 'fip1'
        myid = 'myid'
        pid = 'mypid'
        args = [name, '--port_id', pid]
        position_names = ['floating_network_id', 'port_id']
        position_values = [name, pid]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
        # Test dashed options
        args = [name, '--port-id', pid]
        position_names = ['floating_network_id', 'port_id']
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_floatingip_and_port_and_address(self):
        """Create floatingip: fip1 with a given port and address."""
        resource = 'floatingip'
        cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        name = 'fip1'
        myid = 'myid'
        pid = 'mypid'
        addr = '10.0.0.99'
        args = [name, '--port_id', pid, '--fixed_ip_address', addr]
        position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
        position_values = [name, pid, addr]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
        # Test dashed options
        args = [name, '--port-id', pid, '--fixed-ip-address', addr]
        position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_list_floatingips(self):
        """list floatingips: -D."""
        resources = 'floatingips'
        cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)
def test_list_floatingips_pagination(self):
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_floatingips_sort(self):
"""list floatingips: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_floatingips_limit(self):
"""list floatingips: -P."""
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_delete_floatingip(self):
"""Delete floatingip: fip1."""
resource = 'floatingip'
cmd = fip.DeleteFloatingIP(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_floatingip(self):
"""Show floatingip: --fields id."""
resource = 'floatingip'
cmd = fip.ShowFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
def test_disassociate_ip(self):
"""Disassociate floating IP: myid."""
resource = 'floatingip'
cmd = fip.DisassociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['myid']
self._test_update_resource(resource, cmd, 'myid',
args, {"port_id": None}
)
def test_associate_ip(self):
"""Associate floating IP: myid portid."""
resource = 'floatingip'
cmd = fip.AssociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['myid', 'portid']
self._test_update_resource(resource, cmd, 'myid',
args, {"port_id": "portid"}
)
class CLITestV20FloatingIpsXML(CLITestV20FloatingIpsJSON):
    # Re-run the entire JSON test suite using the XML request/response format.
    format = 'xml'
| apache-2.0 |
robclark/chromium | tools/find_depot_tools.py | 74 | 1374 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Small utility function to find depot_tools and add it to the python path.
Will throw an ImportError exception if depot_tools can't be found since it
imports breakpad.
"""
import os
import sys
def _is_depot_tools_dir(path):
  """Return True if |path| is itself a directory named 'depot_tools'."""
  # Compare the basename exactly; a plain endswith('depot_tools') check
  # would also match unrelated directories such as 'my_depot_tools'.
  return os.path.basename(path.rstrip(os.sep)) == 'depot_tools'


def add_depot_tools_to_path():
  """Search for depot_tools, add it to sys.path and return its path.

  Looks, in order, at PYTHONPATH (sys.path), the PATH environment
  variable, and finally walks up from this file toward the filesystem
  root. Returns the depot_tools path on success, None otherwise.
  """
  # First look if depot_tools is already in PYTHONPATH.
  for i in sys.path:
    if _is_depot_tools_dir(i):
      return i
  # Then look if depot_tools is in PATH, common case. Use .get() so a
  # missing PATH variable does not raise KeyError.
  for i in os.environ.get('PATH', '').split(os.pathsep):
    if _is_depot_tools_dir(i):
      sys.path.append(i.rstrip(os.sep))
      return i
  # Rare case, it's not even in PATH, look upward up to root.
  root_dir = os.path.dirname(os.path.abspath(__file__))
  previous_dir = os.path.abspath(__file__)
  while root_dir and root_dir != previous_dir:
    if os.path.isfile(os.path.join(root_dir, 'depot_tools', 'breakpad.py')):
      i = os.path.join(root_dir, 'depot_tools')
      sys.path.append(i)
      return i
    previous_dir = root_dir
    root_dir = os.path.dirname(root_dir)
  # Portable under both Python 2 and 3 (the original used the
  # Python-2-only ``print >> sys.stderr`` statement).
  sys.stderr.write('Failed to find depot_tools\n')
  return None
# Side effect at import time: put depot_tools on sys.path so that the
# ``import breakpad`` below can succeed.
add_depot_tools_to_path()

# breakpad is imported for its import-time crash-reporting side effect,
# not for direct use here.
# pylint: disable=W0611
import breakpad
| bsd-3-clause |
sunpeak/MITMf | plugins/ferretng.py | 25 | 4149 | # Copyright (c) 2014-2016 Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import json
import sys
from datetime import datetime
from plugins.plugin import Plugin
from twisted.internet import reactor
from twisted.web import http
from core.ferretng.URLMonitor import URLMonitor
class FerretNG(Plugin):
    """Captures client cookies and runs a proxy that replays them."""

    name = "Ferret-NG"
    optname = "ferretng"
    desc = "Captures cookies and starts a proxy that will feed them to connected clients"
    version = "0.1"

    def initialize(self, options):
        """Read options and optionally preload cookies from a previous log."""
        self.options = options
        self.ferret_port = options.ferret_port
        self.cookie_file = None

        URLMonitor.getInstance().hijack_client = self.config['Ferret-NG']['Client']

        from core.utils import shutdown

        if options.cookie_file:
            self.tree_info.append('Loading cookies from log file')
            try:
                with open(options.cookie_file, 'r') as cookie_file:
                    # Bug fix: the log must be *parsed* back into a dict.
                    # The previous json.dumps() merely JSON-encoded the raw
                    # file text, leaving URLMonitor.cookies a string.
                    self.cookie_file = json.loads(cookie_file.read())
                URLMonitor.getInstance().cookies = self.cookie_file
            except Exception as e:
                shutdown("[-] Error loading cookie log file: {}".format(e))

        self.tree_info.append("Listening on port {}".format(self.ferret_port))

    def on_config_change(self):
        """Re-read which client to hijack when the config file changes."""
        self.log.info("Will now hijack captured sessions from {}".format(self.config['Ferret-NG']['Client']))
        URLMonitor.getInstance().hijack_client = self.config['Ferret-NG']['Client']

    def request(self, request):
        """Record (or refresh) the Cookie header seen for each client/host."""
        if 'cookie' in request.headers:
            host = request.headers['host']
            cookie = request.headers['cookie']
            client = request.client.getClientIP()

            if client not in URLMonitor.getInstance().cookies:
                URLMonitor.getInstance().cookies[client] = []

            # If we already have a session for this host, refresh it in place.
            for entry in URLMonitor.getInstance().cookies[client]:
                if host == entry['host']:
                    self.clientlog.debug("Updating captured session for {}".format(host), extra=request.clientInfo)
                    entry['host'] = host
                    entry['cookie'] = cookie
                    return

            self.clientlog.info("Host: {} Captured cookie: {}".format(host, cookie), extra=request.clientInfo)
            URLMonitor.getInstance().cookies[client].append({'host': host, 'cookie': cookie})

    def reactor(self, StrippingProxy):
        """Start the Ferret proxy listener on the configured port."""
        from core.ferretng.FerretProxy import FerretProxy
        FerretFactory = http.HTTPFactory(timeout=10)
        FerretFactory.protocol = FerretProxy
        reactor.listenTCP(self.ferret_port, FerretFactory)

    def options(self, options):
        """Register Ferret-NG command-line options."""
        options.add_argument('--port', dest='ferret_port', metavar='PORT', default=10010, type=int, help='Port to start Ferret-NG proxy on (default 10010)')
        options.add_argument('--load-cookies', dest='cookie_file', metavar='FILE', type=str, help='Load cookies from a log file')

    def on_shutdown(self):
        """Persist captured cookies as JSON so --load-cookies can reload them."""
        if not URLMonitor.getInstance().cookies:
            return
        # Nothing changed since the cookies were loaded; skip the write.
        if self.cookie_file == URLMonitor.getInstance().cookies:
            return

        self.log.info("Writing cookies to log file")
        # Bug fix: write real JSON. The old str(dict) dump produced
        # single-quoted output that json.loads() could never parse back.
        with open('./logs/ferret-ng/cookies-{}.log'.format(datetime.now().strftime("%Y-%m-%d_%H:%M:%S:%s")), 'w') as cookie_file:
            json.dump(URLMonitor.getInstance().cookies, cookie_file)
| gpl-3.0 |
Acidburn0zzz/sdk | doc/source/conf.py | 2 | 9222 | # -*- coding: utf-8 -*-
#
# MEGA Client SDK documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 1 16:55:52 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['breathe']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['doc/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MEGA Client SDK'
copyright = u'2013, Mega Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
def get_versions():
    """
    Grab the version and release numbers from include/mega/version.h.

    Returns a (version, release) tuple, where version is "MAJOR.MINOR"
    and release is "MAJOR.MINOR.MICRO". Either value falls back to
    'raw_development' when the corresponding components cannot be found.
    """
    import re
    version_h = open(os.path.join('..', '..', 'include', 'mega', 'version.h')).read()
    major = re.search('define MEGA_MAJOR_VERSION ([0-9]+)', version_h)
    minor = re.search('define MEGA_MINOR_VERSION ([0-9]+)', version_h)
    # Bug fix: the old pattern ended in a non-greedy '(.+?)', which can only
    # ever capture a single character; match the full digit run instead.
    micro = re.search('define MEGA_MICRO_VERSION ([0-9]+)', version_h)

    if major and minor:
        version = '.'.join([major.group(1), minor.group(1)])
    else:
        version = 'raw_development'

    # Bug fix: the old code called micro.group(1) whenever major matched,
    # crashing with AttributeError if MICRO was absent from the header.
    if major and minor and micro:
        release = '.'.join([major.group(1), minor.group(1), micro.group(1)])
    else:
        release = 'raw_development'

    return version, release
# Version: The short X.Y version.
# Release: The full version, including alpha/beta/rc tags.
version, release = get_versions()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# Set the default domain for this project to C++
primary_domain = 'cpp'
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for Doxygen input ------------------------------------------------
breathe_projects = {
'megasdk': '../sphinx_api/xml/',
}
breathe_default_project = 'megasdk'
#breathe_domain_by_extension = {
# 'h': 'cpp',
#}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../images/MegaPC.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['doc/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MEGAClientSDKdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MEGAClientSDK.tex', u'MEGA Client SDK Documentation',
u'Mathias Ortmann \\textless{}mo@mega.co.nz\\textgreater{}\\\\ Guy Kloss \\textless{}gk@mega.co.nz\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../images/MegaLogo_512t.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'megaclientsdk', u'MEGA Client SDK Documentation',
[u'Mathias Ortmann <mo@mega.co.nz>, Paul Ionkin <pi@mega.co.nz>, Guy Kloss <gk@mega.co.nz>'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MEGAClientSDK', u'MEGA Client SDK Documentation',
u'Mathias Ortmann <mo@mega.co.nz>, Paul Ionkin <pi@mega.co.nz>, Guy Kloss <gk@mega.co.nz>', 'MEGAClientSDK', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-2-clause |
ppanczyk/ansible | lib/ansible/modules/packaging/os/apt_key.py | 22 | 12347 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: apt_key
author:
- Jayson Vantuyl (@jvantuyl)
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it.
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
description:
- The identifier of the key.
- Including this allows check mode to correctly report the changed state.
- If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
- This parameter is required when C(state) is set to C(absent).
data:
description:
- The keyfile contents to add to the keyring.
file:
description:
- The path to a keyfile on the remote server to add to the keyring.
keyring:
description:
-The path to specific keyring file in /etc/apt/trusted.gpg.d/
version_added: "1.3"
url:
description:
- The URL to retrieve key from.
keyserver:
description:
- The keyserver to retrieve key from.
version_added: "1.6"
state:
description:
- Ensures that the key is present (added) or absent (revoked).
choices: [ absent, present ]
default: present
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: Add an apt key by id from a keyserver
apt_key:
keyserver: keyserver.ubuntu.com
id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
- name: Add an Apt signing key, uses whichever key is at the URL
apt_key:
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
state: present
- name: Add an Apt signing key, will not download if present
apt_key:
id: 473041FA
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
state: present
- name: Remove a Apt specific signing key, leading 0x is valid
apt_key:
id: 0x473041FA
state: absent
# Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
- name: Add a key from a file on the Ansible server.
apt_key:
data: "{{ lookup('file', 'apt.asc') }}"
state: present
- name: Add an Apt signing key to a specific keyring file
apt_key:
id: 473041FA
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
keyring: /etc/apt/trusted.gpg.d/debian.gpg
- name: Add Apt signing key on remote server to keyring
apt_key:
id: 473041FA
file: /tmp/apt.gpg
state: present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
apt_key_bin = None
def find_needed_binaries(module):
    """Locate apt-key on the target host and sanity-check gpg/grep.

    Stores the apt-key path in the module-level ``apt_key_bin`` global so
    the other helpers can build command lines without re-resolving it.
    Fails the module if any binary is missing (required=True).
    """
    global apt_key_bin
    apt_key_bin = module.get_bin_path('apt-key', required=True)

    ### FIXME: Is there a reason that gpg and grep are checked? Is it just
    # cruft or does the apt .deb package not require them (and if they're not
    # installed, /usr/bin/apt-key fails?)
    module.get_bin_path('gpg', required=True)
    module.get_bin_path('grep', required=True)
def parse_key_id(key_id):
    """Validate a key id and derive its short id, fingerprint and full id.

    A valid key_id is 8, 16, or more hexadecimal characters, optionally
    prefixed with ``0x``. Returns a tuple of:

    * the last 8 characters (what ``apt-key del`` <= 1.10 copes with),
    * the last 16 characters (what ``apt-key adv --list-public-keys``
      prints),
    * the full upper-cased id (usable with ``--recv-key``).

    For short ids some of the three values coincide. Raises ValueError for
    non-hexadecimal input or an invalid length.
    """
    # Raises ValueError when key_id is not valid hexadecimal.
    int(key_id, 16)

    key_id = key_id.upper()
    if key_id.startswith('0X'):
        key_id = key_id[2:]

    length = len(key_id)
    if length < 16 and length != 8:
        raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')

    fingerprint = key_id[-16:] if length > 16 else key_id
    return key_id[-8:], fingerprint, key_id
def all_keys(module, keyring, short_format):
    """List the ids of every public key apt currently trusts.

    Parses ``apt-key adv --list-public-keys`` output; returns 8-char short
    ids when short_format is true, 16-char long ids otherwise. Expired
    keys are skipped.
    """
    if keyring:
        cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
    else:
        cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
    rc, out, err = module.run_command(cmd)

    key_ids = []
    for line in to_native(out).split('\n'):
        # Only 'pub'/'sub' lines carry key ids; ignore expired entries.
        if not line.startswith(('pub', 'sub')) or 'expired' in line:
            continue
        # The second token looks like "<length><type>/<key id>".
        len_type, real_code = line.split()[1].split("/")
        key_ids.append(real_code)

    if short_format:
        key_ids = shorten_key_ids(key_ids)
    return key_ids
def shorten_key_ids(key_id_list):
    """Convert each key id to the 8-character 'short' format.

    The short form is simply the last 8 characters of the id.
    """
    return [key_id[-8:] for key_id in key_id_list]
def download_key(module, url):
    """Fetch a key file over HTTP(S) and return its raw body.

    Fails the module (and therefore never returns) when url is missing,
    the server does not answer 200, or the fetch raises.
    """
    # FIXME: move get_url code to common, allow for in-memory D/L, support proxies
    # and reuse here
    if url is None:
        module.fail_json(msg="needed a URL but was not specified")

    try:
        rsp, info = fetch_url(module, url)
        if info['status'] != 200:
            module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))

        return rsp.read()
    except Exception:
        # Broad catch is deliberate: any failure is reported via fail_json
        # with the traceback attached rather than crashing the module.
        module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyring, keyserver, key_id):
    """Fetch key_id from keyserver via apt-key, retrying up to 5 times.

    Returns True on success; fails the module (never returns) once all
    retries are exhausted.
    """
    if keyring:
        cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
    else:
        cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
    for retry in range(5):
        # Force a C locale so the English error strings matched below are
        # stable regardless of the host's language settings.
        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        (rc, out, err) = module.run_command(cmd, environ_update=lang_env)
        if rc == 0:
            break
    else:
        # for/else: this branch only runs when no iteration hit `break`,
        # i.e. we are out of retries.
        if rc == 2 and 'not found on keyserver' in out:
            msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
            module.fail_json(cmd=cmd, msg=msg)
        else:
            msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
            module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
    return True
def add_key(module, keyfile, keyring, data=None):
    """Install a key into apt's keyring, from in-memory data or a file.

    When data is given it is piped to ``apt-key add -`` on stdin;
    otherwise keyfile is passed as a path argument. Always returns True
    (run_command aborts the module on a non-zero exit via check_rc).
    """
    if data is not None:
        cmd = ("%s --keyring %s add -" % (apt_key_bin, keyring)) if keyring \
            else ("%s add -" % apt_key_bin)
        module.run_command(cmd, data=data, check_rc=True, binary_data=True)
    else:
        cmd = ("%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)) if keyring \
            else ("%s add %s" % (apt_key_bin, keyfile))
        module.run_command(cmd, check_rc=True)
    return True
def remove_key(module, key_id, keyring):
    """Delete key_id via apt-key; the module aborts on a non-zero exit."""
    # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
    cmd = ('%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)) if keyring \
        else ('%s del %s' % (apt_key_bin, key_id))
    module.run_command(cmd, check_rc=True)
    return True
def main():
    """Module entry point: ensure an apt key is present or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            id=dict(required=False, default=None),
            url=dict(required=False),
            data=dict(required=False),
            file=dict(required=False, type='path'),
            # NOTE(review): 'key' is accepted but never read anywhere below;
            # presumably a stale option -- confirm before removing.
            key=dict(required=False),
            keyring=dict(required=False, type='path'),
            validate_certs=dict(default='yes', type='bool'),
            keyserver=dict(required=False),
            state=dict(required=False, choices=['present', 'absent'], default='present')
        ),
        supports_check_mode=True,
        # NOTE(review): the spec defines 'file', not 'filename', so this
        # mutually-exclusive group looks ineffective -- verify intent.
        mutually_exclusive=(('filename', 'keyserver', 'data', 'url'),),
    )

    key_id = module.params['id']
    url = module.params['url']
    data = module.params['data']
    filename = module.params['file']
    keyring = module.params['keyring']
    state = module.params['state']
    keyserver = module.params['keyserver']
    changed = False

    fingerprint = short_key_id = key_id
    short_format = False
    if key_id:
        try:
            short_key_id, fingerprint, key_id = parse_key_id(key_id)
        except ValueError:
            module.fail_json(msg='Invalid key_id', id=key_id)

        # An 8-char fingerprint means the user gave a short id, so all later
        # keyring comparisons must also use shortened ids.
        if len(fingerprint) == 8:
            short_format = True

    find_needed_binaries(module)

    keys = all_keys(module, keyring, short_format)
    return_values = {}

    if state == 'present':
        if fingerprint and fingerprint in keys:
            # Key already installed: nothing to do.
            module.exit_json(changed=False)
        elif fingerprint and fingerprint not in keys and module.check_mode:
            ### TODO: Someday we could go further -- write keys out to
            # a temporary file and then extract the key id from there via gpg
            # to decide if the key is installed or not.
            module.exit_json(changed=True)
        else:
            # No local source given: fetch the key material from the URL.
            if not filename and not data and not keyserver:
                data = download_key(module, url)

            if filename:
                add_key(module, filename, keyring)
            elif keyserver:
                import_key(module, keyring, keyserver, key_id)
            else:
                add_key(module, "-", keyring, data)

            # Detect change by comparing keyring size before and after.
            changed = False
            keys2 = all_keys(module, keyring, short_format)
            if len(keys) != len(keys2):
                changed = True

            if fingerprint and fingerprint not in keys2:
                module.fail_json(msg="key does not seem to have been added", id=key_id)
            module.exit_json(changed=changed)
    elif state == 'absent':
        if not key_id:
            module.fail_json(msg="key is required")
        if fingerprint in keys:
            if module.check_mode:
                module.exit_json(changed=True)

            # we use the "short" id: key_id[-8:], short_format=True
            # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
            if remove_key(module, short_key_id, keyring):
                keys = all_keys(module, keyring, short_format)
                if fingerprint in keys:
                    module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)",
                                     id=key_id)
                changed = True
            else:
                # FIXME: module.fail_json or exit-json immediately at point of failure
                module.fail_json(msg="error removing key_id", **return_values)

    module.exit_json(changed=changed, **return_values)
| gpl-3.0 |
Technux/linux | scripts/gdb/linux/dmesg.py | 630 | 1991 | #
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""

    def __init__(self):
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Resolve the kernel ring buffer's address and current bounds from
        # the inferior's symbols.
        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))

        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            # Contiguous region: read everything in one go.
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = inf.read_memory(start, length)
        else:
            # The buffer has wrapped: stitch tail + head together.
            log_buf_2nd_half = log_buf_len - log_first_idx
            log_buf = inf.read_memory(start, log_buf_2nd_half) + \
                inf.read_memory(log_buf_addr, log_next_idx)

        pos = 0
        while pos < log_buf.__len__():
            # Record header layout (as read below): u64 timestamp in ns at
            # offset 0, u16 total record length at offset 8, u16 text
            # length at offset 10; the message text starts at offset 16.
            length = utils.read_u16(log_buf[pos + 8:pos + 10])
            if length == 0:
                # A zero-length record marks the jump back to the start of
                # the wrapped buffer; seeing it in an unwrapped buffer
                # means the data is corrupt.
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    break
                pos = log_buf_2nd_half
                continue

            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
            text = log_buf[pos + 16:pos + 16 + text_len]
            time_stamp = utils.read_u64(log_buf[pos:pos + 8])

            # Emit each text line prefixed with the dmesg-style timestamp
            # (nanoseconds converted to seconds).
            for line in memoryview(text).tobytes().splitlines():
                gdb.write("[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line))

            pos += length


# Register the command with gdb at import time.
LxDmesg()
| gpl-2.0 |
nborggren/zipline | zipline/test_algorithms.py | 1 | 34680 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithm Protocol
===================
For a class to be passed as a trading algorithm to the
:py:class:`zipline.lines.SimulatedTrading` zipline it must follow an
implementation protocol. Examples of this algorithm protocol are provided
below.
The algorithm must expose methods:
- initialize: method that takes no args, no returns. Simply called to
enable the algorithm to set any internal state needed.
- get_sid_filter: method that takes no args, and returns a list of valid
sids. List must have a length between 1 and 10. If None is returned the
filter will block all events.
- handle_data: method that accepts a :py:class:`zipline.protocol.BarData`
of the current state of the simulation universe. An example data object:
.. This outputs the table as an HTML table but for some reason there
    is no bounding box. Make the previous paragraph ending colon a
double-colon to turn this back into blockquoted table in ASCII art.
+-----------------+--------------+----------------+-------------------+
| | sid(133) | sid(134) | sid(135) |
+=================+==============+================+===================+
| price | $10.10 | $22.50 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| volume | 10,000 | 5,000 | 50,000 |
+-----------------+--------------+----------------+-------------------+
| mvg_avg_30 | $9.97 | $22.61 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| dt | 6/30/2012 | 6/30/2011 | 6/29/2012 |
+-----------------+--------------+----------------+-------------------+
- set_order: method that accepts a callable. Will be set as the value of the
order method of trading_client. An algorithm can then place orders with a
valid sid and a number of shares::
self.order(sid(133), share_count)
- set_performance: property which can be set equal to the
cumulative_trading_performance property of the trading_client. An
algorithm can then check position information with the
Portfolio object::
self.Portfolio[sid(133)]['cost_basis']
- set_transact_setter: method that accepts a callable. Will
be set as the value of the set_transact_setter method of
the trading_client. This allows an algorithm to change the
slippage model used to predict transactions based on orders
and trade events.
"""
from copy import deepcopy
import numpy as np
from nose.tools import assert_raises
from six.moves import range
from six import itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
FixedSlippage,
order,
set_slippage,
record,
sid,
)
from zipline.errors import UnsupportedOrderParameters
from zipline.assets import Future, Equity
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.controls import AssetDateBounds
from zipline.transforms import BatchTransform, batch_transform
class TestAlgorithm(TradingAlgorithm):
    """
    Sends a fixed number of orders for one asset so unit tests can verify
    the orders sent/received, transactions created, and final positions.
    """
    def initialize(self,
                   sid,
                   amount,
                   order_count,
                   sid_filter=None,
                   slippage=None,
                   commission=None):
        self.count = order_count
        self.asset = self.sid(sid)
        self.amount = amount
        self.incr = 0
        # Default the filter to just the traded asset when none is given.
        self.sid_filter = sid_filter if sid_filter else [self.asset.sid]
        if slippage is not None:
            self.set_slippage(slippage)
        if commission is not None:
            self.set_commission(commission)

    def handle_data(self, data):
        # Stop once the configured number of orders has been placed.
        if self.incr >= self.count:
            return
        self.order(self.asset, self.amount)
        self.incr += 1
class HeavyBuyAlgorithm(TradingAlgorithm):
    """
    Orders ``amount`` shares of ``sid`` on every single bar, counting how
    many orders were placed in ``self.incr``.
    """
    def initialize(self, sid, amount):
        self.asset = self.sid(sid)
        self.amount = amount
        self.incr = 0

    def handle_data(self, data):
        # Unconditionally buy on each event and count the orders placed.
        self.order(self.asset, self.amount)
        self.incr += 1
class NoopAlgorithm(TradingAlgorithm):
    """Does nothing on every hook ("dolce far niente")."""
    def initialize(self):
        pass

    def handle_data(self, data):
        pass

    def get_sid_filter(self):
        # Empty filter: no sids pass.
        return []

    def set_transact_setter(self, txn_sim_callable):
        pass
class ExceptionAlgorithm(TradingAlgorithm):
    """
    Throw an exception from the method name specified in the
    constructor.
    """
    def _maybe_throw(self, origin):
        # Raise iff this call site is the configured throw location.
        if self.throw_from == origin:
            raise Exception("Algo exception in " + origin)

    def initialize(self, throw_from, sid):
        self.throw_from = throw_from
        self.asset = self.sid(sid)
        self._maybe_throw("initialize")

    def set_portfolio(self, portfolio):
        self._maybe_throw("set_portfolio")

    def handle_data(self, data):
        self._maybe_throw("handle_data")

    def get_sid_filter(self):
        self._maybe_throw("get_sid_filter")
        return [self.asset]

    def set_transact_setter(self, txn_sim_callable):
        pass
class DivByZeroAlgorithm(TradingAlgorithm):
    """Deliberately raises ZeroDivisionError on the fifth bar, to test how
    the simulator surfaces exceptions raised inside handle_data."""
    def initialize(self, sid):
        self.asset = self.sid(sid)
        self.incr = 0
    def handle_data(self, data):
        self.incr += 1
        if self.incr > 4:
            # Intentional ZeroDivisionError once enough bars have passed.
            5 / 0
        pass
class TooMuchProcessingAlgorithm(TradingAlgorithm):
    """Burns CPU in handle_data to exercise the simulator's timeout path."""
    def initialize(self, sid):
        self.asset = self.sid(sid)
    def handle_data(self, data):
        # Unless we're running on some sort of
        # supercomputer this will hit timeout.
        for i in range(1000000000):
            self.foo = i
class TimeoutAlgorithm(TradingAlgorithm):
    """Sleeps inside handle_data to trigger the simulator's timeout logic."""
    def initialize(self, sid):
        self.asset = self.sid(sid)
        self.incr = 0
    def handle_data(self, data):
        # NOTE(review): self.incr is initialized to 0 but never incremented
        # here, so this branch never fires and the sleep never happens —
        # confirm whether the test driver mutates incr externally or this
        # is a latent bug.
        if self.incr > 4:
            import time
            time.sleep(100)
        pass
class RecordAlgorithm(TradingAlgorithm):
    """Exercises the record() call styles: keyword form, positional
    name/value form, and the module-level API with mixed arguments."""
    def initialize(self):
        self.incr = 0

    def handle_data(self, data):
        self.incr += 1
        # Keyword form via the method.
        self.record(incr=self.incr)
        # Positional name/value form via the method.
        key = 'name'
        self.record(key, self.incr)
        # Module-level API with positional pairs plus a keyword.
        record(key, self.incr, 'name2', 2, name3=self.incr)
class TestOrderAlgorithm(TradingAlgorithm):
    """Orders one share of sid 0 per bar, asserting that the previous bar's
    order was filled immediately and at that bar's price."""
    def initialize(self):
        self.incr = 0
    def handle_data(self, data):
        if self.incr == 0:
            # First bar: nothing ordered yet, so no position may exist.
            assert 0 not in self.portfolio.positions
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.incr, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        self.incr += 1
        self.order(self.sid(0), 1)
class TestOrderInstantAlgorithm(TradingAlgorithm):
    """Uses order_value for two shares per bar and asserts fills happen
    immediately at the previous bar's price."""
    def initialize(self):
        self.incr = 0
        self.last_price = None
    def handle_data(self, data):
        if self.incr == 0:
            assert 0 not in self.portfolio.positions
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.incr, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                self.last_price, "Orders was not filled at last price."
        # Two shares per bar, expressed as a value order.
        self.incr += 2
        self.order_value(self.sid(0), data[0].price * 2.)
        self.last_price = data[0].price
class TestOrderStyleForwardingAlgorithm(TradingAlgorithm):
    """
    Test Algorithm for verifying that ExecutionStyles are properly forwarded by
    order API helper methods. Pass the name of the method to be tested as a
    string parameter to this algorithm's constructor.
    """
    def __init__(self, *args, **kwargs):
        # Name of the order helper (e.g. 'order', 'order_value') to exercise.
        self.method_name = kwargs.pop('method_name')
        super(TestOrderStyleForwardingAlgorithm, self)\
            .__init__(*args, **kwargs)
    def initialize(self):
        self.incr = 0
        self.last_price = None
    def handle_data(self, data):
        if self.incr == 0:
            assert len(self.portfolio.positions.keys()) == 0
            method_to_check = getattr(self, self.method_name)
            # Forward an explicit StopLimitOrder and verify the blotter
            # received its stop/limit prices unchanged.
            method_to_check(self.sid(0),
                            data[0].price,
                            style=StopLimitOrder(10, 10))
            assert len(self.blotter.open_orders[0]) == 1
            result = self.blotter.open_orders[0][0]
            assert result.limit == 10
            assert result.stop == 10
            self.incr += 1
class TestOrderValueAlgorithm(TradingAlgorithm):
    """Orders two shares per bar via order_value, scaling the order value
    by the contract multiplier when the asset is a Future."""
    def initialize(self):
        self.incr = 0
        self.sale_price = None
    def handle_data(self, data):
        if self.incr == 0:
            assert 0 not in self.portfolio.positions
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.incr, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        self.incr += 2
        multiplier = 2.
        if isinstance(self.sid(0), Future):
            # Futures: value per share is price * contract multiplier.
            multiplier *= self.sid(0).multiplier
        self.order_value(self.sid(0), data[0].price * multiplier)
class TestTargetAlgorithm(TradingAlgorithm):
    """Targets a random share count each bar via order_target, asserting the
    previous target was reached immediately at the bar price."""
    def initialize(self):
        self.target_shares = 0
        self.sale_price = None
    def handle_data(self, data):
        if self.target_shares == 0:
            assert 0 not in self.portfolio.positions
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.target_shares, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        # Random positive target so successive bars differ.
        self.target_shares = np.random.randint(1, 30)
        self.order_target(self.sid(0), self.target_shares)
class TestOrderPercentAlgorithm(TradingAlgorithm):
    """Seeds a 10-share position, then orders 0.1% of portfolio value each
    bar, tracking the expected share count for Equities and Futures."""
    def initialize(self):
        self.target_shares = 0
        self.sale_price = None
    def handle_data(self, data):
        if self.target_shares == 0:
            # First bar: open a starter position so later bars can assert.
            assert 0 not in self.portfolio.positions
            self.order(self.sid(0), 10)
            self.target_shares = 10
            return
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.target_shares, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        self.order_percent(self.sid(0), .001)
        if isinstance(self.sid(0), Equity):
            self.target_shares += np.floor(
                (.001 * self.portfolio.portfolio_value) / data[0].price
            )
        if isinstance(self.sid(0), Future):
            # Futures: divide by price * contract multiplier.
            self.target_shares += np.floor(
                (.001 * self.portfolio.portfolio_value) /
                (data[0].price * self.sid(0).multiplier)
            )
class TestTargetPercentAlgorithm(TradingAlgorithm):
    """Targets 0.2% of portfolio value each bar via order_target_percent,
    asserting the resulting position value matches."""
    def initialize(self):
        self.target_shares = 0
        self.sale_price = None
    def handle_data(self, data):
        if self.target_shares == 0:
            assert 0 not in self.portfolio.positions
            self.target_shares = 1
        else:
            assert np.round(self.portfolio.portfolio_value * 0.002) == \
                self.portfolio.positions[0]['amount'] * self.sale_price, \
                "Orders not filled correctly."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        # Remember the price the target order will fill at.
        self.sale_price = data[0].price
        self.order_target_percent(self.sid(0), .002)
class TestTargetValueAlgorithm(TradingAlgorithm):
    """Seeds a 10-share position, then targets $20 of the asset each bar via
    order_target_value, asserting the previous target filled immediately.

    Fix: removed a leftover ``print(self.portfolio)`` debug statement that
    spammed stdout on every bar.
    """
    def initialize(self):
        self.target_shares = 0
        self.sale_price = None

    def handle_data(self, data):
        if self.target_shares == 0:
            # First bar: open a starter position so later bars can assert.
            assert 0 not in self.portfolio.positions
            self.order(self.sid(0), 10)
            self.target_shares = 10
            return
        else:
            assert self.portfolio.positions[0]['amount'] == \
                self.target_shares, "Orders not filled immediately."
            assert self.portfolio.positions[0]['last_sale_price'] == \
                data[0].price, "Orders not filled at current price."
        self.order_target_value(self.sid(0), 20)
        # Fallback expectation for assets that are neither Equity nor Future.
        self.target_shares = np.round(20 / data[0].price)
        if isinstance(self.sid(0), Equity):
            self.target_shares = np.round(20 / data[0].price)
        if isinstance(self.sid(0), Future):
            # Futures: divide by price * contract multiplier.
            self.target_shares = np.round(
                20 / (data[0].price * self.sid(0).multiplier))
class FutureFlipAlgo(TestAlgorithm):
    """Flips between long and flat/short target positions in the asset."""
    def handle_data(self, data):
        if len(self.portfolio.positions) == 0:
            # Flat: open the initial long target position.
            self.order_target(self.asset, self.amount)
            return
        currently_long = \
            self.portfolio.positions[self.asset.sid]["amount"] > 0
        if currently_long:
            self.order_target(self.asset, -self.amount)
        else:
            self.order_target(self.asset, 0)
############################
# AccountControl Test Algos#
############################
class SetMaxLeverageAlgorithm(TradingAlgorithm):
    """Installs a max-leverage account control at initialization."""
    def initialize(self, max_leverage=None):
        self.set_max_leverage(max_leverage=max_leverage)
############################
# TradingControl Test Algos#
############################
class SetMaxPositionSizeAlgorithm(TradingAlgorithm):
    """Installs a max-position-size trading control at initialization."""
    def initialize(self, sid=None, max_shares=None, max_notional=None):
        self.order_count = 0
        self.set_max_position_size(sid=sid,
                                   max_shares=max_shares,
                                   max_notional=max_notional)
class SetMaxOrderSizeAlgorithm(TradingAlgorithm):
    """Installs a max-order-size trading control at initialization."""
    def initialize(self, sid=None, max_shares=None, max_notional=None):
        self.order_count = 0
        self.set_max_order_size(sid=sid,
                                max_shares=max_shares,
                                max_notional=max_notional)
class SetDoNotOrderListAlgorithm(TradingAlgorithm):
    """Installs a restricted-asset trading control at initialization."""
    def initialize(self, sid=None, restricted_list=None):
        self.order_count = 0
        self.set_do_not_order_list(restricted_list)
class SetMaxOrderCountAlgorithm(TradingAlgorithm):
    """Installs a max-order-count trading control at initialization."""
    def initialize(self, count):
        self.order_count = 0
        self.set_max_order_count(count)
class SetLongOnlyAlgorithm(TradingAlgorithm):
    """Installs the long-only trading control at initialization."""
    def initialize(self):
        self.order_count = 0
        self.set_long_only()
class SetAssetDateBoundsAlgorithm(TradingAlgorithm):
    """
    Algorithm that tries to order 1 share of sid 0 on every bar and has an
    AssetDateBounds() trading control in place.
    """
    def initialize(self):
        self.register_trading_control(AssetDateBounds())
    def handle_data(algo, data):
        # NOTE: first parameter is named `algo` rather than `self`;
        # behavior is identical.
        algo.order(algo.sid(0), 1)
class TestRegisterTransformAlgorithm(TradingAlgorithm):
    """Minimal algorithm that only swaps in FixedSlippage; does nothing
    per bar."""
    def initialize(self, *args, **kwargs):
        self.set_slippage(FixedSlippage())
    def handle_data(self, data):
        pass
class AmbitiousStopLimitAlgorithm(TradingAlgorithm):
    """
    Algorithm that tries to buy with extremely low stops/limits and tries to
    sell with extremely high versions of same. Should not end up with any
    positions for reasonable data.
    """
    def initialize(self, *args, **kwargs):
        # Asset to trade is passed via the 'sid' keyword argument.
        self.asset = self.sid(kwargs.pop('sid'))
    def handle_data(self, data):
        ########
        # Buys #
        ########
        # Buy with low limit, shouldn't trigger.
        self.order(self.asset, 100, limit_price=1)
        # But with high stop, shouldn't trigger
        self.order(self.asset, 100, stop_price=10000000)
        # Buy with high limit (should trigger) but also high stop (should
        # prevent trigger).
        self.order(self.asset, 100, limit_price=10000000, stop_price=10000000)
        # Buy with low stop (should trigger), but also low limit (should
        # prevent trigger).
        self.order(self.asset, 100, limit_price=1, stop_price=1)
        #########
        # Sells #
        #########
        # Sell with high limit, shouldn't trigger.
        self.order(self.asset, -100, limit_price=1000000)
        # Sell with low stop, shouldn't trigger.
        self.order(self.asset, -100, stop_price=1)
        # Sell with low limit (should trigger), but also high stop (should
        # prevent trigger).
        self.order(self.asset, -100, limit_price=1000000, stop_price=1000000)
        # Sell with low limit (should trigger), but also low stop (should
        # prevent trigger).
        self.order(self.asset, -100, limit_price=1, stop_price=1)
        ###################
        # Rounding Checks #
        ###################
        # Prices far below a tick should round away harmlessly.
        self.order(self.asset, 100, limit_price=.00000001)
        self.order(self.asset, -100, stop_price=.00000001)
##########################################
# Algorithm using simple batch transforms
class ReturnPriceBatchTransform(BatchTransform):
    """Batch transform returning the window's price panel, asserting the
    window holds exactly ``window_length`` bars."""
    def get_value(self, data):
        assert data.shape[1] == self.window_length, \
            "data shape={0} does not equal window_length={1} for data={2}".\
            format(data.shape[1], self.window_length, data)
        return data.price
@batch_transform
def return_price_batch_decorator(data):
    """Batch transform returning the rolling window's price panel."""
    return data.price
@batch_transform
def return_args_batch_decorator(data, *args, **kwargs):
    """Batch transform echoing back the extra args/kwargs it was called with."""
    return args, kwargs
@batch_transform
def return_data(data, *args, **kwargs):
    """Batch transform returning the full window unchanged."""
    return data
@batch_transform
def uses_ufunc(data, *args, **kwargs):
    """Batch transform applying a numpy ufunc to the window."""
    # ufuncs like np.log should not crash
    return np.log(data)
@batch_transform
def price_multiple(data, multiplier, extra_arg=1):
    """Batch transform scaling prices by ``multiplier * extra_arg``."""
    return data.price * multiplier * extra_arg
class BatchTransformAlgorithm(TradingAlgorithm):
    """Exercises every batch-transform configuration (class-based, decorated,
    nan-cleaning, sid/field filtering, ufuncs, extra args, idempotence) and
    stores each bar's results in ``history_*`` lists for test inspection."""
    def initialize(self, *args, **kwargs):
        # Pop transform configuration; remaining args/kwargs are forwarded
        # to the transforms on each bar.
        self.refresh_period = kwargs.pop('refresh_period', 1)
        self.window_length = kwargs.pop('window_length', 3)
        self.args = args
        self.kwargs = kwargs
        self.history_return_price_class = []
        self.history_return_price_decorator = []
        self.history_return_args = []
        self.history_return_arbitrary_fields = []
        self.history_return_nan = []
        self.history_return_sid_filter = []
        self.history_return_field_filter = []
        self.history_return_field_no_filter = []
        self.history_return_ticks = []
        self.history_return_not_full = []
        self.return_price_class = ReturnPriceBatchTransform(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.return_price_decorator = return_price_batch_decorator(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.return_args_batch = return_args_batch_decorator(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.return_arbitrary_fields = return_data(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.return_nan = return_price_batch_decorator(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=True
        )
        self.return_sid_filter = return_price_batch_decorator(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=True,
            sids=[0]
        )
        self.return_field_filter = return_data(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=True,
            fields=['price']
        )
        self.return_field_no_filter = return_data(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=True
        )
        self.return_not_full = return_data(
            refresh_period=1,
            window_length=self.window_length,
            compute_only_full=False
        )
        self.uses_ufunc = uses_ufunc(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.price_multiple = price_multiple(
            refresh_period=self.refresh_period,
            window_length=self.window_length,
            clean_nans=False
        )
        self.iter = 0
        self.set_slippage(FixedSlippage())
    def handle_data(self, data):
        self.history_return_price_class.append(
            self.return_price_class.handle_data(data))
        self.history_return_price_decorator.append(
            self.return_price_decorator.handle_data(data))
        self.history_return_args.append(
            self.return_args_batch.handle_data(
                data, *self.args, **self.kwargs))
        self.history_return_not_full.append(
            self.return_not_full.handle_data(data))
        self.uses_ufunc.handle_data(data)
        # check that calling transforms with the same arguments
        # is idempotent
        self.price_multiple.handle_data(data, 1, extra_arg=1)
        if self.price_multiple.full:
            pre = self.price_multiple.rolling_panel.get_current().shape[0]
            result1 = self.price_multiple.handle_data(data, 1, extra_arg=1)
            post = self.price_multiple.rolling_panel.get_current().shape[0]
            assert pre == post, "batch transform is appending redundant events"
            result2 = self.price_multiple.handle_data(data, 1, extra_arg=1)
            assert result1 is result2, "batch transform is not idempotent"
            # check that calling transform with the same data, but
            # different supplemental arguments results in new
            # results.
            result3 = self.price_multiple.handle_data(data, 2, extra_arg=1)
            assert result1 is not result3, \
                "batch transform is not updating for new args"
            result4 = self.price_multiple.handle_data(data, 1, extra_arg=2)
            assert result1 is not result4,\
                "batch transform is not updating for new kwargs"
        new_data = deepcopy(data)
        for sidint in new_data:
            new_data[sidint]['arbitrary'] = 123
        self.history_return_arbitrary_fields.append(
            self.return_arbitrary_fields.handle_data(new_data))
        # nan every second event price
        if self.iter % 2 == 0:
            self.history_return_nan.append(
                self.return_nan.handle_data(data))
        else:
            nan_data = deepcopy(data)
            nan_data.price = np.nan
            self.history_return_nan.append(
                self.return_nan.handle_data(nan_data))
        self.iter += 1
        # Add a new sid to check that it does not get included
        extra_sid_data = deepcopy(data)
        extra_sid_data[1] = extra_sid_data[0]
        self.history_return_sid_filter.append(
            self.return_sid_filter.handle_data(extra_sid_data)
        )
        # Add a field to check that it does not get included
        extra_field_data = deepcopy(data)
        extra_field_data[0]['ignore'] = extra_sid_data[0]['price']
        self.history_return_field_filter.append(
            self.return_field_filter.handle_data(extra_field_data)
        )
        self.history_return_field_no_filter.append(
            self.return_field_no_filter.handle_data(extra_field_data)
        )
class BatchTransformAlgorithmMinute(TradingAlgorithm):
    """Collects minute-bar batch-transform output on every event."""
    def initialize(self, *args, **kwargs):
        self.refresh_period = kwargs.pop('refresh_period', 1)
        self.window_length = kwargs.pop('window_length', 3)
        self.args = args
        self.kwargs = kwargs
        self.history = []
        self.batch_transform = return_price_batch_decorator(
            bars='minute',
            clean_nans=False,
            refresh_period=self.refresh_period,
            window_length=self.window_length,
        )

    def handle_data(self, data):
        result = self.batch_transform.handle_data(data)
        self.history.append(result)
class SetPortfolioAlgorithm(TradingAlgorithm):
    """
    An algorithm that tries to set the portfolio directly.
    The portfolio should be treated as a read-only object
    within the algorithm.
    """
    def initialize(self, *args, **kwargs):
        pass
    def handle_data(self, data):
        # Deliberate misuse: tests expect this assignment to be rejected.
        self.portfolio = 3
class TALIBAlgorithm(TradingAlgorithm):
    """
    An algorithm that applies a TA-Lib transform. The transform object can be
    passed at initialization with the 'talib' keyword argument. The results are
    stored in the talib_results array.
    """
    def initialize(self, *args, **kwargs):
        if 'talib' not in kwargs:
            raise KeyError('No TA-LIB transform specified '
                           '(use keyword \'talib\').')
        talib = kwargs['talib']
        # Normalize a single transform into a one-element tuple.
        if isinstance(talib, (list, tuple)):
            self.talib_transforms = talib
        else:
            self.talib_transforms = (talib,)
        self.talib_results = {t: [] for t in self.talib_transforms}

    def handle_data(self, data):
        for transform in self.talib_transforms:
            result = transform.handle_data(data)
            if result is None:
                # Not enough data yet: pad with NaNs, one per output field.
                n_out = len(transform.talib_fn.output_names)
                result = np.nan if n_out == 1 else (np.nan,) * n_out
            self.talib_results[transform].append(result)
class EmptyPositionsAlgorithm(TradingAlgorithm):
    """
    An algorithm that ensures that 'phantom' positions do not appear in
    portfolio.positions in the case that a position has been entered
    and fully exited.
    """
    def initialize(self, *args, **kwargs):
        self.ordered = False
        self.exited = False
    def handle_data(self, data):
        if not self.ordered:
            # Enter 100 shares of every asset in the universe, once.
            for s in data:
                self.order(self.sid(s), 100)
            self.ordered = True
        if not self.exited:
            amounts = [pos.amount for pos
                       in itervalues(self.portfolio.positions)]
            # Only exit once every entry order has fully filled.
            if (
                all([(amount == 100) for amount in amounts]) and
                (len(amounts) == len(data.keys()))
            ):
                for stock in self.portfolio.positions:
                    self.order(self.sid(stock), -100)
                self.exited = True
        # Should be 0 when all positions are exited.
        self.record(num_positions=len(self.portfolio.positions))
class InvalidOrderAlgorithm(TradingAlgorithm):
    """
    An algorithm that tries to make various invalid order calls, verifying that
    appropriate exceptions are raised.
    """
    def initialize(self, *args, **kwargs):
        self.asset = self.sid(kwargs.pop('sids')[0])
    def handle_data(self, data):
        # Local import of the module-level order API (as opposed to the
        # methods on self).
        from zipline.api import (
            order_percent,
            order_target,
            order_target_percent,
            order_target_value,
            order_value,
        )
        # Every order helper must reject explicit limit/stop prices when an
        # ExecutionStyle object is also supplied.
        for style in [MarketOrder(), LimitOrder(10),
                      StopOrder(10), StopLimitOrder(10, 10)]:
            with assert_raises(UnsupportedOrderParameters):
                order(self.asset, 10, limit_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order(self.asset, 10, stop_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_value(self.asset, 300, limit_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_value(self.asset, 300, stop_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_percent(self.asset, .1, limit_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_percent(self.asset, .1, stop_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target(self.asset, 100, limit_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target(self.asset, 100, stop_price=10, style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target_value(self.asset, 100,
                                   limit_price=10,
                                   style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target_value(self.asset, 100,
                                   stop_price=10,
                                   style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target_percent(self.asset, .2,
                                     limit_price=10,
                                     style=style)
            with assert_raises(UnsupportedOrderParameters):
                order_target_percent(self.asset, .2,
                                     stop_price=10,
                                     style=style)
class TestRemoveDataAlgo(TradingAlgorithm):
    """Records the universe size (len(data)) seen on each of 7 bars."""
    def initialize(self, *args, **kwargs):
        self.data = np.zeros(7)
        self.i = 0

    def handle_data(self, data):
        bar_index = self.i
        self.data[bar_index] = len(data)
        self.i = bar_index + 1
##############################
# Quantopian style algorithms
# Noop algo
def initialize_noop(context):
    """Quantopian-style no-op initialize hook."""
    pass
def handle_data_noop(context, data):
    """Quantopian-style no-op handle_data hook."""
    pass
# API functions
def initialize_api(context):
    """Quantopian-style initialize using the module-level zipline API."""
    context.incr = 0
    context.sale_price = None
    set_slippage(FixedSlippage())
def handle_data_api(context, data):
    """Quantopian-style handle_data: asserts prior orders filled immediately
    at the bar price, then orders one more share and records the count."""
    if context.incr == 0:
        assert 0 not in context.portfolio.positions
    else:
        assert context.portfolio.positions[0]['amount'] == \
            context.incr, "Orders not filled immediately."
        assert context.portfolio.positions[0]['last_sale_price'] == \
            data[0].price, "Orders not filled at current price."
    context.incr += 1
    order(sid(0), 1)
    record(incr=context.incr)
###########################
# AlgoScripts as strings
noop_algo = """
# Noop algo
def initialize(context):
pass
def handle_data(context, data):
pass
"""
api_algo = """
from zipline.api import (order,
set_slippage,
FixedSlippage,
record,
sid)
def initialize(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
"""
api_get_environment_algo = """
from zipline.api import get_environment, order, symbol
def initialize(context):
context.environment = get_environment()
handle_data = lambda context, data: order(symbol('TEST'), 1)
"""
api_symbol_algo = """
from zipline.api import (order,
symbol)
def initialize(context):
pass
def handle_data(context, data):
order(symbol('TEST'), 1)
"""
call_order_in_init = """
from zipline.api import (order)
def initialize(context):
order(0, 10)
pass
def handle_data(context, data):
pass
"""
access_portfolio_in_init = """
def initialize(context):
var = context.portfolio.cash
pass
def handle_data(context, data):
pass
"""
access_account_in_init = """
def initialize(context):
var = context.account.settled_cash
pass
def handle_data(context, data):
pass
"""
call_all_order_methods = """
from zipline.api import (order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent,
sid)
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
order_value(sid(0), 300)
order_percent(sid(0), .1)
order_target(sid(0), 100)
order_target_value(sid(0), 100)
order_target_percent(sid(0), .2)
"""
record_variables = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(incr=context.incr)
"""
record_float_magic = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(data=float('%s'))
"""
| apache-2.0 |
metamx/spark | python/pyspark/sql/column.py | 15 | 15008 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
if sys.version >= '3':
basestring = str
long = int
from pyspark import copy_func, since
from pyspark.context import SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.types import *
__all__ = ["DataFrame", "Column", "DataFrameNaFunctions", "DataFrameStatFunctions"]
def _create_column_from_literal(literal):
    """Wrap a Python literal in a JVM literal Column via ``functions.lit``."""
    sc = SparkContext._active_spark_context
    return sc._jvm.functions.lit(literal)
def _create_column_from_name(name):
    """Resolve a column name string to a JVM Column via ``functions.col``."""
    sc = SparkContext._active_spark_context
    return sc._jvm.functions.col(name)
def _to_java_column(col):
    """
    Return the JVM Column backing `col`, resolving a column name to a
    JVM Column when a string is given instead of a :class:`Column`.
    """
    if isinstance(col, Column):
        return col._jc
    return _create_column_from_name(col)
def _to_seq(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(cols)
def _to_list(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM (Scala) List of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toList(cols)
def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _
def _func_op(name, doc=''):
def _(self):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
def _bin_func_op(name, reverse=False, doc="binary function"):
def _(self, other):
sc = SparkContext._active_spark_context
fn = getattr(sc._jvm.functions, name)
jc = other._jc if isinstance(other, Column) else _create_column_from_literal(other)
njc = fn(self._jc, jc) if not reverse else fn(jc, self._jc)
return Column(njc)
_.__doc__ = doc
return _
def _bin_op(name, doc="binary operator"):
""" Create a method for given binary operator
"""
def _(self, other):
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _
def _reverse_op(name, doc="binary operator"):
""" Create a method for binary operator (this object is on right side)
"""
def _(self, other):
jother = _create_column_from_literal(other)
jc = getattr(jother, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
class Column(object):
"""
A column in a DataFrame.
:class:`Column` instances can be created by::
# 1. Select a column out of a DataFrame
df.colName
df["colName"]
# 2. Create from an expression
df.colName + 1
1 / df.colName
.. versionadded:: 1.3
"""
    def __init__(self, jc):
        # Wrap the py4j handle to the JVM Column backing this Python Column.
        self._jc = jc
# arithmetic operators
__neg__ = _func_op("negate")
__add__ = _bin_op("plus")
__sub__ = _bin_op("minus")
__mul__ = _bin_op("multiply")
__div__ = _bin_op("divide")
__truediv__ = _bin_op("divide")
__mod__ = _bin_op("mod")
__radd__ = _bin_op("plus")
__rsub__ = _reverse_op("minus")
__rmul__ = _bin_op("multiply")
__rdiv__ = _reverse_op("divide")
__rtruediv__ = _reverse_op("divide")
__rmod__ = _reverse_op("mod")
__pow__ = _bin_func_op("pow")
__rpow__ = _bin_func_op("pow", reverse=True)
# logistic operators
__eq__ = _bin_op("equalTo")
__ne__ = _bin_op("notEqual")
__lt__ = _bin_op("lt")
__le__ = _bin_op("leq")
__ge__ = _bin_op("geq")
__gt__ = _bin_op("gt")
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _bin_op('and')
__or__ = _bin_op('or')
__invert__ = _func_op('not')
__rand__ = _bin_op("and")
__ror__ = _bin_op("or")
# container operators
__contains__ = _bin_op("contains")
__getitem__ = _bin_op("apply")
# bitwise operators
bitwiseOR = _bin_op("bitwiseOR")
bitwiseAND = _bin_op("bitwiseAND")
bitwiseXOR = _bin_op("bitwiseXOR")
    @since(1.3)
    def getItem(self, key):
        """
        An expression that gets an item at position ``key`` out of a list,
        or gets an item by ``key`` out of a dict.

        >>> df = sc.parallelize([([1, 2], {"key": "value"})]).toDF(["l", "d"])
        >>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
        +----+------+
        |l[0]|d[key]|
        +----+------+
        |   1| value|
        +----+------+
        >>> df.select(df.l[0], df.d["key"]).show()
        +----+------+
        |l[0]|d[key]|
        +----+------+
        |   1| value|
        +----+------+
        """
        # Delegates to __getitem__, which maps to the JVM Column's `apply`.
        return self[key]
    @since(1.3)
    def getField(self, name):
        """
        An expression that gets a field by name in a StructField.

        >>> from pyspark.sql import Row
        >>> df = sc.parallelize([Row(r=Row(a=1, b="b"))]).toDF()
        >>> df.select(df.r.getField("b")).show()
        +---+
        |r.b|
        +---+
        |  b|
        +---+
        >>> df.select(df.r.a).show()
        +---+
        |r.a|
        +---+
        |  1|
        +---+
        """
        # Delegates to __getitem__, which maps to the JVM Column's `apply`.
        return self[name]
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
return self.getField(item)
    def __iter__(self):
        # Explicitly unsupported: a Column is a lazy expression, not data.
        raise TypeError("Column is not iterable")
# string methods
rlike = _bin_op("rlike")
like = _bin_op("like")
startswith = _bin_op("startsWith")
endswith = _bin_op("endsWith")
@ignore_unicode_prefix
@since(1.3)
def substr(self, startPos, length):
"""
Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(col=u'Ali'), Row(col=u'Bob')]
"""
if type(startPos) != type(length):
raise TypeError("Can not mix the type")
if isinstance(startPos, (int, long)):
jc = self._jc.substr(startPos, length)
elif isinstance(startPos, Column):
jc = self._jc.substr(startPos._jc, length._jc)
else:
raise TypeError("Unexpected type: %s" % type(startPos))
return Column(jc)
__getslice__ = substr
@ignore_unicode_prefix
@since(1.5)
def isin(self, *cols):
"""
A boolean expression that is evaluated to true if the value of this
expression is contained by the evaluated values of the arguments.
>>> df[df.name.isin("Bob", "Mike")].collect()
[Row(age=5, name=u'Bob')]
>>> df[df.age.isin([1, 2, 3])].collect()
[Row(age=2, name=u'Alice')]
"""
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
sc = SparkContext._active_spark_context
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
# order
asc = _unary_op("asc", "Returns a sort expression based on the"
" ascending order of the given column name.")
desc = _unary_op("desc", "Returns a sort expression based on the"
" descending order of the given column name.")
isNull = _unary_op("isNull", "True if the current expression is null.")
isNotNull = _unary_op("isNotNull", "True if the current expression is not null.")
@since(1.3)
def alias(self, *alias):
    """
    Returns this column aliased with a new name or names (in the case of expressions that
    return more than one column, such as explode).

    :param alias: one or more new names for the column(s).
    >>> df.select(df.age.alias("age2")).collect()
    [Row(age2=2), Row(age2=5)]
    """
    # ``as`` is a Python keyword, so the JVM method must be fetched with
    # getattr() rather than plain attribute access.
    if len(alias) == 1:
        return Column(getattr(self._jc, "as")(alias[0]))
    else:
        # Multiple names: pass them to the JVM side as a Scala Seq.
        sc = SparkContext._active_spark_context
        return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
name = copy_func(alias, sinceversion=2.0, doc=":func:`name` is an alias for :func:`alias`.")
@ignore_unicode_prefix
@since(1.3)
def cast(self, dataType):
    """ Convert the column into type ``dataType``.

    :param dataType: target type, as a type-name string or a
        :class:`DataType` instance.
    :raises TypeError: if ``dataType`` is neither of the above.
    >>> df.select(df.age.cast("string").alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    >>> df.select(df.age.cast(StringType()).alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    """
    if isinstance(dataType, basestring):
        jc = self._jc.cast(dataType)
    elif isinstance(dataType, DataType):
        from pyspark.sql import SparkSession
        # Ship the type to the JVM in its JSON form and let the active
        # session parse it into the Java DataType that Column.cast expects.
        spark = SparkSession.builder.getOrCreate()
        jdt = spark._jsparkSession.parseDataType(dataType.json())
        jc = self._jc.cast(jdt)
    else:
        raise TypeError("unexpected type: %s" % type(dataType))
    return Column(jc)
astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
@since(1.3)
def between(self, lowerBound, upperBound):
    """
    A boolean expression that is evaluated to true if the value of this
    expression is between the given columns.
    Both bounds are inclusive, as the rendered SQL below shows.
    >>> df.select(df.name, df.age.between(2, 4)).show()
    +-----+---------------------------+
    | name|((age >= 2) AND (age <= 4))|
    +-----+---------------------------+
    |Alice| true|
    | Bob| false|
    +-----+---------------------------+
    """
    # Built from the overloaded comparison operators, so bounds may be
    # literals or Columns alike.
    return (self >= lowerBound) & (self <= upperBound)
@since(1.4)
def when(self, condition, value):
    """
    Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
    See :func:`pyspark.sql.functions.when` for example usage.

    :param condition: a boolean :class:`Column` expression.
    :param value: a literal value, or a :class:`Column` expression.
    :raises TypeError: if ``condition`` is not a Column.
    >>> from pyspark.sql import functions as F
    >>> df.select(df.name, F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)).show()
    +-----+------------------------------------------------------------+
    | name|CASE WHEN (age > 4) THEN 1 WHEN (age < 3) THEN -1 ELSE 0 END|
    +-----+------------------------------------------------------------+
    |Alice| -1|
    | Bob| 1|
    +-----+------------------------------------------------------------+
    """
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    # Unwrap Column results to their JVM counterparts; literals pass through.
    v = value._jc if isinstance(value, Column) else value
    jc = self._jc.when(condition._jc, v)
    return Column(jc)
@since(1.4)
def otherwise(self, value):
    """
    Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
    See :func:`pyspark.sql.functions.when` for example usage.

    :param value: a literal value, or a :class:`Column` expression.
    >>> from pyspark.sql import functions as F
    >>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
    +-----+-------------------------------------+
    | name|CASE WHEN (age > 3) THEN 1 ELSE 0 END|
    +-----+-------------------------------------+
    |Alice| 0|
    | Bob| 1|
    +-----+-------------------------------------+
    """
    # Unwrap a Column default to its JVM counterpart; literals pass through.
    v = value._jc if isinstance(value, Column) else value
    jc = self._jc.otherwise(v)
    return Column(jc)
@since(1.4)
def over(self, window):
    """
    Define a windowing column.

    :param window: a :class:`WindowSpec`
    :return: a Column
    :raises TypeError: if ``window`` is not a WindowSpec.
    >>> from pyspark.sql import Window
    >>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
    >>> from pyspark.sql.functions import rank, min
    >>> # df.select(rank().over(window), min('age').over(window))
    """
    # NOTE(review): imported locally, presumably to avoid a circular import
    # between the column and window modules — confirm before moving it.
    from pyspark.sql.window import WindowSpec
    if not isinstance(window, WindowSpec):
        raise TypeError("window should be WindowSpec")
    jc = self._jc.over(window._jspec)
    return Column(jc)
def __nonzero__(self):
    # A Column is a lazy SQL expression, not a value, so its truth value is
    # undefined; direct users toward the bitwise boolean operators instead.
    raise ValueError("Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
                     "'~' for 'not' when building DataFrame boolean expressions.")
# Python 3 consults __bool__ rather than __nonzero__.
__bool__ = __nonzero__
def __repr__(self):
    # ``toString()`` renders the underlying JVM expression. The UTF-8 encode
    # produces a byte ``str`` for the Python 2 interpreter this module
    # targets (see the ``long``/``basestring`` usage elsewhere in the file);
    # under Python 3 it would instead embed a ``b'...'`` literal.
    return 'Column<%s>' % self._jc.toString().encode('utf8')
def _test():
    """Run this module's doctests against a throwaway local SparkSession.

    Builds a two-row DataFrame named ``df`` that the doctests reference,
    runs :func:`doctest.testmod` over ``pyspark.sql.column``, then stops the
    session. Exits the interpreter with a non-zero status on any failure so
    the test harness can detect it.
    """
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.column tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    # Fixture DataFrame used throughout the docstring examples above.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        # Use sys.exit() rather than the site-provided exit() builtin, which
        # is only injected by the ``site`` module and is not guaranteed to
        # exist (e.g. under ``python -S``).
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| apache-2.0 |
jn7163/django | tests/forms_tests/tests/test_forms.py | 46 | 148965 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
import json
import uuid
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import RegexValidator
from django.forms import (
BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput,
ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput,
MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select,
SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput,
TimeField, ValidationError, forms,
)
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html
from django.utils.safestring import SafeData, mark_safe
class Person(Form):
    """Minimal three-field form used as the workhorse fixture in these tests."""
    first_name = CharField()
    last_name = CharField()
    birthday = DateField()
class PersonNew(Form):
    """Like Person, but first_name carries an explicit widget ``id`` attribute
    so tests can check that it wins over auto_id-generated ids."""
    first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
    last_name = CharField()
    birthday = DateField()
class FormsTestCase(SimpleTestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
    """Bound form with valid data: no errors, cleaned_data, HTML rendering."""
    # Pass a dictionary to a Form's __init__().
    p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
    self.assertTrue(p.is_bound)
    self.assertEqual(p.errors, {})
    self.assertTrue(p.is_valid())
    self.assertHTMLEqual(p.errors.as_ul(), '')
    self.assertEqual(p.errors.as_text(), '')
    self.assertEqual(p.cleaned_data["first_name"], 'John')
    self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
    self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
    self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
    self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
    self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
    # The "u?" keeps the regex valid for both Python 2 and 3 key reprs.
    nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'"
    with six.assertRaisesRegex(self, KeyError, nonexistenterror):
        p['nonexistentfield']
        self.fail('Attempts to access non-existent fields should fail.')
    form_output = []
    for boundfield in p:
        form_output.append(str(boundfield))
    self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
    form_output = []
    for boundfield in p:
        form_output.append([boundfield.label, boundfield.data])
    self.assertEqual(form_output, [
        ['First name', 'John'],
        ['Last name', 'Lennon'],
        ['Birthday', '1940-10-9']
    ])
    self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
def test_empty_dict(self):
    """Binding an empty dict yields a bound, invalid form with per-field errors."""
    # Empty dictionaries are valid, too.
    p = Person({})
    self.assertTrue(p.is_bound)
    self.assertEqual(p.errors['first_name'], ['This field is required.'])
    self.assertEqual(p.errors['last_name'], ['This field is required.'])
    self.assertEqual(p.errors['birthday'], ['This field is required.'])
    self.assertFalse(p.is_valid())
    self.assertEqual(p.cleaned_data, {})
    self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
    self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
    self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
    self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
    """Unbound forms skip validation: empty errors, is_valid() False, no cleaned_data."""
    # If you don't pass any values to the Form's __init__(), or if you pass None,
    # the Form will be considered unbound and won't do any validation. Form.errors
    # will be an empty dictionary *but* Form.is_valid() will return False.
    p = Person()
    self.assertFalse(p.is_bound)
    self.assertEqual(p.errors, {})
    self.assertFalse(p.is_valid())
    try:
        p.cleaned_data
        self.fail('Attempts to access cleaned_data when validation fails should fail.')
    except AttributeError:
        pass
    self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
    self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
    self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
    self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unicode_values(self):
    """Non-ASCII data renders correctly; partial data produces field errors."""
    # Unicode values are handled properly.
    p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
    self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
    self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
    self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
    # Missing required fields surface both on the form and on each BoundField.
    p = Person({'last_name': 'Lennon'})
    self.assertEqual(p.errors['first_name'], ['This field is required.'])
    self.assertEqual(p.errors['birthday'], ['This field is required.'])
    self.assertFalse(p.is_valid())
    self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']})
    self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
    self.assertEqual(p['first_name'].errors, ['This field is required.'])
    self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>')
    self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
    p = Person()
    self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
    self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
    self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
    """cleaned_data contains only declared fields; extra submitted keys are dropped."""
    # cleaned_data will always *only* contain a key for fields defined in the
    # Form, even if you pass extra data when you define the Form. In this
    # example, we pass a bunch of extra fields to the form constructor,
    # but cleaned_data contains only the form's fields.
    data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
    p = Person(data)
    self.assertTrue(p.is_valid())
    self.assertEqual(p.cleaned_data['first_name'], 'John')
    self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
    self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
    """Optional fields still appear in cleaned_data: '' for CharField, None for DateField."""
    # cleaned_data will include a key and value for *all* fields defined in the Form,
    # even if the Form's data didn't include a value for fields that are not
    # required. In this example, the data dictionary doesn't include a value for the
    # "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
    # empty string.
    class OptionalPersonForm(Form):
        first_name = CharField()
        last_name = CharField()
        nick_name = CharField(required=False)
    data = {'first_name': 'John', 'last_name': 'Lennon'}
    f = OptionalPersonForm(data)
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['nick_name'], '')
    self.assertEqual(f.cleaned_data['first_name'], 'John')
    self.assertEqual(f.cleaned_data['last_name'], 'Lennon')

    # For DateFields, it's set to None.
    class OptionalPersonForm(Form):
        first_name = CharField()
        last_name = CharField()
        birth_date = DateField(required=False)
    data = {'first_name': 'John', 'last_name': 'Lennon'}
    f = OptionalPersonForm(data)
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['birth_date'], None)
    self.assertEqual(f.cleaned_data['first_name'], 'John')
    self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
def test_auto_id(self):
    """auto_id with a '%s' format string interpolates each field name into its id."""
    # "auto_id" tells the Form to add an "id" attribute to each form element.
    # If it's a string that contains '%s', Django will use that as a format string
    # into which the field's name will be inserted. It will also put a <label> around
    # the human-readable labels for a field.
    p = Person(auto_id='%s_id')
    self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
    self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
    self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
def test_auto_id_true(self):
    """auto_id=True (no '%s') uses the bare field name as the id."""
    # If auto_id is any True value whose str() does not contain '%s', the "id"
    # attribute will be the name of the field.
    p = Person(auto_id=True)
    self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_auto_id_false(self):
    """auto_id=False suppresses id attributes and <label> tags entirely."""
    # If auto_id is any False value, an "id" attribute won't be output unless it
    # was manually entered.
    p = Person(auto_id=False)
    self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_id_on_field(self):
    """A widget-level id is kept even with auto_id=False, and earns a <label>."""
    # In this example, auto_id is False, but the "id" attribute for the "first_name"
    # field is given. Also note that field gets a <label>, while the others don't.
    p = PersonNew(auto_id=False)
    self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_auto_id_on_form_and_field(self):
    """A widget-level id takes precedence over auto_id-generated ids."""
    # If the "id" attribute is specified in the Form and auto_id is True, the "id"
    # attribute in the Form gets precedence.
    p = PersonNew(auto_id=True)
    self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_various_boolean_values(self):
    """BooleanField checkbox handling of the various truthy/falsy submitted strings."""
    class SignupForm(Form):
        email = EmailField()
        get_spam = BooleanField()
    f = SignupForm(auto_id=False)
    self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
    self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
    f = SignupForm({'email': 'test@example.com', 'get_spam': True}, auto_id=False)
    self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="test@example.com" />')
    self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
    # 'True' or 'true' should be rendered without a value attribute
    f = SignupForm({'email': 'test@example.com', 'get_spam': 'True'}, auto_id=False)
    self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
    f = SignupForm({'email': 'test@example.com', 'get_spam': 'true'}, auto_id=False)
    self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
    # A value of 'False' or 'false' should be rendered unchecked
    f = SignupForm({'email': 'test@example.com', 'get_spam': 'False'}, auto_id=False)
    self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
    f = SignupForm({'email': 'test@example.com', 'get_spam': 'false'}, auto_id=False)
    self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
    # A value of '0' should be interpreted as a True value (#16820)
    f = SignupForm({'email': 'test@example.com', 'get_spam': '0'})
    self.assertTrue(f.is_valid())
    self.assertTrue(f.cleaned_data.get('get_spam'))
def test_widget_output(self):
    """Widget classes/instances per field, plus the as_textarea/as_text/as_hidden shortcuts."""
    # Any Field can have a Widget class passed to its constructor:
    class ContactForm(Form):
        subject = CharField()
        message = CharField(widget=Textarea)
    f = ContactForm(auto_id=False)
    self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
    self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
    # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
    # widget type:
    self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
    self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
    self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')

    # The 'widget' parameter to a Field can also be an instance:
    class ContactForm(Form):
        subject = CharField()
        message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
    f = ContactForm(auto_id=False)
    self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
    # Instance-level attrs are *not* carried over to as_textarea(), as_text() and
    # as_hidden():
    self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
    f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
    self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>')
    self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
    self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
def test_forms_with_choices(self):
    """ChoiceField <select> rendering: selection state, widget attrs, late-set choices."""
    # For a form with a <select>, use ChoiceField:
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
    f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")

    # A subtlety: If one of the choices' value is the empty string and the form is
    # unbound, then the <option> for the empty-string choice will get selected="selected".
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")

    # You can specify widget attributes in the Widget constructor.
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
    f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")

    # When passing a custom widget instance to ChoiceField, note that setting
    # 'choices' on the widget is meaningless. The widget will use the choices
    # defined on the Field, not the ones defined on the Widget.
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
    f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")

    # You can set a ChoiceField's choices after the fact.
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField()
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
    f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
    self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
    """RadioSelect rendering, per-button auto ids, and template iteration over radios."""
    # Add widget=RadioSelect to use that widget with a ChoiceField.
    class FrameworkForm(Form):
        name = CharField()
        language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
    f = FrameworkForm(auto_id=False)
    self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
    self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
    self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
    # Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
    # gets a distinct ID, formed by appending an underscore plus the button's
    # zero-based index.
    f = FrameworkForm(auto_id='id_%s')
    self.assertHTMLEqual(str(f['language']), """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
    # When RadioSelect is used with auto_id, and the whole form is printed using
    # either as_table() or as_ul(), the label for the RadioSelect will point to the
    # ID of the *first* radio button.
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
    self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
    self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
    # Test iterating on individual radios in a template
    t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}')
    self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="myradio"><label for="id_language_0">
<input id="id_language_0" name="language" type="radio" value="P" /> Python</label></div>
<div class="myradio"><label for="id_language_1">
<input id="id_language_1" name="language" type="radio" value="J" /> Java</label></div>""")
def test_form_with_iterable_boundfield(self):
    """Iterating a RadioSelect BoundField yields one renderable widget per radio."""
    class BeatleForm(Form):
        name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
    f = BeatleForm(auto_id=False)
    self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
    self.assertHTMLEqual('\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />')
    def test_forms_with_multiple_choice(self):
        """MultipleChoiceField renders as a multi-select and marks the
        chosen options as selected when bound data is supplied."""
        # MultipleChoiceField is a special case, as its data is required to be a list:
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField()
        f = SongForm(auto_id=False)
        # No choices declared -> an empty <select multiple>.
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
        f = SongForm(auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
        f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
        self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
    def test_form_with_disabled_fields(self):
        """A disabled field ignores submitted data and cleans to the value
        from the form's ``initial`` dict or the field's own ``initial``."""
        class PersonForm(Form):
            name = CharField()
            birthday = DateField(disabled=True)
        class PersonFormFieldInitial(Form):
            name = CharField()
            birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16))
        # Disabled fields are generally not transmitted by user agents.
        # The value from the form's initial data is used.
        f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)})
        f2 = PersonFormFieldInitial({'name': 'John Doe'})
        for form in (f1, f2):
            self.assertTrue(form.is_valid())
            self.assertEqual(
                form.cleaned_data,
                {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
            )
        # Values provided in the form's data are ignored.
        data = {'name': 'John Doe', 'birthday': '1984-11-10'}
        f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)})
        f2 = PersonFormFieldInitial(data)
        for form in (f1, f2):
            self.assertTrue(form.is_valid())
            self.assertEqual(
                form.cleaned_data,
                {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
            )
    def test_hidden_data(self):
        """as_hidden() renders multi-valued and multi-widget fields as the
        appropriate set of <input type="hidden"> tags."""
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
        # MultipleChoiceField rendered as_hidden() is a special case. Because it can
        # have multiple values, its as_hidden() renders multiple <input type="hidden">
        # tags.
        f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
        self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
        f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
        self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
        # DateTimeField rendered as_hidden() is special too: SplitDateTimeField uses
        # two sub-widgets (date + time), so as_hidden() emits two hidden inputs.
        class MessageForm(Form):
            when = SplitDateTimeField()
        f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
        self.assertTrue(f.is_valid())
        self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
        self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
    def test_mulitple_choice_checkbox(self):
        """MultipleChoiceField with CheckboxSelectMultiple renders one
        checkbox per choice and marks the selected ones as checked.

        NOTE(review): "mulitple" is a typo for "multiple" in the method
        name; left as-is here to avoid renaming a discoverable test.
        """
        # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
        f = SongForm(auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        f = SongForm({'composers': ['J']}, auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        f = SongForm({'composers': ['J', 'P']}, auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        # Test iterating on individual checkboxes in a template
        t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}')
        self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="J" /> John Lennon</label></div>
<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="P" /> Paul McCartney</label></div>""")
    def test_checkbox_auto_id(self):
        """Each checkbox in a CheckboxSelectMultiple gets a distinct id:
        the auto_id result plus an underscore and the zero-based index."""
        # Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
        # gets a distinct ID, formed by appending an underscore plus the checkbox's
        # zero-based index.
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
        f = SongForm(auto_id='%s_id')
        self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id">
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict and
# MultiValueDict conveniently work with this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
    def test_multiple_hidden(self):
        """MultipleHiddenInput renders one hidden input per value, and
        CheckboxSelectMultiple round-trips list data through cleaning."""
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
        # The MultipleHiddenInput widget renders multiple values as hidden fields.
        class SongFormHidden(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
        f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
        self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
        # When using CheckboxSelectMultiple, the framework expects a list of input and
        # returns a list of input.
        f = SongForm({'name': 'Yesterday'}, auto_id=False)
        self.assertEqual(f.errors['composers'], ['This field is required.'])
        f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['composers'], ['J'])
        self.assertEqual(f.cleaned_data['name'], 'Yesterday')
        f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
        self.assertEqual(f.cleaned_data['name'], 'Yesterday')
    def test_escaping(self):
        """Error messages and labels built with mark_safe() keep their
        markup in rendered output; plain strings go through the normal
        escaping path."""
        # Validation errors are HTML-escaped when output as HTML.
        class EscapingForm(Form):
            special_name = CharField(label="<em>Special</em> Field")
            special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
            def clean_special_name(self):
                # Plain (unsafe) message built from user input.
                raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
            def clean_special_safe_name(self):
                # Deliberately marked safe -- its markup is preserved.
                raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
        f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
        f = EscapingForm({
            'special_name': "Should escape < & > and <script>alert('xss')</script>",
            'special_safe_name': "<i>Do not escape</i>"
        }, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape < & > and <script>alert('xss')</script>'</li></ul><input type="text" name="special_name" value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
    def test_validating_multiple_fields(self):
        """Cross-field validation via clean_XXX(), Form.clean() raising a
        plain message (NON_FIELD_ERRORS), a dict of per-field errors, and
        Form.add_error() all attach errors to the right keys."""
        # There are a couple of ways to do multiple-field validation. If you want the
        # validation message to be associated with a particular field, implement the
        # clean_XXX() method on the Form, where XXX is the field name. As in
        # Field.clean(), the clean_XXX() method should return the cleaned value. In the
        # clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
        # of all the data that has been cleaned *so far*, in order by the fields,
        # including the current field (e.g., the field XXX if you're in clean_XXX()).
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean_password2(self):
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data['password2']
        f = UserRegistration(auto_id=False)
        self.assertEqual(f.errors, {})
        f = UserRegistration({}, auto_id=False)
        self.assertEqual(f.errors['username'], ['This field is required.'])
        self.assertEqual(f.errors['password1'], ['This field is required.'])
        self.assertEqual(f.errors['password2'], ['This field is required.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['username'], 'adrian')
        self.assertEqual(f.cleaned_data['password1'], 'foo')
        self.assertEqual(f.cleaned_data['password2'], 'foo')
        # Another way of doing multiple-field validation is by implementing the
        # Form's clean() method. Usually ValidationError raised by that method
        # will not be associated with a particular field and will have a
        # special-case association with the field named '__all__'. It's
        # possible to associate the errors to particular field with the
        # Form.add_error() method or by passing a dictionary that maps each
        # field to one or more errors.
        #
        # Note that in Form.clean(), you have access to self.cleaned_data, a
        # dictionary of all the fields/values that have *not* raised a
        # ValidationError. Also note Form.clean() is required to return a
        # dictionary of all clean data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                # Test raising a ValidationError as NON_FIELD_ERRORS.
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                # Test raising ValidationError that targets multiple fields.
                errors = {}
                if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
                    errors['password1'] = 'Forbidden value.'
                if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE':
                    errors['password2'] = ['Forbidden value.']
                if errors:
                    raise ValidationError(errors)
                # Test Form.add_error()
                if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2':
                    self.add_error(None, 'Non-field error 1.')
                    self.add_error('password1', 'Forbidden value 2.')
                if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2':
                    self.add_error('password2', 'Forbidden value 2.')
                    raise ValidationError('Non-field error 2.')
                return self.cleaned_data
        f = UserRegistration(auto_id=False)
        self.assertEqual(f.errors, {})
        f = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
        self.assertEqual(f.errors['username'], ['This field is required.'])
        self.assertEqual(f.errors['password1'], ['This field is required.'])
        self.assertEqual(f.errors['password2'], ['This field is required.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
        self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
        self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['username'], 'adrian')
        self.assertEqual(f.cleaned_data['password1'], 'foo')
        self.assertEqual(f.cleaned_data['password2'], 'foo')
        f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE'}, auto_id=False)
        self.assertEqual(f.errors['password1'], ['Forbidden value.'])
        self.assertEqual(f.errors['password2'], ['Forbidden value.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2'}, auto_id=False)
        self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.'])
        self.assertEqual(f.errors['password1'], ['Forbidden value 2.'])
        self.assertEqual(f.errors['password2'], ['Forbidden value 2.'])
        # add_error() on an unknown field name must raise ValueError.
        with six.assertRaisesRegex(self, ValueError, "has no field named"):
            f.add_error('missing_field', 'Some error.')
    def test_update_error_dict(self):
        """ValidationError.update_error_dict() merges successive field and
        non-field errors into self._errors and preserves the ErrorDict and
        ErrorList container types."""
        class CodeForm(Form):
            code = CharField(max_length=10)
            def clean(self):
                # Merge the same field key repeatedly -- entries must accumulate.
                try:
                    raise ValidationError({'code': [ValidationError('Code error 1.')]})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError({'code': [ValidationError('Code error 2.')]})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError({'code': forms.ErrorList(['Code error 3.'])})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                # Non-field errors end up under NON_FIELD_ERRORS.
                try:
                    raise ValidationError('Non-field error 1.')
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError([ValidationError('Non-field error 2.')])
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                # Ensure that the newly added list of errors is an instance of ErrorList.
                for field, error_list in self._errors.items():
                    if not isinstance(error_list, self.error_class):
                        self._errors[field] = self.error_class(error_list)
        form = CodeForm({'code': 'hello'})
        # Trigger validation.
        self.assertFalse(form.is_valid())
        # Check that update_error_dict didn't lose track of the ErrorDict type.
        self.assertIsInstance(form._errors, forms.ErrorDict)
        self.assertEqual(dict(form.errors), {
            'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'],
            NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'],
        })
    def test_has_error(self):
        """Form.has_error() matches by field name and, optionally, by
        error code -- including NON_FIELD_ERRORS raised from clean()."""
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput, min_length=5)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
                        and self.cleaned_data['password1'] != self.cleaned_data['password2']):
                    raise ValidationError(
                        'Please make sure your passwords match.',
                        code='password_mismatch',
                    )
        # Empty data: 'required' errors on every field.
        f = UserRegistration(data={})
        self.assertTrue(f.has_error('password1'))
        self.assertTrue(f.has_error('password1', 'required'))
        self.assertFalse(f.has_error('password1', 'anything'))
        # Too-short matching passwords: only a 'min_length' error on password1.
        f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'})
        self.assertTrue(f.has_error('password1'))
        self.assertTrue(f.has_error('password1', 'min_length'))
        self.assertFalse(f.has_error('password1', 'anything'))
        self.assertFalse(f.has_error('password2'))
        self.assertFalse(f.has_error('password2', 'anything'))
        # Mismatched passwords: the error is a non-field 'password_mismatch'.
        f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
        self.assertFalse(f.has_error('password1'))
        self.assertFalse(f.has_error('password1', 'required'))
        self.assertTrue(f.has_error(NON_FIELD_ERRORS))
        self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch'))
        self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything'))
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))
f = Person(names_required=True)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (True, True))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({'class': 'required'}, {'class': 'required'}))
f = Person(names_required=False)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))
f = Person(name_max_length=20)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (20, 20))
f = Person(name_max_length=None)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
    def test_validators_independence(self):
        """Replacing an entry in one form instance's field validators list
        must not affect other instances of the same form class."""
        from django.core.validators import MaxValueValidator
        class MyForm(Form):
            myfield = CharField(max_length=25)
        f1 = MyForm()
        f2 = MyForm()
        # Mutate f1's validators only; f2's must remain unchanged.
        f1.fields['myfield'].validators[0] = MaxValueValidator(12)
        self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])
    def test_hidden_widget(self):
        """HiddenInput fields render without labels or separate rows, get
        their errors prefixed with "(Hidden field ...)" at the top of the
        output, and an all-hidden form renders as bare inputs."""
        # HiddenInput widgets are displayed differently in the as_table(), as_ul())
        # and as_p() output of a Form -- their verbose names are not displayed, and a
        # separate row is not displayed. They're displayed in the last row of the
        # form, directly after that row's form element.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            hidden_text = CharField(widget=HiddenInput)
            birthday = DateField()
        p = Person(auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
        # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
        p = Person(auto_id='id_%s')
        self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
        # If a field with a HiddenInput has errors, the as_table() and as_ul() output
        # will include the error message(s) with the text "(Hidden field [fieldname]) "
        # prepended. This message is displayed at the top of the output, regardless of
        # its field's order in the form.
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
        # A corner case: It's possible for a form to have only HiddenInputs.
        class TestForm(Form):
            foo = CharField(widget=HiddenInput)
            bar = CharField(widget=HiddenInput)
        p = TestForm(auto_id=False)
        self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
        self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
        self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
    def test_field_order(self):
        """Fields render in declaration order -- field10 follows field9,
        not field1, so ordering is by definition, not alphabetical."""
        # A Form's fields are displayed in the same order in which they were defined.
        class TestForm(Form):
            field1 = CharField()
            field2 = CharField()
            field3 = CharField()
            field4 = CharField()
            field5 = CharField()
            field6 = CharField()
            field7 = CharField()
            field8 = CharField()
            field9 = CharField()
            field10 = CharField()
            field11 = CharField()
            field12 = CharField()
            field13 = CharField()
            field14 = CharField()
        p = TestForm(auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
    def test_explicit_field_order(self):
        """field_order reorders declared fields; assigning a field name to
        None removes it in a subclass; names in field_order that are
        unknown or missing are silently ignored; order_fields() can be
        called at runtime from __init__()."""
        class TestFormParent(Form):
            field1 = CharField()
            field2 = CharField()
            field4 = CharField()
            field5 = CharField()
            field6 = CharField()
            field_order = ['field6', 'field5', 'field4', 'field2', 'field1']
        class TestForm(TestFormParent):
            field3 = CharField()
            field_order = ['field2', 'field4', 'field3', 'field5', 'field6']
        class TestFormRemove(TestForm):
            # Setting an inherited field to None removes it.
            field1 = None
        class TestFormMissing(TestForm):
            field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1']
            field1 = None
        class TestFormInit(TestFormParent):
            field3 = CharField()
            field_order = None
            def __init__(self, **kwargs):
                super(TestFormInit, self).__init__(**kwargs)
                # Ordering can also be applied after construction.
                self.order_fields(field_order=TestForm.field_order)
        p = TestFormParent()
        self.assertEqual(list(p.fields.keys()), TestFormParent.field_order)
        p = TestFormRemove()
        self.assertEqual(list(p.fields.keys()), TestForm.field_order)
        p = TestFormMissing()
        self.assertEqual(list(p.fields.keys()), TestForm.field_order)
        p = TestForm()
        self.assertEqual(list(p.fields.keys()), TestFormMissing.field_order)
        p = TestFormInit()
        order = list(TestForm.field_order) + ['field1']
        self.assertEqual(list(p.fields.keys()), order)
        # Unknown names in field_order are ignored; remaining fields keep
        # their declared order.
        TestForm.field_order = ['unknown']
        p = TestForm()
        self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3'])
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
    def test_specifying_labels(self):
        """
        A field's label comes from the ``label`` argument; when omitted or
        ``None`` it is derived from the field name, and ``label=''``
        suppresses the label text entirely.
        """
        # You can specify the label for a field by using the 'label' argument to a Field
        # class. If you don't specify 'label', Django will use the field name with
        # underscores converted to spaces, and the initial letter capitalized.
        class UserRegistration(Form):
            username = CharField(max_length=10, label='Your username')
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
        # Labels for as_* methods will only end in a colon if they don't end in other
        # punctuation already.
        class Questions(Form):
            q1 = CharField(label='The first question')
            q2 = CharField(label='What is your name?')
            q3 = CharField(label='The answer to life is:')
            q4 = CharField(label='Answer this question!')
            q5 = CharField(label='The last question. Period.')
        self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
        self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
        # If a label is set to the empty string for a field, that field won't get a label.
        class UserRegistration(Form):
            username = CharField(max_length=10, label='')
            password = CharField(widget=PasswordInput)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(auto_id='id_%s')
        self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
        # If label is None, Django will auto-create the label from the field name. This
        # is default behavior.
        class UserRegistration(Form):
            username = CharField(max_length=10, label=None)
            password = CharField(widget=PasswordInput)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(auto_id='id_%s')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
answer = CharField(label='Secret answer', label_suffix=' =')
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='\u2192')
self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n<li>Secret answer = <input type="text" name="answer" /></li>')
    def test_initial_data(self):
        """Field-level ``initial`` is rendered only on unbound forms."""
        # You can specify initial data for a field by using the 'initial' argument to a
        # Field class. This initial data is displayed when a Form is rendered with *no*
        # data. It is not displayed when a Form is rendered with any data (including an
        # empty dictionary). Also, the initial value is *not* used if data for a
        # particular required field isn't provided.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        # Here, we're submitting data, so the initial value will *not* be displayed.
        p = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        # An 'initial' value is *not* used as a fallback if data is not provided. In this
        # example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
    def test_dynamic_initial_data(self):
        """
        The ``initial`` dict passed to the Form constructor supplies initial
        values at runtime and takes precedence over field-level ``initial``.
        """
        # The previous technique dealt with "hard-coded" initial data, but it's also
        # possible to specify initial data after you've already created the Form class
        # (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
        # should be a dictionary containing initial values for one or more fields in the
        # form, keyed by field name.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        # The 'initial' parameter is meaningless if you pass data.
        p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        # A dynamic 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)
        p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
    def test_callable_initial_data(self):
        """
        ``initial`` may be a callable (both on the field and in the
        constructor dict); it is invoked to produce the rendered value.
        """
        # The previous technique dealt with raw values as initial data, but it's also
        # possible to specify callable data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])
        # We need to define functions that get called later.
        def initial_django():
            return 'django'
        def initial_stephane():
            return 'stephane'
        def initial_options():
            return ['f', 'b']
        def initial_other_options():
            return ['b', 'w']
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
        # The 'initial' parameter is meaningless if you pass data.
        p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
        p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
        p = UserRegistration({'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
        # A callable 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial=initial_django)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
        p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# Test that field raising ValidationError is always in changed_data
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
def test_boundfield_values(self):
# It's possible to get to the value which would be used for rendering
# the widget for a field by using the BoundField's value method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
def test_boundfield_initial_called_once(self):
"""
Multiple calls to BoundField().value() in an unbound form should return
the same result each time (#24391).
"""
class MyForm(Form):
name = CharField(max_length=10, initial=uuid.uuid4)
form = MyForm()
name = form['name']
self.assertEqual(name.value(), name.value())
# BoundField is also cached
self.assertIs(form['name'], name)
    def test_boundfield_rendering(self):
        """
        Python 2 issue: Test that rendering a BoundField with bytestring content
        doesn't lose its safe string status (#22950).
        """
        class CustomWidget(TextInput):
            def render(self, name, value, attrs=None):
                # str() keeps this a native (byte)string on Python 2.
                return format_html(str('<input{} />'), ' id=custom')
        class SampleForm(Form):
            name = CharField(widget=CustomWidget)
        f = SampleForm(data={'name': 'bar'})
        self.assertIsInstance(force_text(f['name']), SafeData)
    def test_initial_datetime_values(self):
        """
        Microseconds in initial datetime/time values are stripped unless the
        widget keeps them; widgets can opt out via
        ``supports_microseconds = False`` (#22502).
        """
        now = datetime.datetime.now()
        # Nix microseconds (since they should be ignored). #22502
        now_no_ms = now.replace(microsecond=0)
        if now == now_no_ms:
            # Guarantee the two values actually differ.
            now = now.replace(microsecond=1)
        def delayed_now():
            return now
        def delayed_now_time():
            return now.time()
        class HiddenInputWithoutMicrosec(HiddenInput):
            supports_microseconds = False
        class TextInputWithoutMicrosec(TextInput):
            supports_microseconds = False
        class DateTimeForm(Form):
            auto_timestamp = DateTimeField(initial=delayed_now)
            auto_time_only = TimeField(initial=delayed_now_time)
            supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
            hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput)
            hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec)
            ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec)
        unbound = DateTimeForm()
        # Default widgets strip microseconds; TextInput and HiddenInput keep
        # them; supports_microseconds = False strips them again.
        self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms)
        self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time())
        self.assertEqual(unbound['supports_microseconds'].value(), now)
        self.assertEqual(unbound['hi_default_microsec'].value(), now)
        self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms)
        self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms)
    def test_help_text(self):
        """``help_text`` renders in a helptext span, except for hidden fields."""
        # You can specify descriptive text for a field by using the 'help_text' argument.
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., user@example.com')
            password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
        self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""")
        self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., user@example.com</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""")
        # The help text is displayed whether or not data is provided for the form.
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
        # help_text is not displayed for hidden fields. It can be used for documentation
        # purposes, though.
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., user@example.com')
            password = CharField(widget=PasswordInput)
            next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
    def test_subclassing_forms(self):
        """
        Form subclasses inherit all parent fields; subclass fields are
        appended after them.
        """
        # You can subclass a Form to add fields. The resulting form subclass will have
        # all of the fields of the parent Form, plus whichever fields you define in the
        # subclass.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        class Musician(Person):
            instrument = CharField()
        p = Person(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
        m = Musician(auto_id=False)
        self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
        # Yes, you can subclass multiple forms. The fields are added in the order in
        # which the parent classes are listed.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        class Instrument(Form):
            instrument = CharField()
        class Beatle(Person, Instrument):
            haircut_type = CharField()
        b = Beatle(auto_id=False)
        # Note: Instrument's field renders first here, then Person's fields,
        # then the field declared on Beatle itself.
        self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li>
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
    def test_forms_with_prefixes(self):
        """
        A form ``prefix`` namespaces field names in HTML and in the data dict;
        ``add_prefix()`` can customize how the prefix is joined.
        """
        # Sometimes it's necessary to have multiple forms display on the same HTML page,
        # or multiple copies of the same form. We can accomplish this with form prefixes.
        # Pass the keyword argument 'prefix' to the Form constructor to use this feature.
        # This value will be prepended to each HTML form field name. One way to think
        # about this is "namespaces for HTML forms". Notice that in the data argument,
        # each field's key has the prefix, in this case 'person1', prepended to the
        # actual field name.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        data = {
            'person1-first_name': 'John',
            'person1-last_name': 'Lennon',
            'person1-birthday': '1940-10-9'
        }
        p = Person(data, prefix='person1')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
        self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
        self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
        self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
        self.assertEqual(p.errors, {})
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
        # Let's try submitting some bad data to make sure form.errors and field.errors
        # work as expected.
        data = {
            'person1-first_name': '',
            'person1-last_name': '',
            'person1-birthday': ''
        }
        p = Person(data, prefix='person1')
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertEqual(p['first_name'].errors, ['This field is required.'])
        # Accessing a field by its prefixed name raises KeyError -- item access
        # always uses the unprefixed field name.
        # NOTE(review): assertRaises(KeyError) would be more idiomatic here.
        try:
            p['person1-first_name'].errors
            self.fail('Attempts to access non-existent fields should fail.')
        except KeyError:
            pass
        # In this example, the data doesn't have a prefix, but the form requires it, so
        # the form doesn't "see" the fields.
        data = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': '1940-10-9'
        }
        p = Person(data, prefix='person1')
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        # With prefixes, a single data dictionary can hold data for multiple instances
        # of the same form.
        data = {
            'person1-first_name': 'John',
            'person1-last_name': 'Lennon',
            'person1-birthday': '1940-10-9',
            'person2-first_name': 'Jim',
            'person2-last_name': 'Morrison',
            'person2-birthday': '1943-12-8'
        }
        p1 = Person(data, prefix='person1')
        self.assertTrue(p1.is_valid())
        self.assertEqual(p1.cleaned_data['first_name'], 'John')
        self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
        p2 = Person(data, prefix='person2')
        self.assertTrue(p2.is_valid())
        self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
        self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
        self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
        # By default, forms append a hyphen between the prefix and the field name, but a
        # form can alter that behavior by implementing the add_prefix() method. This
        # method takes a field name and returns the prefixed field, according to
        # self.prefix.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
            def add_prefix(self, field_name):
                return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
        p = Person(prefix='foo')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
        data = {
            'foo-prefix-first_name': 'John',
            'foo-prefix-last_name': 'Lennon',
            'foo-prefix-birthday': '1940-10-9'
        }
        p = Person(data, prefix='foo')
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_class_prefix(self):
# Prefix can be also specified at the class level.
class Person(Form):
first_name = CharField()
prefix = 'foo'
p = Person()
self.assertEqual(p.prefix, 'foo')
p = Person(prefix='bar')
self.assertEqual(p.prefix, 'bar')
    def test_forms_with_null_boolean(self):
        """
        NullBooleanField renders as a select (Unknown/Yes/No with widget
        values '1'/'2'/'3') whether the data uses widget values or booleans.
        """
        # NullBooleanField is a bit of a special case because its presentation (widget)
        # is different than its data. This is handled transparently, though.
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()
        p = Person({'name': 'Joe'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
        # Python booleans select the matching Yes/No option as well.
        p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
def test_forms_with_file_fields(self):
    """FileField takes its data from the ``files`` argument: missing,
    empty, and non-file values each produce the appropriate error, and
    non-ASCII file names/contents are accepted."""
    # FileFields are a special case because they take their data from the request.FILES,
    # not request.POST.
    class FileForm(Form):
        file1 = FileField()
    # Unbound form: plain widget, no errors.
    f = FileForm(auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
    # Bound with no file -> required error.
    f = FileForm(data={}, files={}, auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
    # Zero-byte upload -> "file is empty" error.
    f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
    # A non-file value (e.g. wrong form enctype) -> encoding-type error.
    f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
    # A real file with content validates.
    f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
    self.assertTrue(f.is_valid())
    # Non-ASCII file name and content are handled.
    f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
    self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
def test_basic_processing_in_view(self):
    """Simulates the canonical GET/POST form-handling flow of a view:
    empty form on GET, redisplay with errors on invalid POST, success
    output on valid POST."""
    class UserRegistration(Form):
        username = CharField(max_length=10)
        password1 = CharField(widget=PasswordInput)
        password2 = CharField(widget=PasswordInput)
        def clean(self):
            # Cross-field validation: both passwords present and equal.
            if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise ValidationError('Please make sure your passwords match.')
            return self.cleaned_data
    def my_function(method, post_data):
        # Stand-in for a Django view function.
        if method == 'POST':
            form = UserRegistration(post_data, auto_id=False)
        else:
            form = UserRegistration(auto_id=False)
        if form.is_valid():
            return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
        t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
        return t.render(Context({'form': form}))
    # Case 1: GET (an empty form, with no errors).
    self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
    # Case 2: POST with erroneous data (a redisplayed form, with errors).
    self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
    # Case 3: POST with valid data (the success message).
    self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
        str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"))
def test_templates_with_forms(self):
    """Forms render field-by-field in templates: per-field widgets, labels,
    label_tag, help_text, field errors and non_field_errors are all
    reachable through template variable lookups."""
    class UserRegistration(Form):
        username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
        password1 = CharField(widget=PasswordInput)
        password2 = CharField(widget=PasswordInput)
        def clean(self):
            # Cross-field validation: both passwords present and equal.
            if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise ValidationError('Please make sure your passwords match.')
            return self.cleaned_data
    # You have full flexibility in displaying form fields in a template. Just pass a
    # Form instance to the template, and use "dot" access to refer to individual
    # fields. Note, however, that this flexibility comes with the responsibility of
    # displaying all the errors, including any that might not be associated with a
    # particular field.
    t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
    # Use form.[field].label to output a field's label. You can specify the label for
    # a field by using the 'label' argument to a Field class. If you don't specify
    # 'label', Django will use the field name with underscores converted to spaces,
    # and the initial letter capitalized.
    t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
    # Use form.[field].label_tag to output a field's label with a <label> tag
    # wrapped around it, but *only* if the given field has an "id" attribute.
    # Recall from above that passing the "auto_id" argument to a Form gives each
    # field an "id" attribute.
    t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
    # Use form.[field].help_text to output a field's help text. If the given field
    # does not have help text, nothing will be output.
    t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
    self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '')
    # To display the errors that aren't associated with a particular field -- e.g.,
    # the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
    # template. If used on its own, it is displayed as a <ul> (or an empty string, if
    # the list of errors is empty). You can also use it in {% if %} statements.
    t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
    t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
    self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
def test_empty_permitted(self):
    """With empty_permitted=True a completely empty bound form validates
    (with empty cleaned_data); once any real data is entered, full
    validation applies again. Only missing/None data counts as empty --
    falsy-but-real initial values (e.g. 0.0) are treated literally."""
    # Sometimes (pretty much in formsets) we want to allow a form to pass validation
    # if it is completely empty. We can accomplish this by using the empty_permitted
    # argument to a form constructor.
    class SongForm(Form):
        artist = CharField()
        name = CharField()
    # First let's show what happens if empty_permitted=False (the default):
    # NOTE(review): the 'song' key does not match any field, so this data is
    # effectively empty -- presumably intentional; confirm against history.
    data = {'artist': '', 'song': ''}
    form = SongForm(data, empty_permitted=False)
    self.assertFalse(form.is_valid())
    self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
    self.assertEqual(form.cleaned_data, {})
    # Now let's show what happens when empty_permitted=True and the form is empty.
    form = SongForm(data, empty_permitted=True)
    self.assertTrue(form.is_valid())
    self.assertEqual(form.errors, {})
    self.assertEqual(form.cleaned_data, {})
    # But if we fill in data for one of the fields, the form is no longer empty and
    # the whole thing must pass validation.
    data = {'artist': 'The Doors', 'song': ''}
    form = SongForm(data, empty_permitted=False)
    self.assertFalse(form.is_valid())
    self.assertEqual(form.errors, {'name': ['This field is required.']})
    self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
    # If a field is not given in the data then None is returned for its data. Let's
    # make sure that when checking for empty_permitted that None is treated
    # accordingly.
    data = {'artist': None, 'song': ''}
    form = SongForm(data, empty_permitted=True)
    self.assertTrue(form.is_valid())
    # However, we *really* need to be sure we are checking for None as any data in
    # initial that returns False on a boolean call needs to be treated literally.
    class PriceForm(Form):
        amount = FloatField()
        qty = IntegerField()
    data = {'amount': '0.0', 'qty': ''}
    form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
    self.assertTrue(form.is_valid())
def test_extracting_hidden_and_visible(self):
    """hidden_fields() and visible_fields() partition the form's bound
    fields by widget visibility, preserving declaration order."""
    class SongForm(Form):
        token = CharField(widget=HiddenInput)
        artist = CharField()
        name = CharField()
    song_form = SongForm()
    hidden_names = [bound.name for bound in song_form.hidden_fields()]
    visible_names = [bound.name for bound in song_form.visible_fields()]
    self.assertEqual(hidden_names, ['token'])
    self.assertEqual(visible_names, ['artist', 'name'])
def test_hidden_initial_gets_id(self):
    """A field with show_hidden_initial=True renders an extra hidden input
    whose id is the visible input's id prefixed with 'initial-'."""
    class MyForm(Form):
        field1 = CharField(max_length=50, show_hidden_initial=True)
    self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
def test_error_html_required_html_classes(self):
    """error_css_class and required_css_class are added to each row's
    container element in as_ul(), as_p() and as_table() output."""
    class Person(Form):
        name = CharField()
        is_cool = NullBooleanField()
        email = EmailField(required=False)
        age = IntegerField()
    # Bound with no data: required fields get both 'required' and 'error'.
    p = Person({})
    p.error_css_class = 'error'
    p.required_css_class = 'required'
    self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""")
    self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""")
    self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label class="required" for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""")
def test_label_has_required_css_class(self):
    """
    #17922 - required_css_class is added to the label_tag() of required fields.
    """
    class SomeForm(Form):
        required_css_class = 'required'
        field = CharField(max_length=10)
        field2 = IntegerField(required=False)
    f = SomeForm({'field': 'test'})
    # Required field: class added to <label>.
    self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>')
    # Caller-supplied classes are merged with the required class.
    self.assertHTMLEqual(f['field'].label_tag(attrs={'class': 'foo'}),
        '<label for="id_field" class="foo required">Field:</label>')
    # Optional field: no required class.
    self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>')
def test_label_split_datetime_not_displayed(self):
    """A field rendered with a fully hidden widget (SplitHiddenDateTimeWidget)
    produces only the hidden inputs -- no label row in as_ul()."""
    class EventForm(Form):
        happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
    form = EventForm()
    self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
def test_multivalue_field_validation(self):
    """MultiValueField runs field-level validators against the compressed
    value, and sub-field errors are collected on the composite field."""
    def bad_names(value):
        # Validator applied to the *compressed* value.
        if value == 'bad value':
            raise ValidationError('bad value not allowed')
    class NameField(MultiValueField):
        def __init__(self, fields=(), *args, **kwargs):
            fields = (CharField(label='First name', max_length=10),
                      CharField(label='Last name', max_length=10))
            super(NameField, self).__init__(fields=fields, *args, **kwargs)
        def compress(self, data_list):
            return ' '.join(data_list)
    class NameForm(Form):
        name = NameField(validators=[bad_names])
    # 'bad' + 'value' compresses to 'bad value' -> rejected by the validator.
    form = NameForm(data={'name': ['bad', 'value']})
    form.full_clean()
    self.assertFalse(form.is_valid())
    self.assertEqual(form.errors, {'name': ['bad value not allowed']})
    # Each sub-field's max_length error is reported on the composite field.
    form = NameForm(data={'name': ['should be overly', 'long for the field names']})
    self.assertFalse(form.is_valid())
    self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
                                            'Ensure this value has at most 10 characters (it has 24).']})
    form = NameForm(data={'name': ['fname', 'lname']})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data, {'name': 'fname lname'})
def test_multivalue_deep_copy(self):
    """
    #19298 -- MultiValueField needs to override the default as it needs
    to deep-copy subfields:
    """
    class ChoicesField(MultiValueField):
        def __init__(self, fields=(), *args, **kwargs):
            fields = (ChoiceField(label='Rank',
                                  choices=((1, 1), (2, 2))),
                      CharField(label='Name', max_length=10))
            super(ChoicesField, self).__init__(fields=fields, *args, **kwargs)
    field = ChoicesField()
    field2 = copy.deepcopy(field)
    self.assertIsInstance(field2, ChoicesField)
    # Sub-fields (and their mutable attributes) must not be shared
    # between the original field and its deep copy.
    self.assertIsNot(field2.fields, field.fields)
    self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)
def test_multivalue_initial_data(self):
    """
    #23674 -- invalid initial data should not break form.changed_data()
    """
    class DateAgeField(MultiValueField):
        def __init__(self, fields=(), *args, **kwargs):
            fields = (DateField(label="Date"), IntegerField(label="Age"))
            super(DateAgeField, self).__init__(fields=fields, *args, **kwargs)
    class DateAgeForm(Form):
        date_age = DateAgeField()
    data = {"date_age": ["1998-12-06", 16]}
    # "200-10-10" is not a valid date; has_changed() must not raise on it.
    form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
    self.assertTrue(form.has_changed())
def test_multivalue_optional_subfields(self):
    """Exercises the `required` / `require_all_fields` matrix of
    MultiValueField: which combinations raise `required`, `incomplete`,
    or sub-field validation errors."""
    class PhoneField(MultiValueField):
        def __init__(self, *args, **kwargs):
            fields = (
                CharField(label='Country Code', validators=[
                    RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]),
                CharField(label='Phone Number'),
                CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}),
                CharField(label='Label', required=False, help_text='E.g. home, work.'),
            )
            super(PhoneField, self).__init__(fields, *args, **kwargs)
        def compress(self, data_list):
            if data_list:
                return '%s.%s ext. %s (label: %s)' % tuple(data_list)
            return None
    # An empty value for any field will raise a `required` error on a
    # required `MultiValueField`.
    f = PhoneField()
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61'])
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123'])
    self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
    # Empty values for fields will NOT raise a `required` error on an
    # optional `MultiValueField`
    f = PhoneField(required=False)
    self.assertIsNone(f.clean(''))
    self.assertIsNone(f.clean(None))
    self.assertIsNone(f.clean([]))
    self.assertEqual('+61. ext.  (label: )', f.clean(['+61']))
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
    # For a required `MultiValueField` with `require_all_fields=False`, a
    # `required` error will only be raised if all fields are empty. Fields
    # can individually be required or optional. An empty value for any
    # required field will raise an `incomplete` error.
    f = PhoneField(require_all_fields=False)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
    self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    six.assertRaisesRegex(self, ValidationError,
        "'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
    # For an optional `MultiValueField` with `require_all_fields=False`, we
    # don't get any `required` error but we still get `incomplete` errors.
    f = PhoneField(required=False, require_all_fields=False)
    self.assertIsNone(f.clean(''))
    self.assertIsNone(f.clean(None))
    self.assertIsNone(f.clean([]))
    self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    six.assertRaisesRegex(self, ValidationError,
        "'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
def test_custom_empty_values(self):
    """
    Test that form fields can customize what is considered as an empty value
    for themselves (#19997).
    """
    class CustomJSONField(CharField):
        # Only None and '' are empty; '{}' must survive as valid data.
        empty_values = [None, '']
        def to_python(self, value):
            # Fake json.loads
            if value == '{}':
                return {}
            return super(CustomJSONField, self).to_python(value)
    class JSONForm(forms.Form):
        json = CustomJSONField()
    form = JSONForm(data={'json': '{}'})
    form.full_clean()
    # The falsy {} result is kept because '{}' is not in empty_values.
    self.assertEqual(form.cleaned_data, {'json': {}})
def test_boundfield_label_tag(self):
    """label_tag() renders the <label>: no args uses the field's label, a
    positional argument overrides it (escaped unless mark_safe), and
    `attrs` adds attributes to the tag."""
    class SomeForm(Form):
        field = CharField()
    boundfield = SomeForm()['field']
    testcases = [  # (args, kwargs, expected)
        # without anything: just print the <label>
        ((), {}, '<label for="id_field">Field:</label>'),
        # passing just one argument: overrides the field's label
        (('custom',), {}, '<label for="id_field">custom:</label>'),
        # the overridden label is escaped
        # (fixed: plain 'custom&' must render HTML-escaped, unlike the
        # mark_safe case below)
        (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'),
        ((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'),
        # Passing attrs to add extra attributes on the <label>
        ((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>')
    ]
    for args, kwargs, expected in testcases:
        self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
    """
    If a widget has no id, label_tag just returns the text with no
    surrounding <label>.
    """
    class SomeForm(Form):
        field = CharField()
    # auto_id='' disables id generation entirely.
    boundfield = SomeForm(auto_id='')['field']
    self.assertHTMLEqual(boundfield.label_tag(), 'Field:')
    self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&:')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
    """A widget's id_for_label() controls the <label for="..."> value; when
    it returns None, the label is rendered without a `for` attribute."""
    class CustomIdForLabelTextInput(TextInput):
        def id_for_label(self, id):
            return 'custom_' + id
    class EmptyIdForLabelTextInput(TextInput):
        def id_for_label(self, id):
            return None
    class SomeForm(Form):
        custom = CharField(widget=CustomIdForLabelTextInput)
        empty = CharField(widget=EmptyIdForLabelTextInput)
    form = SomeForm()
    self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>')
    self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>')
def test_boundfield_empty_label(self):
    """label='' still renders a (content-free) <label> element."""
    class SomeForm(Form):
        field = CharField(label='')
    bound = SomeForm()['field']
    expected = '<label for="id_field"></label>'
    self.assertHTMLEqual(bound.label_tag(), expected)
def test_boundfield_id_for_label(self):
    """BoundField.id_for_label derives the auto 'id_<name>' identifier."""
    class SomeForm(Form):
        field = CharField(label='')
    bound = SomeForm()['field']
    self.assertEqual(bound.id_for_label, 'id_field')
def test_boundfield_id_for_label_override_by_attrs(self):
    """
    If an id is provided in `Widget.attrs`, it overrides the generated ID,
    unless it is `None`.
    """
    class SomeForm(Form):
        field = CharField(widget=forms.TextInput(attrs={'id': 'myCustomID'}))
        field_none = CharField(widget=forms.TextInput(attrs={'id': None}))
    form = SomeForm()
    self.assertEqual(form['field'].id_for_label, 'myCustomID')
    # attrs id of None falls back to the auto-generated id.
    self.assertEqual(form['field_none'].id_for_label, 'id_field_none')
def test_label_tag_override(self):
    """
    BoundField label_suffix (if provided) overrides Form label_suffix
    """
    class SomeForm(Form):
        field = CharField()
    bound = SomeForm(label_suffix='!')['field']
    rendered = bound.label_tag(label_suffix='$')
    # The per-call '$' suffix wins over the form-level '!'.
    self.assertHTMLEqual(rendered, '<label for="id_field">Field$</label>')
def test_field_name(self):
    """#5749 - `field_name` may be used as a key in _html_output()."""
    class SomeForm(Form):
        some_field = CharField()
        def as_p(self):
            # Custom row template referencing %(field_name)s only.
            return self._html_output(
                normal_row='<p id="p_%(field_name)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )
    form = SomeForm()
    self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_field_without_css_classes(self):
    """
    `css_classes` may be used as a key in _html_output() (empty classes).
    """
    class SomeForm(Form):
        some_field = CharField()
        def as_p(self):
            # With no error/required css classes configured, the
            # %(css_classes)s placeholder expands to an empty string.
            return self._html_output(
                normal_row='<p class="%(css_classes)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )
    form = SomeForm()
    self.assertHTMLEqual(form.as_p(), '<p class=""></p>')
def test_field_with_css_class(self):
    """
    `css_classes` may be used as a key in _html_output() (class comes
    from required_css_class in this case).
    """
    class SomeForm(Form):
        some_field = CharField()
        required_css_class = 'foo'
        def as_p(self):
            return self._html_output(
                normal_row='<p class="%(css_classes)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )
    form = SomeForm()
    # The required field row carries the configured 'foo' class.
    self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>')
def test_field_name_with_hidden_input(self):
    """
    BaseForm._html_output() should merge all the hidden input fields and
    put them in the last row.
    """
    class SomeForm(Form):
        hidden1 = CharField(widget=HiddenInput)
        custom = CharField()
        hidden2 = CharField(widget=HiddenInput)
        def as_p(self):
            return self._html_output(
                normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )
    form = SomeForm()
    # Both hidden inputs are appended inside the last visible row.
    self.assertHTMLEqual(
        form.as_p(),
        '<p><input id="id_custom" name="custom" type="text" /> custom'
        '<input id="id_hidden1" name="hidden1" type="hidden" />'
        '<input id="id_hidden2" name="hidden2" type="hidden" /></p>'
    )
def test_field_name_with_hidden_input_and_non_matching_row_ender(self):
    """
    BaseForm._html_output() should merge all the hidden input fields and
    put them in the last row ended with the specific row ender.
    """
    class SomeForm(Form):
        hidden1 = CharField(widget=HiddenInput)
        custom = CharField()
        hidden2 = CharField(widget=HiddenInput)
        def as_p(self):
            # row_ender deliberately does NOT match the normal_row closing
            # tag, so hidden inputs are appended in their own trailing row.
            return self._html_output(
                normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
                error_row='%s',
                row_ender='<hr/><hr/>',
                help_text_html=' %s',
                errors_on_separate_row=True
            )
    form = SomeForm()
    self.assertHTMLEqual(
        form.as_p(),
        '<p><input id="id_custom" name="custom" type="text" /> custom</p>\n'
        '<input id="id_hidden1" name="hidden1" type="hidden" />'
        '<input id="id_hidden2" name="hidden2" type="hidden" /><hr/><hr/>'
    )
def test_error_dict(self):
    """Form.errors serializes as text, <ul> HTML and JSON; errors raised
    from Form.clean() land under the '__all__' key."""
    class MyForm(Form):
        foo = CharField()
        bar = CharField()
        def clean(self):
            raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2})
    form = MyForm({})
    self.assertEqual(form.is_valid(), False)
    errors = form.errors.as_text()
    control = [
        '* foo\n * This field is required.',
        '* bar\n * This field is required.',
        '* __all__\n * Non-field error.',
    ]
    for error in control:
        self.assertIn(error, errors)
    errors = form.errors.as_ul()
    control = [
        '<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
        '<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
        '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>',
    ]
    for error in control:
        self.assertInHTML(error, errors)
    # as_json() preserves the error codes alongside the messages.
    errors = json.loads(form.errors.as_json())
    control = {
        'foo': [{'code': 'required', 'message': 'This field is required.'}],
        'bar': [{'code': 'required', 'message': 'This field is required.'}],
        '__all__': [{'code': 'secret', 'message': 'Non-field error.'}]
    }
    self.assertEqual(errors, control)
def test_error_dict_as_json_escape_html(self):
    """#21962 - adding html escape flag to ErrorDict"""
    class MyForm(Form):
        foo = CharField()
        bar = CharField()
        def clean(self):
            raise ValidationError('<p>Non-field error.</p>',
                                  code='secret',
                                  params={'a': 1, 'b': 2})
    control = {
        'foo': [{'code': 'required', 'message': 'This field is required.'}],
        'bar': [{'code': 'required', 'message': 'This field is required.'}],
        '__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}]
    }
    form = MyForm({})
    self.assertFalse(form.is_valid())
    # Default: messages are serialized verbatim.
    errors = json.loads(form.errors.as_json())
    self.assertEqual(errors, control)
    # With escape_html=True the message must be HTML-escaped in the JSON
    # output. (Fixed: the control value was previously reassigned the same
    # unescaped string, making the assertion compare against raw HTML.)
    errors = json.loads(form.errors.as_json(escape_html=True))
    control['__all__'][0]['message'] = '&lt;p&gt;Non-field error.&lt;/p&gt;'
    self.assertEqual(errors, control)
def test_error_list(self):
    """ErrorList behaves as a list, accepts plain strings and
    ValidationErrors (with %-interpolated params), and serializes as
    text, <ul> HTML and JSON."""
    e = ErrorList()
    e.append('Foo')
    # params are interpolated into the message: 'Foo%(bar)s' -> 'Foobar'.
    e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
    self.assertIsInstance(e, list)
    self.assertIn('Foo', e)
    self.assertIn('Foo', forms.ValidationError(e))
    self.assertEqual(
        e.as_text(),
        '* Foo\n* Foobar'
    )
    self.assertEqual(
        e.as_ul(),
        '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
    )
    # Plain-string entries get an empty code in JSON output.
    self.assertEqual(
        json.loads(e.as_json()),
        [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}]
    )
def test_error_list_class_not_specified(self):
    """Without an explicit error_class, as_ul() uses only 'errorlist'."""
    errors = ErrorList()
    errors.append('Foo')
    errors.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
    rendered = errors.as_ul()
    self.assertEqual(rendered, '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>')
    def test_error_list_class_has_one_class_specified(self):
        """A custom error_class is appended after the default "errorlist"
        class in the rendered <ul>."""
        e = ErrorList(error_class='foobar-error-class')
        e.append('Foo')
        e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
        self.assertEqual(
            e.as_ul(),
            '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>'
        )
    def test_error_list_with_hidden_field_errors_has_correct_class(self):
        """Errors on hidden fields are reported as non-field errors (with the
        "nonfield" class) since the hidden widget can't display them."""
        class Person(Form):
            first_name = CharField()
            last_name = CharField(widget=HiddenInput)
        # last_name is required but missing, producing a hidden-field error.
        p = Person({'first_name': 'John'})
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></li>"""
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></p>"""
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></td></tr>"""
        )
    def test_error_list_with_non_field_errors_has_correct_class(self):
        """Non-field errors raised from Form.clean() render with the
        "nonfield" class in every output format."""
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            def clean(self):
                raise ValidationError('Generic validation error')
        p = Person({'first_name': 'John', 'last_name': 'Lennon'})
        self.assertHTMLEqual(
            str(p.non_field_errors()),
            '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
        )
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist nonfield"><li>Generic validation error</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></li>
<li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>"""
        )
        self.assertHTMLEqual(
            p.non_field_errors().as_text(),
            '* Generic validation error'
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></p>
<p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>"""
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>"""
        )
    def test_errorlist_override(self):
        """A custom ErrorList subclass passed via error_class controls how
        field errors are rendered (here as <div>s instead of <ul>)."""
        @python_2_unicode_compatible
        class DivErrorList(ErrorList):
            def __str__(self):
                return self.as_divs()
            def as_divs(self):
                if not self:
                    return ''
                return '<div class="errorlist">%s</div>' % ''.join(
                    '<div class="error">%s</div>' % force_text(e) for e in self)
        class CommentForm(Form):
            name = CharField(max_length=50, required=False)
            email = EmailField()
            comment = CharField()
        # email is invalid and comment is missing; name is optional.
        data = dict(email='invalid')
        f = CommentForm(data, auto_id=False, error_class=DivErrorList)
        self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
    def test_baseform_repr(self):
        """
        BaseForm.__repr__() should contain some basic information about the
        form.
        """
        # Unbound form: validity is unknown until is_valid() is called.
        p = Person()
        self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>")
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
        self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>")
        p.is_valid()
        self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>")
        # An invalid birthday flips valid=True to valid=False after validation.
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
        p.is_valid()
        self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>")
    def test_baseform_repr_dont_trigger_validation(self):
        """
        BaseForm.__repr__() shouldn't trigger the form validation.
        """
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
        repr(p)
        # cleaned_data must not exist yet: repr() did not run validation.
        self.assertRaises(AttributeError, lambda: p.cleaned_data)
        self.assertFalse(p.is_valid())
        # After explicit validation only the valid fields survive.
        self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'})
    def test_accessing_clean(self):
        """Form.clean() may read and mutate self.cleaned_data and return it."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                data = self.cleaned_data
                if not self.errors:
                    data['username'] = data['username'].lower()
                return data
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        # The mutation performed in clean() is visible in cleaned_data.
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_changing_cleaned_data_nothing_returned(self):
        """When clean() returns None, in-place changes to self.cleaned_data
        are still kept."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                self.cleaned_data['username'] = self.cleaned_data['username'].lower()
                # don't return anything
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_changing_cleaned_data_in_clean(self):
        """clean() may return a brand-new dict, which replaces cleaned_data."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                data = self.cleaned_data
                # Return a different dict. We have not changed self.cleaned_data.
                return {
                    'username': data['username'].lower(),
                    'password': 'this_is_not_a_secret',
                }
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        # The returned dict becomes the form's cleaned_data.
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_multipart_encoded_form(self):
        """is_multipart() is True exactly when the form has a file-upload
        field (FileField or ImageField)."""
        class FormWithoutFile(Form):
            username = CharField()
        class FormWithFile(Form):
            username = CharField()
            file = FileField()
        class FormWithImage(Form):
            image = ImageField()
        self.assertFalse(FormWithoutFile().is_multipart())
        self.assertTrue(FormWithFile().is_multipart())
        self.assertTrue(FormWithImage().is_multipart())
    def test_html_safe(self):
        """Forms and bound fields implement __html__ (the html_safe protocol)
        and it matches their text rendering."""
        class SimpleForm(Form):
            username = CharField()
        form = SimpleForm()
        self.assertTrue(hasattr(SimpleForm, '__html__'))
        self.assertEqual(force_text(form), form.__html__())
        self.assertTrue(hasattr(form['username'], '__html__'))
        self.assertEqual(force_text(form['username']), form['username'].__html__())
| bsd-3-clause |
android-ia/platform_tools_idea | plugins/hg4idea/testData/bin/mercurial/lsprof.py | 96 | 3673 | import sys
from _lsprof import Profiler, profiler_entry
__all__ = ['profile', 'Stats']
def profile(f, *args, **kwds):
    """Run f(*args, **kwds) under the _lsprof profiler and return a Stats
    object wrapping the collected profile entries.

    Sub-calls and builtin calls are both recorded. The profiler is always
    disabled afterwards, even if f raises; f's return value is discarded.
    """
    p = Profiler()
    p.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        p.disable()
    return Stats(p.getstats())
class Stats(object):
    """Wrapper around the raw profiler_entry list produced by _lsprof,
    with sorting, pretty-printing and pickling support."""
    def __init__(self, data):
        # data: list of profiler_entry objects from Profiler.getstats().
        self.data = data
    def sort(self, crit="inlinetime"):
        """Sort entries (and their per-callee sub-entries) in-place,
        descending, by any profiler_entry attribute name."""
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        self.data.sort(key=lambda x: getattr(x, crit), reverse=True)
        for e in self.data:
            if e.calls:
                e.calls.sort(key=lambda x: getattr(x, crit), reverse=True)
    def pprint(self, top=None, file=None, limit=None, climit=None):
        """Write a table of entries to file (default stdout).

        top: only consider the first `top` entries; limit: stop after
        printing `limit` rows total; climit: cap sub-call rows per entry.
        """
        if file is None:
            file = sys.stdout
        d = self.data
        if top is not None:
            d = d[:top]
        cols = "% 12s %12s %11.4f %11.4f %s\n"
        hcols = "% 12s %12s %12s %12s %s\n"
        file.write(hcols % ("CallCount", "Recursive", "Total(s)",
                            "Inline(s)", "module:lineno(function)"))
        count = 0
        for e in d:
            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
                               e.inlinetime, label(e.code)))
            count += 1
            if limit is not None and count == limit:
                return
            ccount = 0
            if climit and e.calls:
                for se in e.calls:
                    file.write(cols % (se.callcount, se.reccallcount,
                                       se.totaltime, se.inlinetime,
                                       "    %s" % label(se.code)))
                    count += 1
                    ccount += 1
                    # NOTE: the row limit is also honoured inside sub-calls.
                    if limit is not None and count == limit:
                        return
                    if climit is not None and ccount == climit:
                        break
    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""
        # this code is probably rather ickier than it needs to be!
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])
# Cache mapping a code object's filename to the module name found for it.
_fn2mod = {}
def label(code):
    """Return a "module:lineno(function)" description for a code object.

    Strings (already-frozen labels) pass through unchanged. The module is
    found by scanning sys.modules for one whose __file__ matches the code's
    filename; results are memoized in _fn2mod.
    NOTE(review): sys.modules.iteritems() is Python 2 only — this is a
    Python 2 file (see the `print >>` usage in the __main__ block).
    """
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        for k, v in list(sys.modules.iteritems()):
            if v is None:
                continue
            if not isinstance(getattr(v, '__file__', None), str):
                continue
            if v.__file__.startswith(code.co_filename):
                mname = _fn2mod[code.co_filename] = k
                break
        else:
            # No module matched; fall back to the bare filename.
            mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename
    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
# Command-line entry point: profile a script given as argv[0] and print the
# sorted stats. Python 2 only (print statement and execfile below).
if __name__ == '__main__':
    import os
    sys.argv = sys.argv[1:]
    if not sys.argv:
        print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
        sys.exit(2)
    # Make imports relative to the profiled script work.
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()
| apache-2.0 |
Konubinix/weboob | modules/mangareader/module.py | 7 | 1388 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Noé Rubinstein
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderModule, DisplayPage
__all__ = ['MangareaderModule']
class MangareaderModule(GenericComicReaderModule):
    """Declarative weboob module for reading manga on mangareader.net.

    All behaviour comes from GenericComicReaderModule; this class only
    supplies site-specific constants (XPaths, URL patterns, page classes).
    """
    NAME = 'mangareader'
    DESCRIPTION = 'MangaReader manga reading website'
    DOMAIN = 'www.mangareader.net'
    # XPaths used by the generic browser to find the page image and the
    # per-chapter page list.
    BROWSER_PARAMS = dict(
        img_src_xpath="//img[@id='img']/@src",
        page_list_xpath="//select[@id='pageMenu']/option/@value")
    # A gallery id is "<series>/<chapter>".
    ID_REGEXP = r'[^/]+/[^/]+'
    URL_REGEXP = r'.+mangareader.net/(%s).+' % ID_REGEXP
    ID_TO_URL = 'http://www.mangareader.net/%s'
    PAGES = {r'http://.+\.mangareader.net/.+': DisplayPage} # oh well
| agpl-3.0 |
google/seq2seq | seq2seq/decoders/attention_decoder.py | 4 | 6750 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A basic sequence decoder that performs a softmax based on the RNN state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import tensorflow as tf
from seq2seq.decoders.rnn_decoder import RNNDecoder
from seq2seq.contrib.seq2seq.helper import CustomHelper
class AttentionDecoderOutput(
    namedtuple("DecoderOutput", [
        "logits", "predicted_ids", "cell_output", "attention_scores",
        "attention_context"
    ])):
  """Augmented decoder output that also includes the attention scores.

  Extends the plain (logits, predicted_ids, cell_output) decoder output
  with the per-step attention distribution and the attended context vector.
  """
  pass
class AttentionDecoder(RNNDecoder):
  """An RNN Decoder that uses attention over an input sequence.
  Args:
    cell: An instance of ` tf.contrib.rnn.RNNCell`
    helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding
    initial_state: A tensor or tuple of tensors used as the initial cell
      state.
    vocab_size: Output vocabulary size, i.e. number of units
      in the softmax layer
    attention_keys: The sequence used to calculate attention scores.
      A tensor of shape `[B, T, ...]`.
    attention_values: The sequence to attend over.
      A tensor of shape `[B, T, input_dim]`.
    attention_values_length: Sequence length of the attention values.
      An int32 Tensor of shape `[B]`.
    attention_fn: The attention function to use. This function map from
      `(state, inputs)` to `(attention_scores, attention_context)`.
      For an example, see `seq2seq.decoder.attention.AttentionLayer`.
    reverse_scores: Optional, an array of sequence length. If set,
      reverse the attention scores in the output. This is used for when
      a reversed source sequence is fed as an input but you want to
      return the scores in non-reversed order.
  """
  def __init__(self,
               params,
               mode,
               vocab_size,
               attention_keys,
               attention_values,
               attention_values_length,
               attention_fn,
               reverse_scores_lengths=None,
               name="attention_decoder"):
    super(AttentionDecoder, self).__init__(params, mode, name)
    self.vocab_size = vocab_size
    self.attention_keys = attention_keys
    self.attention_values = attention_values
    self.attention_values_length = attention_values_length
    self.attention_fn = attention_fn
    self.reverse_scores_lengths = reverse_scores_lengths
  @property
  def output_size(self):
    # Per-element sizes matching the AttentionDecoderOutput fields.
    return AttentionDecoderOutput(
        logits=self.vocab_size,
        predicted_ids=tf.TensorShape([]),
        cell_output=self.cell.output_size,
        attention_scores=tf.shape(self.attention_values)[1:-1],
        attention_context=self.attention_values.get_shape()[-1])
  @property
  def output_dtype(self):
    # Dtypes matching the AttentionDecoderOutput fields.
    return AttentionDecoderOutput(
        logits=tf.float32,
        predicted_ids=tf.int32,
        cell_output=tf.float32,
        attention_scores=tf.float32,
        attention_context=tf.float32)
  def initialize(self, name=None):
    # First decoder input is the helper's start input concatenated with a
    # zero attention context (no attention has been computed yet).
    finished, first_inputs = self.helper.initialize()
    # Concat empty attention context
    attention_context = tf.zeros([
        tf.shape(first_inputs)[0],
        self.attention_values.get_shape().as_list()[-1]
    ])
    first_inputs = tf.concat([first_inputs, attention_context], 1)
    return finished, first_inputs, self.initial_state
  def compute_output(self, cell_output):
    """Computes the decoder outputs."""
    # Compute attention
    att_scores, attention_context = self.attention_fn(
        query=cell_output,
        keys=self.attention_keys,
        values=self.attention_values,
        values_length=self.attention_values_length)
    # TODO: Make this a parameter: We may or may not want this.
    # Transform attention context.
    # This makes the softmax smaller and allows us to synthesize information
    # between decoder state and attention context
    # see https://arxiv.org/abs/1508.04025v5
    softmax_input = tf.contrib.layers.fully_connected(
        inputs=tf.concat([cell_output, attention_context], 1),
        num_outputs=self.cell.output_size,
        activation_fn=tf.nn.tanh,
        scope="attention_mix")
    # Softmax computation
    logits = tf.contrib.layers.fully_connected(
        inputs=softmax_input,
        num_outputs=self.vocab_size,
        activation_fn=None,
        scope="logits")
    return softmax_input, logits, att_scores, attention_context
  def _setup(self, initial_state, helper):
    # Wrap the helper so every next-input gets the previous step's
    # attention context appended, matching initialize() above.
    self.initial_state = initial_state
    def att_next_inputs(time, outputs, state, sample_ids, name=None):
      """Wraps the original decoder helper function to append the attention
      context.
      """
      finished, next_inputs, next_state = helper.next_inputs(
          time=time,
          outputs=outputs,
          state=state,
          sample_ids=sample_ids,
          name=name)
      next_inputs = tf.concat([next_inputs, outputs.attention_context], 1)
      return (finished, next_inputs, next_state)
    self.helper = CustomHelper(
        initialize_fn=helper.initialize,
        sample_fn=helper.sample,
        next_inputs_fn=att_next_inputs)
  def step(self, time_, inputs, state, name=None):
    # One decode step: RNN cell -> attention/softmax -> sample -> next input.
    cell_output, cell_state = self.cell(inputs, state)
    cell_output_new, logits, attention_scores, attention_context = \
      self.compute_output(cell_output)
    if self.reverse_scores_lengths is not None:
      # Undo the source-sequence reversal in the reported scores.
      attention_scores = tf.reverse_sequence(
          input=attention_scores,
          seq_lengths=self.reverse_scores_lengths,
          seq_dim=1,
          batch_dim=0)
    sample_ids = self.helper.sample(
        time=time_, outputs=logits, state=cell_state)
    outputs = AttentionDecoderOutput(
        logits=logits,
        predicted_ids=sample_ids,
        cell_output=cell_output_new,
        attention_scores=attention_scores,
        attention_context=attention_context)
    finished, next_inputs, next_state = self.helper.next_inputs(
        time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids)
    return (outputs, next_state, next_inputs, finished)
| apache-2.0 |
elastic/elasticsearch-parent | dev-tools/build_release.py | 7 | 17678 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import argparse
import github3
import smtplib
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from os.path import dirname, abspath
"""
This tool builds a release from the a given elasticsearch plugin branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch master --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done.
$ python3 dev_tools/build_release.py --publish --remote origin
The script takes over almost all
steps necessary for a release from a high level point of view it does the following things:
- run prerequisite checks
- detect the version to release from the specified branch (--branch) or the current branch
- creates a version release branch & updates pom.xml to point to a release version rather than a snapshot
- builds the artifacts
- commits the new version and merges the version release branch into the source branch
- merges the master release branch into the master branch
- creates a tag and pushes branch and master to the specified origin (--remote)
- publishes the releases to sonatype
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
"""
env = os.environ
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../'))
POM_FILE = ROOT_DIR + '/pom.xml'
##########################################################
#
# Utility methods (log and run)
#
##########################################################
# Log a message to the LOG file, prefixed with a newline.
def log(msg):
    log_plain('\n%s' % msg)
# Purge the log file; a missing log file is not an error.
def purge_log():
    try:
        os.remove(LOG)
    except FileNotFoundError:
        pass
# Log a message to the LOG file
def log_plain(msg):
    """Append *msg*, UTF-8 encoded, to the shared LOG file.

    Uses a context manager so the file handle is closed even if the
    write raises; the previous open/write/close sequence leaked the
    handle on a failed write.
    """
    with open(LOG, mode='ab') as f:
        f.write(msg.encode('utf-8'))
# Run a command and log it
def run(command, quiet=False):
    """Run *command* via the shell, appending its output to LOG.

    Raises RuntimeError on a non-zero exit status (printed first unless
    quiet=True).
    """
    log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
    if os.system('%s >> %s 2>&1' % (command, LOG)):
        msg = ' FAILED: %s [see log %s]' % (command, LOG)
        if not quiet:
            print(msg)
        raise RuntimeError(msg)
##########################################################
#
# Clean logs and check JAVA and Maven
#
##########################################################
# Module import side effects: purge the log, require JAVA_HOME, and probe
# for a `mvn3` binary (some systems install Maven 2 as plain `mvn`).
try:
    purge_log()
    JAVA_HOME = env['JAVA_HOME']
except KeyError:
    raise RuntimeError("""
  Please set JAVA_HOME in the env before running release tool
  On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
    MVN = 'mvn'
    # make sure mvn3 is used if mvn3 is available
    # some systems use maven 2 as default
    run('mvn3 --version', quiet=True)
    MVN = 'mvn3'
except RuntimeError:
    # mvn3 not available; fall back to plain `mvn`.
    pass
def java_exe():
    """Return a shell prefix exporting JAVA_HOME/PATH/JAVACMD so the
    subsequent maven invocation uses the configured JDK."""
    path = JAVA_HOME
    return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
##########################################################
#
# String and file manipulation utils
#
##########################################################
# Utility that returns the name of the release branch for a given version
def release_branch(branchsource, version):
    """Name of the temporary release branch for *branchsource*/*version*."""
    return 'release_branch_{0}_{1}'.format(branchsource, version)
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
    """Apply *line_callback* to each line of *file_path*.

    If any line changed, the file is atomically replaced with the rewritten
    content and True is returned; otherwise the file is left untouched and
    False is returned.
    """
    handle, tmp_path = tempfile.mkstemp()
    changed = False
    with open(tmp_path, 'w', encoding='utf-8') as rewritten:
        with open(file_path, encoding='utf-8') as source:
            for line in source:
                replacement = line_callback(line)
                if replacement != line:
                    changed = True
                rewritten.write(replacement)
    # mkstemp hands us a raw fd we never write through; close it explicitly.
    os.close(handle)
    if not changed:
        # Nothing to do - just remove the tmp file.
        os.remove(tmp_path)
        return False
    os.remove(file_path)
    shutil.move(tmp_path, file_path)
    return True
# Split a version x.y.z as an array of digits [x,y,z]
def split_version_to_digits(version):
    """Return every integer component of *version*, e.g. '1.2.3' -> [1, 2, 3]."""
    return [int(part) for part in re.findall(r'\d+', version)]
# Guess the next snapshot version number (increment last digit)
def guess_snapshot(version):
    """Return *version* with its patch digit bumped, e.g. '1.4.2' -> '1.4.3'.

    Any suffix (such as '-SNAPSHOT') is preserved because only the
    'x.y.z' substring is replaced.
    """
    major, minor, patch = [int(part) for part in re.findall(r'\d+', version)][:3]
    current = '%s.%s.%s' % (major, minor, patch)
    bumped = '%s.%s.%s' % (major, minor, patch + 1)
    return version.replace(current, bumped)
# Guess the anchor in generated documentation
# Looks like this "#version-230-for-elasticsearch-13"
def get_doc_anchor(release, esversion):
    """Build the docs anchor for a plugin *release* against *esversion*.

    Uses all three digits of the plugin version but only the first two of
    the elasticsearch version.
    """
    plugin_digits = [int(part) for part in re.findall(r'\d+', release)]
    es_digits = [int(part) for part in re.findall(r'\d+', esversion)]
    return '#version-{0}{1}{2}-for-elasticsearch-{3}{4}'.format(
        plugin_digits[0], plugin_digits[1], plugin_digits[2], es_digits[0], es_digits[1])
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
    """Rewrite *pom* so '<version>RELEASE-SNAPSHOT</version>' becomes
    '<version>RELEASE</version>'."""
    pattern = '<version>%s-SNAPSHOT</version>' % release
    replacement = '<version>%s</version>' % release
    def callback(line):
        return line.replace(pattern, replacement)
    process_file(pom, callback)
# Moves the pom.xml file to the next snapshot
def add_maven_snapshot(pom, release, snapshot):
    """Rewrite *pom* so the released version tag points at the next
    '<version>SNAPSHOT-SNAPSHOT</version>' development version."""
    pattern = '<version>%s</version>' % release
    replacement = '<version>%s-SNAPSHOT</version>' % snapshot
    def callback(line):
        return line.replace(pattern, replacement)
    process_file(pom, callback)
# Checks the pom.xml for the release version. <version>2.0.0-SNAPSHOT</version>
# This method fails if the pom file has no SNAPSHOT version set ie.
# if the version is already on a release version we fail.
# Returns the next version string ie. 0.90.7
def find_release_version(src_branch):
    """Check out *src_branch* and return the version from the first
    '<version>X-SNAPSHOT</version>' line in pom.xml (without '-SNAPSHOT')."""
    git_checkout(src_branch)
    with open(POM_FILE, encoding='utf-8') as file:
        for line in file:
            match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
            if match:
                return match.group(1)
    raise RuntimeError('Could not find release version in branch %s' % src_branch)
# extract a value from pom.xml
def find_from_pom(tag):
    """Return the text of the first '<tag>...</tag>' element in pom.xml,
    or raise RuntimeError if the tag is absent."""
    with open(POM_FILE, encoding='utf-8') as file:
        for line in file:
            match = re.search(r'<%s>(.+)</%s>' % (tag, tag), line)
            if match:
                return match.group(1)
    raise RuntimeError('Could not find <%s> in pom.xml file' % (tag))
##########################################################
#
# GIT commands
#
##########################################################
# Returns the hash of the current git HEAD revision
def get_head_hash():
    return os.popen('git rev-parse --verify HEAD 2>&1').read().strip()
# Returns the name of the current branch
def get_current_branch():
    return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
# Runs git fetch on the given remote
def fetch(remote):
    run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
    git_checkout(src_branch)
    run('git pull --rebase %s %s' % (remote, src_branch))
    run('git checkout -b %s' % (release_branch(src_branch, release)))
# Stages the given files for the next git commit
def add_pending_files(*files):
    for file in files:
        run('git add %s' % file)
# Executes a git commit with 'prepare release [artifact-version]' as the commit message
def commit_release(artifact_id, release):
    run('git commit -m "prepare release %s-%s"' % (artifact_id, release))
# Commit documentation changes on the master branch
def commit_master(release):
    run('git commit -m "update documentation with release %s"' % release)
# Commit next snapshot files
def commit_snapshot():
    run('git commit -m "prepare for next development iteration"')
# Put the annotated version tag v<release> on the current commit
def tag_release(release):
    run('git tag -a v%s -m "Tag release version %s"' % (release, release))
# Checkout a given branch
def git_checkout(branch):
    run('git checkout %s' % branch)
# Merge the release branch back into the source branch
def git_merge(src_branch, release_version):
    git_checkout(src_branch)
    run('git merge %s' % release_branch(src_branch, release_version))
# Push the source branch plus master, then the release tag; a dry run only
# prints what would have been pushed.
def git_push(remote, src_branch, release_version, dry_run):
    if not dry_run:
        run('git push %s %s master' % (remote, src_branch)) # push the commit and the master
        run('git push %s v%s' % (remote, release_version)) # push the tag
    else:
        print('  dryrun [True] -- skipping push to remote %s %s master' % (remote, src_branch))
##########################################################
#
# Maven commands
#
##########################################################
# Run each given maven goal against POM_FILE under the configured JDK
def run_mvn(*cmd):
    for c in cmd:
        run('%s; %s -f %s %s' % (java_exe(), MVN, POM_FILE, c))
# Run deploy or package depending on dry_run
# Default to run mvn package
# When run_tests=True a first mvn clean test is run
def build_release(run_tests=False, dry_run=True):
    # dry runs only `mvn package`; real releases `mvn deploy` to sonatype.
    target = 'deploy'
    tests = '-DskipTests'
    if run_tests:
        tests = ''
    if dry_run:
        target = 'package'
    run_mvn('clean %s %s' % (target, tests))
def print_sonatype_notice():
    """Print a reminder to configure sonatype credentials unless
    ~/.m2/settings.xml already contains a sonatype-nexus-snapshots server."""
    settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
    if os.path.isfile(settings):
        with open(settings, encoding='utf-8') as settings_file:
            for line in settings_file:
                if line.strip() == '<id>sonatype-nexus-snapshots</id>':
                    # moving out - we found the indicator no need to print the warning
                    return
    print("""
    NOTE: No sonatype settings detected, make sure you have configured
    your sonatype credentials in '~/.m2/settings.xml':
    <settings>
    ...
    <servers>
    <server>
    <id>sonatype-nexus-snapshots</id>
    <username>your-jira-id</username>
    <password>your-jira-pwd</password>
    </server>
    <server>
    <id>sonatype-nexus-staging</id>
    <username>your-jira-id</username>
    <password>your-jira-pwd</password>
    </server>
    </servers>
    ...
    </settings>
    """)
# Import-time side effect: warn if sonatype credentials are missing from
# ~/.m2/settings.xml.
print_sonatype_notice()
if __name__ == '__main__':
    # ---- command-line parsing -------------------------------------------
    parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Plugin Release')
    parser.add_argument('--branch', '-b', metavar='master', default=get_current_branch(),
                        help='The branch to release from. Defaults to the current branch.')
    parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
                        help='Skips tests before release. Tests are run by default.')
    parser.set_defaults(tests=True)
    parser.add_argument('--remote', '-r', metavar='origin', default='origin',
                        help='The remote to push the release commit and tag to. Default is [origin]')
    parser.add_argument('--publish', '-p', dest='dryrun', action='store_false',
                        help='Publishes the release. Disable by default.')
    parser.add_argument('--disable_mail', '-dm', dest='mail', action='store_false',
                        help='Do not send a release email. Email is sent by default.')
    parser.set_defaults(dryrun=True)
    parser.set_defaults(mail=True)
    args = parser.parse_args()
    src_branch = args.branch
    remote = args.remote
    run_tests = args.tests
    dry_run = args.dryrun
    mail = args.mail
    if src_branch == 'master':
        raise RuntimeError('Can not release the master branch. You need to create another branch before a release')
    if not dry_run:
        print('WARNING: dryrun is set to "false" - this will push and publish the release')
        input('Press Enter to continue...')
    # ---- gather release metadata from the pom ---------------------------
    print(''.join(['-' for _ in range(80)]))
    print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
    print(' JAVA_HOME is [%s]' % JAVA_HOME)
    print(' Running with maven command: [%s] ' % (MVN))
    release_version = find_release_version(src_branch)
    artifact_id = find_from_pom('artifactId')
    artifact_name = find_from_pom('name')
    artifact_description = find_from_pom('description')
    elasticsearch_version = find_from_pom('elasticsearch.version')
    print(' Artifact Id: [%s]' % artifact_id)
    print(' Release version: [%s]' % release_version)
    print(' Elasticsearch: [%s]' % elasticsearch_version)
    if elasticsearch_version.find('-SNAPSHOT') != -1:
        raise RuntimeError('Can not release with a SNAPSHOT elasticsearch dependency: %s' % elasticsearch_version)
    # extract snapshot
    default_snapshot_version = guess_snapshot(release_version)
    snapshot_version = input('Enter next snapshot version [%s]:' % default_snapshot_version)
    snapshot_version = snapshot_version or default_snapshot_version
    print(' Next version: [%s-SNAPSHOT]' % snapshot_version)
    print(' Artifact Name: [%s]' % artifact_name)
    print(' Artifact Description: [%s]' % artifact_description)
    if not dry_run:
        smoke_test_version = release_version
    # ---- create the release branch; remember HEAD so we can roll back ---
    try:
        git_checkout(src_branch)
        version_hash = get_head_hash()
        run_mvn('clean') # clean the env!
        create_release_branch(remote, src_branch, release_version)
        print(' Created release branch [%s]' % (release_branch(src_branch, release_version)))
    except RuntimeError:
        print('Logs:')
        with open(LOG, 'r') as log_file:
            print(log_file.read())
        sys.exit(-1)
    success = False
    try:
        ########################################
        # Start update process in version branch
        ########################################
        pending_files = [POM_FILE]
        remove_maven_snapshot(POM_FILE, release_version)
        print(' Done removing snapshot version')
        add_pending_files(*pending_files) # expects var args use * to expand
        commit_release(artifact_id, release_version)
        print(' Committed release version [%s]' % release_version)
        print(''.join(['-' for _ in range(80)]))
        print('Building Release candidate')
        input('Press Enter to continue...')
        if not dry_run:
            print(' Running maven builds now and publish to sonatype - run-tests [%s]' % run_tests)
        else:
            print(' Running maven builds now run-tests [%s]' % run_tests)
        build_release(run_tests=run_tests, dry_run=dry_run)
        print(''.join(['-' for _ in range(80)]))
        print('Finish Release -- dry_run: %s' % dry_run)
        input('Press Enter to continue...')
        # Merge, tag, move the pom to the next snapshot and push everything.
        print(' merge release branch')
        git_merge(src_branch, release_version)
        print(' tag')
        tag_release(release_version)
        add_maven_snapshot(POM_FILE, release_version, snapshot_version)
        add_pending_files(*pending_files)
        commit_snapshot()
        print(' push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
        git_push(remote, src_branch, release_version, dry_run)
        pending_msg = """
Release successful pending steps:
    * close and release sonatype repo: https://oss.sonatype.org/
    * check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/%(artifact_id)s/%(version)s
"""
        print(pending_msg % {'version': release_version,
                             'artifact_id': artifact_id})
        success = True
    finally:
        # On failure (or after a dry run) restore the original HEAD, drop the
        # tag, and delete the temporary release branch.
        if not success:
            print('Logs:')
            with open(LOG, 'r') as log_file:
                print(log_file.read())
            git_checkout(src_branch)
            run('git reset --hard %s' % version_hash)
            try:
                run('git tag -d v%s' % release_version)
            except RuntimeError:
                pass
        elif dry_run:
            print('End of dry_run')
            input('Press Enter to reset changes...')
            git_checkout(src_branch)
            run('git reset --hard %s' % version_hash)
            run('git tag -d v%s' % release_version)
        # we delete this one anyways
        run('git branch -D %s' % (release_branch(src_branch, release_version)))
        # Checkout the branch we started from
        git_checkout(src_branch)
| apache-2.0 |
eschloss/FluFuture | openpds/connectors/funf/views.py | 3 | 6026 | #-*- coding: utf-8 -*-
from django.shortcuts import render_to_response
import datetime
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils import simplejson as json_simple
import dbmerge, os
import dbdecrypt
import decrypt
import sqlite3
import json, ast
from openpds import settings
from openpds.authorization import PDSAuthorization
from openpds.core.models import Profile
from openpds import getInternalDataStore
import pdb
import pdb
upload_dir = settings.SERVER_UPLOAD_DIR
def insert_pds(internalDataStore, token, pds_json):
    """Persist one decoded funf record through the owner's internal data store.

    ``token`` is accepted for call-site symmetry with the other helpers but is
    not used here; the data store was already built with the token.
    """
    # specify funf db for mongo, the other funf specific dbs will just ignore
    # this parameter
    internalDataStore.saveData(pds_json, 'funf')
def write_key(request):
    '''write the password used to encrypt funf database files to your PDS'''
    response = None
    try:
        # Raises KeyError (caught below -> 400) when no bearer_token is given.
        token = request.GET['bearer_token']
        # Fixed: the original first assigned the literal string "funf_write"
        # to ``scope`` and immediately overwrote it -- dead code removed.
        scope = AccessRange.objects.get(key="funf_write")
        authenticator = Authenticator(scope=scope)
        try:
            # Validate the request.
            authenticator.validate(request)
        except AuthenticationException:
            # Return an error response.
            return authenticator.error_response(content="You didn't authenticate.")
        # Store the funf encryption password on the requesting user's profile.
        profile = authenticator.user.get_profile()
        profile.funf_password = json.loads(request.raw_post_data)['key']
        profile.save()
        response_content = json.dumps({'status': 'success'})
        response = HttpResponse(content=response_content)
    except Exception as ex:
        # NOTE(review): AccessRange / Authenticator / AuthenticationException
        # are not in this file's visible import block -- confirm they resolve
        # at runtime; any NameError lands here as a generic 400.
        print("EXCEPTION:")
        print(ex)
        response = HttpResponseBadRequest('failed to write funf key')
    return response
def data(request):
    '''decrypt funf database files, and upload them to your PDS'''
    result = {}
    # NOTE(review): GET requests always get a 404 here; ``template`` is built
    # but never used -- confirm this branch is intentional dead weight.
    if request.method == 'GET':
        template = {'token':request.GET['bearer_token']}
        return HttpResponse("File not found", status=404)
    pds = None
    #scope = AccessRange.objects.get(key="funf_write")
    authorization = PDSAuthorization("funf_write", audit_enabled=False)
    if (not authorization.is_authorized(request)):
        return HttpResponse("Unauthorized", status=401)
    scope = 'funf_write'
    token = request.GET['bearer_token']
    datastore_owner_uuid = request.GET["datastore_owner__uuid"]
    datastore_owner, ds_owner_created = Profile.objects.get_or_create(uuid = datastore_owner_uuid)
    print "Creating IDS for %s" % datastore_owner_uuid
    #internalDataStore = getInternalDataStore(datastore_owner, "Living Lab", "Social Health Tracker", "Activity", token)
    internalDataStore = getInternalDataStore(datastore_owner, "Living Lab", "Social Health Tracker", token)
    #collection = connection[datastore_owner.getDBName()]["funf"]
    # NOTE(review): hard-coded decryption password; presumably the per-user
    # profile.funf_password (see write_key) was meant to be used -- confirm.
    funf_password = "changeme"
    key = decrypt.key_from_password(str(funf_password))
    print "PDS: set_funf_data on uuid: %s" % datastore_owner_uuid
    for filename, file in request.FILES.items():
        try:
            try:
                file_path = upload_dir + file.name
                write_file(str(file_path), file)
            except Exception as ex:
                print "failed to write file to "+file_path+". Please make sure you have write permission to the directory set in settings.SERVER_UPLOAD_DIR"
            # Decrypt in place (no-op if the upload is already a SQLite db),
            # then walk every name/value row of the funf ``data`` table.
            dbdecrypt.decrypt_if_not_db_file(file_path, key)
            # NOTE(review): ``con`` is never closed; consider contextlib.closing.
            con = sqlite3.connect(file_path)
            cur = con.cursor()
            cur.execute("select name, value from data")
            inserted = []
            for row in cur:
                name = convert_string(row[0])
                json_insert = clean_keys(json.JSONDecoder().decode(convert_string(row[1])))
                # Build the PDS envelope: probe time, payload, and probe name.
                pds_data= {}
                pds_data['time']=json_insert.get('timestamp')
                pds_data['value']=json_insert
                pds_data['key']=name
                insert_pds(internalDataStore, token, pds_data)
                inserted.append(convert_string(json_insert)+'\n')
            result = {'success': True, 'rows_inserted': len(inserted)}
            print "Inserted %s rows" % len(inserted)
        except Exception as e:
            print "Exception from funf_connector on pds:"
            print "%s"%e
            result = {'success':False, 'error_message':e.message}
        finally:
            # NOTE(review): this ``return`` sits in a finally-block INSIDE the
            # for-loop, so only the first uploaded file is ever processed and
            # any in-flight exception is silently discarded -- verify intent.
            # ``response_dict`` below is unused.
            response_dict = {"status":"success"}
            return HttpResponse(json.dumps(result), content_type='application/json')
TMP_FILE_SALT = '2l;3edF34t34$#%2fruigduy23@%^thfud234!FG%@#620k'
TEMP_DATA_LOCATION = "/data/temp/"
def random_hash(pk):
    """Return a 40-char hex token derived from *pk*, the salt and 20 random letters.

    Fixed: ``random``, ``string`` and ``hashlib`` are not imported at module
    level in this file, so the original raised NameError when called; they are
    imported locally here to keep the fix self-contained.
    """
    import hashlib
    import random
    import string
    # string.ascii_letters instead of the Python-2-only, locale-dependent
    # string.letters; range() works on both Python 2 and 3.
    # SECURITY NOTE: ``random`` is not cryptographically secure; for real
    # tokens prefer the ``secrets`` module (Python 3.6+).
    randstring = "".join(random.choice(string.ascii_letters) for _ in range(20))
    digest = hashlib.sha224((TMP_FILE_SALT + pk + randstring).encode('utf-8'))
    return digest.hexdigest()[0:40]
def direct_decrypt(file, key, extension=None):
    """Decrypt *file*'s entire contents with single DES and return the bytes.

    ``extension`` is accepted for interface compatibility but unused.
    """
    # Fixed idiom: compare against None with identity, not equality (PEP 8).
    assert key is not None
    decryptor = DES.new(key)  #TODO to make sure the key is 8 bytes long. DES won't accept a shorter key
    encrypted_data = file.read()
    data = decryptor.decrypt(encrypted_data)
    return data
def write_file(filename, file):
    """Stream the uploaded *file* object to disk in 1 KiB chunks.

    An existing file at the destination is moved aside via backup_file()
    before it is overwritten.
    """
    if not os.path.exists(upload_dir):
        os.mkdir(upload_dir)
    # NOTE(review): callers pass a path already prefixed with upload_dir; the
    # join below then only behaves because an absolute second argument wins.
    filepath = os.path.join(upload_dir, filename)
    if os.path.exists(filepath):
        backup_file(filepath)
    with open(filepath, 'wb') as output_file:
        chunk = file.read(1024)
        while chunk:
            output_file.write(chunk)
            chunk = file.read(1024)
def backup_file(filepath):
    """Move *filepath* aside to ``<filepath>.<epoch-millis>.bak``.

    Fixed: ``shutil`` and ``time`` are not imported at module level in this
    file, so the original raised NameError when a backup was actually needed;
    they are imported locally to keep the fix self-contained.
    """
    import shutil
    import time
    shutil.move(filepath, filepath + '.' + str(int(time.time() * 1000)) + '.bak')
def convert_string(s):
    """Coerce *s* to its %-formatted string representation."""
    # Deliberately kept as "%s" % s (not str()) so Python 2 unicode values
    # keep their type through formatting.
    text = "%s" % s
    return text
def clean_keys(d):
    '''replace all "." with "-" and force keys to lowercase'''
    # Fixed: use dict.items() instead of the Python-2-only iteritems(), so the
    # helper also runs under Python 3; behaviour is otherwise unchanged.
    new = {}
    for k, v in d.items():
        if isinstance(v, dict):
            # Recurse into nested documents.
            v = clean_keys(v)
        if isinstance(v, list):
            # Clean dict elements in place; non-dict elements pass through.
            for idx, i in enumerate(v):
                if isinstance(i, dict):
                    v[idx] = clean_keys(i)
        # MongoDB forbids '.' in keys; normalise case at the same time.
        new[k.replace('.', '-').lower()] = v
    return new
| mit |
indictranstech/erpnext | erpnext/patches/v6_4/fix_journal_entries_due_to_reconciliation.py | 29 | 1884 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
def execute():
	"""Patch: repair Journal Entry rows whose base-currency debit/credit fell
	out of sync with the account-currency amounts after reconciliation, then
	delete and repost the GL entries of every affected Journal Entry."""
	frappe.reload_doctype("Sales Invoice Advance")
	frappe.reload_doctype("Purchase Invoice Advance")
	# Rows (modified since 2015-09-17) where amount * exchange_rate no longer
	# matches the stored base-currency amount.
	je_rows = frappe.db.sql("""
		select name, parent, reference_type, reference_name, debit, credit
		from `tabJournal Entry Account`
		where docstatus=1 and date(modified) >= '2015-09-17'
		and ((ifnull(debit_in_account_currency, 0)*exchange_rate != ifnull(debit, 0))
			or (ifnull(credit_in_account_currency, 0)*exchange_rate != ifnull(credit, 0)))
		order by parent
	""", as_dict=True)
	journal_entries = []
	for d in je_rows:
		# Collect distinct parent Journal Entries for the repost pass below.
		if d.parent not in journal_entries:
			journal_entries.append(d.parent)
		is_advance_entry=None
		if d.reference_type in ("Sales Invoice", "Purchase Invoice") and d.reference_name:
			# Is this row allocated from an invoice's Advances child table?
			is_advance_entry = frappe.db.sql("""select name from `tab{0}`
				where reference_name=%s and reference_row=%s
				and ifnull(allocated_amount, 0) > 0 and docstatus=1"""
				.format(d.reference_type + " Advance"), (d.parent, d.name))
		if is_advance_entry or not (d.debit or d.credit):
			# Advance rows (or zero base amounts): treat the account-currency
			# figures as authoritative and recompute the base amounts.
			frappe.db.sql("""
				update `tabJournal Entry Account`
				set debit=debit_in_account_currency*exchange_rate,
				credit=credit_in_account_currency*exchange_rate
				where name=%s""", d.name)
		else:
			# Otherwise the base-currency figures are authoritative.
			frappe.db.sql("""
				update `tabJournal Entry Account`
				set debit_in_account_currency=debit/exchange_rate,
				credit_in_account_currency=credit/exchange_rate
				where name=%s""", d.name)
	for d in journal_entries:
		print(d)
		# delete existing gle
		frappe.db.sql("delete from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s", d)
		# repost gl entries
		je = frappe.get_doc("Journal Entry", d)
je.make_gl_entries() | agpl-3.0 |
weebygames/boto | boto/s3/multidelete.py | 244 | 4757 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto import handler
import xml.sax
class Deleted(object):
    """
    A successfully deleted object in a multi-object delete request.

    :ivar key: Key name of the object that was deleted.
    :ivar version_id: Version id of the object that was deleted.
    :ivar delete_marker: If True, indicates the object deleted
        was a DeleteMarker.
    :ivar delete_marker_version_id: Version ID of the delete marker
        deleted.
    """
    # XML element name -> attribute populated verbatim by endElement().
    _SIMPLE_FIELDS = {
        'Key': 'key',
        'VersionId': 'version_id',
        'DeleteMarkerVersionId': 'delete_marker_version_id',
    }

    def __init__(self, key=None, version_id=None,
                 delete_marker=False, delete_marker_version_id=None):
        self.key = key
        self.version_id = version_id
        self.delete_marker = delete_marker
        self.delete_marker_version_id = delete_marker_version_id

    def __repr__(self):
        label = self.key
        if self.version_id:
            label = '%s.%s' % (self.key, self.version_id)
        return '<Deleted: %s>' % label

    def startElement(self, name, attrs, connection):
        # No nested elements are expected inside a <Deleted> element.
        return None

    def endElement(self, name, value, connection):
        if name == 'DeleteMarker':
            # Only an explicit "true" flips the flag; anything else leaves the
            # constructor default untouched.
            if value.lower() == 'true':
                self.delete_marker = True
            return
        setattr(self, self._SIMPLE_FIELDS.get(name, name), value)
class Error(object):
    """
    An unsuccessful deleted object in a multi-object delete request.

    :ivar key: Key name of the object that was not deleted.
    :ivar version_id: Version id of the object that was not deleted.
    :ivar code: Status code of the failed delete operation.
    :ivar message: Status message of the failed delete operation.
    """
    # XML element name -> attribute populated verbatim by endElement().
    _FIELD_MAP = {
        'Key': 'key',
        'VersionId': 'version_id',
        'Code': 'code',
        'Message': 'message',
    }

    def __init__(self, key=None, version_id=None,
                 code=None, message=None):
        self.key = key
        self.version_id = version_id
        self.code = code
        self.message = message

    def __repr__(self):
        if self.version_id:
            return '<Error: %s.%s(%s)>' % (self.key, self.version_id,
                                           self.code)
        return '<Error: %s(%s)>' % (self.key, self.code)

    def startElement(self, name, attrs, connection):
        # <Error> carries only simple text children.
        return None

    def endElement(self, name, value, connection):
        setattr(self, self._FIELD_MAP.get(name, name), value)
class MultiDeleteResult(object):
    """
    The status returned from a MultiObject Delete request.

    :ivar bucket: The Bucket the delete request was issued against.
    :ivar deleted: A list of successfully deleted objects.  Note that if
        the quiet flag was specified in the request, this list will
        be empty because only error responses would be returned.
    :ivar errors: A list of unsuccessfully deleted objects.
    """

    def __init__(self, bucket=None):
        # Bug fix: the original assigned None unconditionally, silently
        # dropping the bucket passed in by the caller.
        self.bucket = bucket
        self.deleted = []
        self.errors = []

    def startElement(self, name, attrs, connection):
        # Delegate parsing of child elements to per-entry handler objects.
        if name == 'Deleted':
            d = Deleted()
            self.deleted.append(d)
            return d
        elif name == 'Error':
            e = Error()
            self.errors.append(e)
            return e
        return None

    def endElement(self, name, value, connection):
        # Any other simple element (e.g. RequestId) is stored as-is.
        setattr(self, name, value)
| mit |
trhura/python-myanmar | myanmar/romanizer/ipa.py | 1 | 3114 | # ipa.py - ipa transliteration module
# coding: utf-8
# The MIT License (MIT)
# Credit for IPA rules - Wikipedia, LionSlayer ...
# Copyright (c) 2018 Thura Hlaing
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import json
import pkgutil
class IPA():
    """Post-processing rules for IPA transliterations of Burmese syllables.

    ``normalize`` applies voicing/sandhi-style adjustments to a syllable's
    IPA based on the previous syllable's IPA (*prev*).
    """
    # Transliteration table shipped with the package; loaded once at
    # class-creation time (raises if the package data is missing).
    table = json.loads(
        pkgutil.get_data('myanmar', 'data/ipa.json').decode('utf-8')
    )
    # Vowel characters searched by has_vowel().
    vowels = 'aáeèioóôu'
    @classmethod
    def normalize(cls, ipa, prev):
        # Rules only apply mid-word, i.e. when a preceding syllable exists.
        if prev:
            ipa = cls.add_ə(ipa, prev)
            ipa = cls.change_k_to_g(ipa, prev)
            ipa = cls.change_s_to_z(ipa, prev)
            ipa = cls.change_p_to_b(ipa, prev)
            ipa = cls.change_t_to_d(ipa, prev)
        return ipa
    @classmethod
    def add_ə(cls, ipa, prev):
        # Prefix schwa when the previous syllable is (effectively) one unit.
        prev_len = 0
        # NOTE(review): prev[-2] is a single character, so len(prev[-2]) == 1
        # is always true; perhaps len(prev[:-2]) == 1 (aspirated single
        # consonant) was intended -- confirm against the source rules.
        if prev[-1] == 'ʰ' and len(prev[-2]) == 1:
            prev_len = 1
        if len(prev) == 1 or prev_len == 1:
            ipa = 'ə' + ipa
        return ipa
    @classmethod
    def change_k_to_g(cls, ipa, prev):
        # change k to g after vowel sound
        if ipa.startswith('k') and cls.ends_with_vowel(prev):
            ipa = 'g' + ipa[1:]
        return ipa
    @classmethod
    def change_s_to_z(cls, ipa, prev):
        # change s to z after vowel sound
        if ipa.startswith('s') and cls.ends_with_vowel(prev):
            ipa = 'z' + ipa[1:]
        return ipa
    @classmethod
    def change_p_to_b(cls, ipa, prev):
        # change pa to ba after vowel sound
        # NOTE(review): unlike the k/s/t rules this checks has_vowel(ipa),
        # not ends_with_vowel(prev) -- confirm that asymmetry is deliberate.
        if ipa.startswith('p') and cls.has_vowel(ipa):
            ipa = 'b' + ipa[1:]
        return ipa
    @classmethod
    def change_t_to_d(cls, ipa, prev):
        # change t to d after vowel sound (but not aspirated 'th')
        startswitht = ipa.startswith('t') and not ipa.startswith('th')
        if startswitht and cls.ends_with_vowel(prev):
            ipa = 'd' + ipa[1:]
        return ipa
    @classmethod
    def ends_with_vowel(cls, ipa):
        # Final vowel or nasal counts as a "vowel sound" for voicing rules.
        return ipa[-1] in 'aàeioun' or ipa.endswith('ng')
    @classmethod
    def has_vowel(cls, ipa):
        # if any of cls.vowels exists in IPA
        return any(ipa.find(v) != -1 for v in cls.vowels)
| mit |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/sessions/middleware.py | 90 | 1813 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    """Attach a session to each request and persist it on the response."""
    def process_request(self, request):
        # SESSION_ENGINE names the backend module (db, cache, file, ...);
        # a missing cookie yields session_key=None -> a new, empty session.
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)
    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # request.session was removed/replaced by other middleware; there
            # is nothing to persist.
            pass
        else:
            if accessed:
                # Once the session was read, the response depends on the
                # session cookie and caches must vary on it.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Browser-session cookie: no explicit expiry.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                request.session.save()
                response.set_cookie(settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None)
        return response
| apache-2.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_dynamicclassattribute.py | 19 | 9795 | # Test case for DynamicClassAttribute
# more tests are in test_descr
import abc
import sys
import unittest
from types import DynamicClassAttribute
# Sentinel exceptions raised by SubClass's accessors so the tests can tell
# exactly which accessor (getter/setter/deleter) ran.
class PropertyBase(Exception):
    pass
class PropertyGet(PropertyBase):
    pass
class PropertySet(PropertyBase):
    pass
class PropertyDel(PropertyBase):
    pass
# Fixture: a DynamicClassAttribute with getter/setter/deleter.  The docstrings
# on the accessors below are asserted by the tests -- do not change them.
class BaseClass(object):
    def __init__(self):
        self._spam = 5
    @DynamicClassAttribute
    def spam(self):
        """BaseClass.getter"""
        return self._spam
    @spam.setter
    def spam(self, value):
        self._spam = value
    @spam.deleter
    def spam(self):
        del self._spam
# Fixture: re-decorates the inherited attribute.  The __dict__ lookup is
# required because a DynamicClassAttribute is not reachable through normal
# class attribute access.
class SubClass(BaseClass):
    spam = BaseClass.__dict__['spam']
    @spam.getter
    def spam(self):
        """SubClass.getter"""
        raise PropertyGet(self._spam)
    @spam.setter
    def spam(self, value):
        raise PropertySet(self._spam)
    @spam.deleter
    def spam(self):
        raise PropertyDel(self._spam)
# Fixture: explicit doc= argument should win over any function docstring.
class PropertyDocBase(object):
    _spam = 1
    def _get_spam(self):
        return self._spam
    spam = DynamicClassAttribute(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
    spam = PropertyDocBase.__dict__['spam']
    @spam.getter
    def spam(self):
        """The decorator does not use this doc string"""
        return self._spam
# Fixture: a new getter supplies a new docstring.
class PropertySubNewGetter(BaseClass):
    spam = BaseClass.__dict__['spam']
    @spam.getter
    def spam(self):
        """new docstring"""
        return 5
class PropertyNewGetter(object):
    @DynamicClassAttribute
    def spam(self):
        """original docstring"""
        return 1
    @spam.getter
    def spam(self):
        """new docstring"""
        return 8
# Fixtures: abstract DynamicClassAttribute in both decorator orders; either
# way the class must remain abstract (uninstantiable).
class ClassWithAbstractVirtualProperty(metaclass=abc.ABCMeta):
    @DynamicClassAttribute
    @abc.abstractmethod
    def color():
        pass
class ClassWithPropertyAbstractVirtual(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    @DynamicClassAttribute
    def color():
        pass
class PropertyTests(unittest.TestCase):
    """Behavioural tests mirroring test_property, but for DynamicClassAttribute."""
    def test_property_decorator_baseclass(self):
        # see #1620
        base = BaseClass()
        self.assertEqual(base.spam, 5)
        self.assertEqual(base._spam, 5)
        base.spam = 10
        self.assertEqual(base.spam, 10)
        self.assertEqual(base._spam, 10)
        delattr(base, "spam")
        self.assertTrue(not hasattr(base, "spam"))
        self.assertTrue(not hasattr(base, "_spam"))
        base.spam = 20
        self.assertEqual(base.spam, 20)
        self.assertEqual(base._spam, 20)
    def test_property_decorator_subclass(self):
        # see #1620
        sub = SubClass()
        self.assertRaises(PropertyGet, getattr, sub, "spam")
        self.assertRaises(PropertySet, setattr, sub, "spam", None)
        self.assertRaises(PropertyDel, delattr, sub, "spam")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_subclass_doc(self):
        sub = SubClass()
        self.assertEqual(sub.__class__.__dict__['spam'].__doc__, "SubClass.getter")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_baseclass_doc(self):
        base = BaseClass()
        self.assertEqual(base.__class__.__dict__['spam'].__doc__, "BaseClass.getter")
    def test_property_decorator_doc(self):
        base = PropertyDocBase()
        sub = PropertyDocSub()
        self.assertEqual(base.__class__.__dict__['spam'].__doc__, "spam spam spam")
        self.assertEqual(sub.__class__.__dict__['spam'].__doc__, "spam spam spam")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_getter_doc_override(self):
        newgettersub = PropertySubNewGetter()
        self.assertEqual(newgettersub.spam, 5)
        self.assertEqual(newgettersub.__class__.__dict__['spam'].__doc__, "new docstring")
        newgetter = PropertyNewGetter()
        self.assertEqual(newgetter.spam, 8)
        self.assertEqual(newgetter.__class__.__dict__['spam'].__doc__, "new docstring")
    def test_property___isabstractmethod__descriptor(self):
        # The descriptor must coerce any truth-testable value to a bool.
        for val in (True, False, [], [1], '', '1'):
            class C(object):
                def foo(self):
                    pass
                foo.__isabstractmethod__ = val
                foo = DynamicClassAttribute(foo)
            self.assertIs(C.__dict__['foo'].__isabstractmethod__, bool(val))
        # check that the DynamicClassAttribute's __isabstractmethod__ descriptor does the
        # right thing when presented with a value that fails truth testing:
        class NotBool(object):
            def __bool__(self):
                raise ValueError()
            __len__ = __bool__
        with self.assertRaises(ValueError):
            class C(object):
                def foo(self):
                    pass
                foo.__isabstractmethod__ = NotBool()
                foo = DynamicClassAttribute(foo)
    def test_abstract_virtual(self):
        # Abstract attribute -> the class and bare subclasses are abstract...
        self.assertRaises(TypeError, ClassWithAbstractVirtualProperty)
        self.assertRaises(TypeError, ClassWithPropertyAbstractVirtual)
        class APV(ClassWithPropertyAbstractVirtual):
            pass
        self.assertRaises(TypeError, APV)
        class AVP(ClassWithAbstractVirtualProperty):
            pass
        self.assertRaises(TypeError, AVP)
        # ...but overriding the attribute makes the subclass concrete, and the
        # attribute is only reachable through instances, not the class.
        class Okay1(ClassWithAbstractVirtualProperty):
            @DynamicClassAttribute
            def color(self):
                return self._color
            def __init__(self):
                self._color = 'cyan'
        with self.assertRaises(AttributeError):
            Okay1.color
        self.assertEqual(Okay1().color, 'cyan')
        class Okay2(ClassWithAbstractVirtualProperty):
            @DynamicClassAttribute
            def color(self):
                return self._color
            def __init__(self):
                self._color = 'magenta'
        with self.assertRaises(AttributeError):
            Okay2.color
        self.assertEqual(Okay2().color, 'magenta')
# Issue 5890: subclasses of DynamicClassAttribute do not preserve method __doc__ strings
class PropertySub(DynamicClassAttribute):
    """This is a subclass of DynamicClassAttribute"""
# With __slots__ (and hence no instance __dict__), copying a wrapped
# function's docstring onto the descriptor raises AttributeError -- the
# subclass tests below rely on that.
class PropertySubSlots(DynamicClassAttribute):
    """This is a subclass of DynamicClassAttribute that defines __slots__"""
    __slots__ = ()
class PropertySubclassTests(unittest.TestCase):
    """Docstring-propagation tests for DynamicClassAttribute subclasses."""
    @unittest.skipIf(hasattr(PropertySubSlots, '__doc__'),
                     "__doc__ is already present, __slots__ will have no effect")
    def test_slots_docstring_copy_exception(self):
        try:
            class Foo(object):
                @PropertySubSlots
                def spam(self):
                    """Trying to copy this docstring will raise an exception"""
                    return 1
            print('\n',spam.__doc__)
        except AttributeError:
            pass
        else:
            raise Exception("AttributeError not raised")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_docstring_copy(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """spam wrapped in DynamicClassAttribute subclass"""
                return 1
        self.assertEqual(
            Foo.__dict__['spam'].__doc__,
            "spam wrapped in DynamicClassAttribute subclass")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_setter_copies_getter_docstring(self):
        class Foo(object):
            def __init__(self): self._spam = 1
            @PropertySub
            def spam(self):
                """spam wrapped in DynamicClassAttribute subclass"""
                return self._spam
            @spam.setter
            def spam(self, value):
                """this docstring is ignored"""
                self._spam = value
        foo = Foo()
        self.assertEqual(foo.spam, 1)
        foo.spam = 2
        self.assertEqual(foo.spam, 2)
        self.assertEqual(
            Foo.__dict__['spam'].__doc__,
            "spam wrapped in DynamicClassAttribute subclass")
        class FooSub(Foo):
            spam = Foo.__dict__['spam']
            @spam.setter
            def spam(self, value):
                """another ignored docstring"""
                self._spam = 'eggs'
        foosub = FooSub()
        self.assertEqual(foosub.spam, 1)
        foosub.spam = 7
        self.assertEqual(foosub.spam, 'eggs')
        self.assertEqual(
            FooSub.__dict__['spam'].__doc__,
            "spam wrapped in DynamicClassAttribute subclass")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_new_getter_new_docstring(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
            @spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        self.assertEqual(Foo.__dict__['spam'].__doc__, "a new docstring")
        class FooBase(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
        class Foo2(FooBase):
            spam = FooBase.__dict__['spam']
            @spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        # NOTE(review): this re-checks Foo; presumably Foo2 was intended here
        # (Foo2's docstring is otherwise never asserted) -- confirm upstream.
        self.assertEqual(Foo.__dict__['spam'].__doc__, "a new docstring")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
StephenWeber/ansible | lib/ansible/modules/cloud/openstack/os_nova_host_aggregate.py | 21 | 6727 | #!/usr/bin/python
# Copyright 2016 Jakub Jursa <jakub.jursa1@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_nova_host_aggregate
short_description: Manage OpenStack host aggregates
extends_documentation_fragment: openstack
author: "Jakub Jursa"
version_added: "2.3"
description:
- Create, update, or delete OpenStack host aggregates. If a aggregate
with the supplied name already exists, it will be updated with the
new name, new availability zone, new metadata and new list of hosts.
options:
name:
description: Name of the aggregate.
required: true
metadata:
description: Metadata dict.
required: false
default: None
availability_zone:
description: Availability zone to create aggregate into.
required: false
default: None
hosts:
description: List of hosts to set for an aggregate.
required: false
default: None
state:
description: Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a host aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: present
name: db_aggregate
hosts:
- host1
- host2
metadata:
type: dbcluster
# Delete an aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: absent
name: db_aggregate
'''
RETURN = '''
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, aggregate):
new_metadata = (module.params['metadata'] or {})
new_metadata['availability_zone'] = module.params['availability_zone']
if (module.params['name'] != aggregate.name) or \
(module.params['hosts'] is not None and module.params['hosts'] != aggregate.hosts) or \
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or \
(module.params['metadata'] is not None and new_metadata != aggregate.metadata):
return True
return False
def _system_state_change(module, aggregate):
state = module.params['state']
if state == 'absent' and aggregate:
return True
if state == 'present':
if aggregate is None:
return True
return _needs_update(module, aggregate)
return False
def main():
    """Ansible entry point: converge an OpenStack host aggregate to the
    requested state (present/absent), honouring check mode."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        metadata=dict(required=False, default=None, type='dict'),
        availability_zone=dict(required=False, default=None),
        hosts=dict(required=False, default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # Aggregate APIs used below appeared in shade 1.9.0.
    if StrictVersion(shade.__version__) < StrictVersion('1.9.0'):
        module.fail_json(msg="To utilize this module, the installed version of"
                             "the shade library MUST be >=1.9.0")
    name = module.params['name']
    metadata = module.params['metadata']
    availability_zone = module.params['availability_zone']
    hosts = module.params['hosts']
    state = module.params['state']
    if metadata is not None:
        # availability_zone is managed through its own parameter, never via
        # the metadata dict.
        metadata.pop('availability_zone', None)
    try:
        cloud = shade.operator_cloud(**module.params)
        aggregates = cloud.search_aggregates(name_or_id=name)
        if len(aggregates) == 1:
            aggregate = aggregates[0]
        elif len(aggregates) == 0:
            aggregate = None
        else:
            # name_or_id should be unique server-side.
            raise Exception("Should not happen")
        if module.check_mode:
            # Report what WOULD change without touching the cloud.
            module.exit_json(changed=_system_state_change(module, aggregate))
        if state == 'present':
            if aggregate is None:
                # Create from scratch, then attach hosts/metadata.
                aggregate = cloud.create_aggregate(name=name,
                    availability_zone=availability_zone)
                if hosts:
                    for h in hosts:
                        cloud.add_host_to_aggregate(aggregate.id, h)
                if metadata:
                    cloud.set_aggregate_metadata(aggregate.id, metadata)
                changed = True
            else:
                if _needs_update(module, aggregate):
                    if availability_zone is not None:
                        aggregate = cloud.update_aggregate(aggregate.id,
                            name=name, availability_zone=availability_zone)
                    if metadata is not None:
                        # Keys present remotely but absent from the request
                        # are cleared by setting them to None.
                        metas = metadata
                        for i in (set(aggregate.metadata.keys()) - set(metadata.keys())):
                            if i != 'availability_zone':
                                metas[i] = None
                        cloud.set_aggregate_metadata(aggregate.id, metas)
                    if hosts is not None:
                        # Reconcile host membership in both directions.
                        for i in (set(aggregate.hosts) - set (hosts)):
                            cloud.remove_host_from_aggregate(aggregate.id, i)
                        for i in (set(hosts) - set(aggregate.hosts)):
                            cloud.add_host_to_aggregate(aggregate.id, i)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed)
        elif state == 'absent':
            if aggregate is None:
                changed=False
            else:
                cloud.delete_aggregate(aggregate.id)
                changed=True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
bikong2/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This allows to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset: two concentric circles, not linearly separable
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data into a sparse, high-dimensional
# one-hot encoding of the leaf each sample lands in
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA (TruncatedSVD works directly on sparse input)
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison on the ORIGINAL features
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
             X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
CybOXProject/python-cybox | cybox/test/objects/image_file_test.py | 1 | 2412 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.objects.image_file_object import ImageFile
from cybox.common import Hash, String
from cybox.compat import long
import cybox.test
from cybox.test import EntityTestCase
from cybox.test.objects import ObjectTestCase
class TestImageFile(ObjectTestCase, unittest.TestCase):
    """Round-trip tests for the CybOX ImageFile object.

    ObjectTestCase drives the actual dict -> binding -> dict assertions;
    this class only supplies the fixture data in ``_full_dict``, which is
    expected to exercise every field of ImageFileObjectType.
    """
    object_type = "ImageFileObjectType"
    klass = ImageFile
    # Fixture covering ImageFile-specific fields plus the FileObjectType
    # fields ImageFile inherits (file_name, hashes, timestamps, ...).
    _full_dict = {
        'image_is_compressed': True,
        'image_file_format': u("JPG"),
        'image_height': 10000,
        'image_width': 3000,
        'bits_per_pixel': 9000,
        'compression_algorithm': u("An algorithm"),
        'is_packed': False,
        'is_masqueraded': True,
        'file_name': u("example.txt"),
        'file_path': {'value': u("C:\\Temp"),
                      'fully_qualified': True},
        'device_path': u("\\Device\\CdRom0"),
        'full_path': u("C:\\Temp\\example.txt"),
        'file_extension': u("txt"),
        'size_in_bytes': long(1024),
        'magic_number': u("D0CF11E0"),
        'file_format': u("ASCII Text"),
        'hashes': [
            {
                'type': Hash.TYPE_MD5,
                'simple_hash_value': u("0123456789abcdef0123456789abcdef")
            }
        ],
        'digital_signatures': [
            {
                'certificate_issuer': u("Microsoft"),
                'certificate_subject': u("Notepad"),
            }
        ],
        # ISO 8601 timestamps with explicit UTC offsets
        'modified_time': "2010-11-06T02:02:02+08:00",
        'accessed_time': "2010-11-07T02:03:02+09:00",
        'created_time': "2010-11-08T02:04:02+10:00",
        'user_owner': u("sballmer"),
        'packer_list': [
            {
                'name': u("UPX"),
                'version': u("3.91"),
            }
        ],
        'peak_entropy': 7.454352453,
        'sym_links': [u("../link_destination")],
        'byte_runs': [{'offset': 16, 'byte_run_data': u("1A2B3C4D")}],
        'extracted_features': {
            'strings': [{'string_value': u("string from the file")}],
        },
        'encryption_algorithm': u("RC4"),
        'compression_method': u("deflate"),
        'compression_version': u("1.0"),
        'compression_comment': u("This has been compressed"),
        'xsi:type': object_type,
    }
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/test/test_pstats.py | 175 | 1295 | import unittest
from test import support
from io import StringIO
import pstats
class AddCallersTestCase(unittest.TestCase):
    """Tests for pstats.add_callers helper."""

    def test_combine_results(self):
        # pstats.add_callers must merge the call results of both target
        # and source, summing the per-call statistics of entries present
        # in both dicts. See issue1269.
        # cProfile-style entries: values are 4-tuples of counters.
        combined = pstats.add_callers(
            {"a": (1, 2, 3, 4)},
            {"a": (1, 2, 3, 4), "b": (5, 6, 7, 8)})
        self.assertEqual(combined, {'a': (2, 4, 6, 8), 'b': (5, 6, 7, 8)})
        # profile-style entries: values are plain call counts.
        combined = pstats.add_callers({"a": 1}, {"a": 1, "b": 5})
        self.assertEqual(combined, {'a': 2, 'b': 5})
class StatsTestCase(unittest.TestCase):
    """Smoke tests for pstats.Stats built from the bundled profile dump."""

    def setUp(self):
        # Load the pickled profile data shipped with the test suite.
        self.stats = pstats.Stats(support.findfile('pstats.pck'))

    def test_add(self):
        # Adding a Stats object (even to itself, twice) must not raise.
        out = StringIO()
        combined = pstats.Stats(stream=out)
        combined.add(self.stats, self.stats)
def test_main():
    # Entry point used by regrtest: run both suites through the
    # test.support helper.
    support.run_unittest(AddCallersTestCase, StatsTestCase)


if __name__ == "__main__":
    test_main()
| gpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_theilsen.py | 76 | 3848 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
# The three regressors compared throughout this example; fixed random_state
# keeps the stochastic estimators (Theil-Sen, RANSAC) reproducible.
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
# #############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers: corrupt the response of the last 20 samples
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
# Fit each estimator, time it, and draw its fitted line over the scatter.
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
# #############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers: corrupt the predictor (x) of the last 20 samples
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| gpl-3.0 |
meimz/linux | tools/perf/scripts/python/netdev-times.py | 1544 | 15191 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module state shared by the perf callbacks below (this is a Python 2
# perf-script; state is accumulated in trace_* and the handle_* helpers).
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options (filled in by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple (the common prefix of every stored event)
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert a time interval between two nanosecond timestamps to milliseconds.
def diff_msec(src, dst):
	delta_nsec = dst - src
	return delta_nsec / 1000000.0
# Display one transmitted packet: device, length, enqueue timestamp, then
# the Qdisc residency (queue -> xmit) and driver residency (xmit -> free)
# in milliseconds. Honors the "dev=" filter.
def print_transmit(hunk):
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq: the irq entries that raised it, the softirq entry,
# and every napi_poll / netif_receive_skb event collected for that softirq,
# all as millisecond offsets from the first irq entry (base_t).
def print_receive(hunk):
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed (any irq name matches "dev=")
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	# irq entries (and any netif_rx that happened inside them)
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	# events collected while the NET_RX softirq ran
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			# 'comm' means the skb was copied to user space;
			# 'handle' means it was freed/consumed in the kernel.
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
	print PF_JOINT
def trace_begin():
	"""Parse the script options ("tx", "rx", "dev=<name>", "debug")
	from sys.argv before any event is processed."""
	global show_tx
	global show_rx
	global dev
	global debug

	# argv[0] is the script itself; every other entry is an option.
	for arg in sys.argv[1:]:
		if arg == 'tx':
			show_tx = 1
		elif arg == 'rx':
			show_rx = 1
		elif arg.find('dev=', 0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# Default to showing both charts when neither was requested.
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
	# Replay every recorded tracepoint event in timestamp order,
	# dispatching to the handle_* state machines, then render the
	# requested rx/tx charts (and buffer diagnostics in debug mode).
	# order all events in time (Python 2 cmp-style sort)
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)
	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])
	# display transmit hunks
	if show_tx:
		print "   dev    len      Qdisc        " \
			"       netdevice             free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])
	if debug:
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event. Each callback just
# timestamps the event (nsecs) and appends a tuple to all_event_list; the
# real processing happens in trace_end once all events can be time-ordered.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	# Only NET_RX softirqs are of interest to this script.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			callchain, irq, irq_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			irq, irq_name)
	all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
	all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			napi, dev_name)
	all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, rc, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc ,dev_name)
	all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, protocol, location):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, protocol, location)
	all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr)
	all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
	skbaddr, skblen):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen)
	all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
	# Push a record for this hard irq onto the per-cpu stack; it is
	# completed (or dropped) by handle_irq_handler_exit.
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	if cpu not in irq_dic.keys():
		irq_dic[cpu] = []
	irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
	irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
	# Pop the matching irq record and keep it only if a NET_RX softirq
	# was raised inside it (i.e. an 'event_list' was attached).
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic.keys():
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record.update({'irq_ext_t':time})
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
	# NET_RX softirq raised inside a hard irq: tag the topmost irq
	# record on this cpu with a 'sirq_raise' event so the irq is kept.
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'sirq_raise'})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
	# NET_RX softirq entered on a CPU: open a fresh event collection;
	# events until the matching softirq_exit attach here.
	cpu = event_info[EINFO_IDX_CPU]
	ent_time = event_info[EINFO_IDX_TIME]
	net_rx_dic[cpu] = {'sirq_ent_t':ent_time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
	# NET_RX softirq finished: merge the per-cpu irq stack and the
	# softirq's event list into one "receive hunk" for later display.
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = []
	event_list = 0
	if cpu in irq_dic.keys():
		irq_list = irq_dic[cpu]
		del irq_dic[cpu]
	if cpu in net_rx_dic.keys():
		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
		event_list = net_rx_dic[cpu]['event_list']
		del net_rx_dic[cpu]
	# incomplete hunks (no irq or no softirq state) are discarded
	if irq_list == [] or event_list == 0:
		return
	rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
		    'irq_list':irq_list, 'event_list':event_list}
	# merge information realted to a NET_RX softirq
	receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
	# Attach a napi_poll event to the CPU's in-flight NET_RX collection
	# (ignored when no NET_RX softirq is being tracked on this cpu).
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	state = net_rx_dic.get(cpu)
	if state is None:
		return
	state['event_list'].append({'event_name':'napi_poll',
				    'dev':dev_name, 'event_t':time})
def handle_netif_rx(event_info):
	# netif_rx inside a hard irq: record it on the topmost irq record
	# of this cpu (same bookkeeping as handle_irq_softirq_raise).
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
	# Received skb entered the stack: record it both in the current
	# NET_RX collection and in rx_skb_list for later matching against
	# skb_copy_datagram_iovec / kfree_skb. rx_skb_list is bounded by
	# buffer_budget; evictions are counted in of_count_rx_skb_list.
	global of_count_rx_skb_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		rec_data = {'event_name':'netif_receive_skb',
			    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
		event_list = net_rx_dic[cpu]['event_list']
		event_list.append(rec_data)
		rx_skb_list.insert(0, rec_data)
		if len(rx_skb_list) > buffer_budget:
			rx_skb_list.pop()
			of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
	# Packet enqueued to a Qdisc: start a tx record (bounded list, as
	# above; evictions counted in of_count_tx_queue_list).
	global of_count_tx_queue_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
	tx_queue_list.insert(0, skb)
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
	# Driver accepted the packet (rc == 0, NETDEV_TX_OK): move its
	# record from the Qdisc list to the in-flight xmit list and stamp
	# the xmit time.
	global of_count_tx_xmit_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
	if rc == 0: # NETDEV_TX_OK
		for i in range(len(tx_queue_list)):
			skb = tx_queue_list[i]
			if skb['skbaddr'] == skbaddr:
				skb['xmit_t'] = time
				tx_xmit_list.insert(0, skb)
				del tx_queue_list[i]
				if len(tx_xmit_list) > buffer_budget:
					tx_xmit_list.pop()
					of_count_tx_xmit_list += 1
				return
def handle_kfree_skb(event_info):
	# An skb was dropped. Try to match it, in order, against:
	# 1) packets still queued in the Qdisc (drop the record),
	# 2) packets in flight in the driver (complete the tx record),
	# 3) received packets (mark how/where they were freed).
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
	for i in range(len(tx_queue_list)):
		skb = tx_queue_list[i]
		if skb['skbaddr'] == skbaddr:
			del tx_queue_list[i]
			return
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"kfree_skb",
					'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
def handle_consume_skb(event_info):
	# A transmitted skb was consumed (freed normally): stamp its free
	# time and move its record to the finished-transmit list.
	free_time = event_info[EINFO_IDX_TIME]
	skbaddr = event_info[6]
	for idx, skb in enumerate(tx_xmit_list):
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = free_time
			tx_free_list.append(skb)
			del tx_xmit_list[idx]
			return
def handle_skb_copy_datagram_iovec(event_info):
	# The skb's payload was copied to user space: record which process
	# consumed it and drop the skb from the pending-receive list.
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for idx in range(len(rx_skb_list)):
		entry = rx_skb_list[idx]
		if entry['skbaddr'] != skbaddr:
			continue
		entry.update({'handle':"skb_copy_datagram_iovec",
			      'comm':comm, 'pid':pid, 'comm_t':time})
		del rx_skb_list[idx]
		return
| gpl-2.0 |
Gobberwart/PokemonGo-Bot | pokemongo_bot/base_task.py | 9 | 1463 | import logging
import time
class BaseTask(object):
    """Base class for all bot tasks.

    Subclasses must define a callable ``work()`` method (validated at
    construction time) and may override ``initialize()`` for setup.
    """
    TASK_API_VERSION = 1

    def __init__(self, bot, config):
        """
        :param bot:
        :type bot: pokemongo_bot.PokemonGoBot
        :param config: per-task configuration dict; honors 'enabled'
            and 'log_interval'.
        :return:
        """
        self.bot = bot
        self.config = config
        self._validate_work_exists()
        self.logger = logging.getLogger(type(self).__name__)
        self.enabled = config.get('enabled', True)
        self.last_log_time = time.time()
        self.initialize()

    def _validate_work_exists(self):
        # Fail fast if a subclass forgot to implement work().
        method = getattr(self, 'work', None)
        if not method or not callable(method):
            raise NotImplementedError('Missing "work" method')

    def emit_event(self, event, sender=None, level='info', formatted='', data=None):
        """Emit an event through bot.event_manager, rate-limited so at
        least config['log_interval'] seconds pass between emissions.

        ``data`` defaults to a fresh dict per call (the old ``data={}``
        default was a shared mutable, leaking state between calls).
        """
        if data is None:
            data = {}
        if not sender:
            sender = self
        # config may be a plain object without .get (the original code
        # caught AttributeError here); fall back to no throttling.
        try:
            interval = self.config.get('log_interval', 0)
        except AttributeError:
            interval = 0
        now = time.time()
        # Emit only if the throttling interval has elapsed (the two
        # duplicated branches of the original are unified here).
        if now - self.last_log_time >= interval:
            self.last_log_time = now
            self.bot.event_manager.emit(
                event,
                sender=sender,
                level=level,
                formatted=formatted,
                data=data
            )

    def initialize(self):
        pass
| mit |
JacerOmri/PokemonGo-Bot-Desktop | pywin/Lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
# NOTE: this file is generated by gencodec.py; the classes below are the
# standard charmap-codec boilerplate wired to this module's tables.
class Codec(codecs.Codec):

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    # Entry point used by the encodings package registry to expose this
    # codec under the name 'iso8859-3'.
    return codecs.CodecInfo(
        name='iso8859-3',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# Byte 0xNN decodes to decoding_table[0xNN]; u'\ufffe' marks bytes that are
# undefined in ISO-8859-3 (0xA5, 0xAE, 0xBE, 0xC3, 0xD0, 0xE3, 0xF0).
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x04' # 0x04 -> END OF TRANSMISSION
    u'\x05' # 0x05 -> ENQUIRY
    u'\x06' # 0x06 -> ACKNOWLEDGE
    u'\x07' # 0x07 -> BELL
    u'\x08' # 0x08 -> BACKSPACE
    u'\t' # 0x09 -> HORIZONTAL TABULATION
    u'\n' # 0x0A -> LINE FEED
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x14' # 0x14 -> DEVICE CONTROL FOUR
    u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16' # 0x16 -> SYNCHRONOUS IDLE
    u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x1a' # 0x1A -> SUBSTITUTE
    u'\x1b' # 0x1B -> ESCAPE
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> DELETE
    u'\x80' # 0x80 -> <control>
    u'\x81' # 0x81 -> <control>
    u'\x82' # 0x82 -> <control>
    u'\x83' # 0x83 -> <control>
    u'\x84' # 0x84 -> <control>
    u'\x85' # 0x85 -> <control>
    u'\x86' # 0x86 -> <control>
    u'\x87' # 0x87 -> <control>
    u'\x88' # 0x88 -> <control>
    u'\x89' # 0x89 -> <control>
    u'\x8a' # 0x8A -> <control>
    u'\x8b' # 0x8B -> <control>
    u'\x8c' # 0x8C -> <control>
    u'\x8d' # 0x8D -> <control>
    u'\x8e' # 0x8E -> <control>
    u'\x8f' # 0x8F -> <control>
    u'\x90' # 0x90 -> <control>
    u'\x91' # 0x91 -> <control>
    u'\x92' # 0x92 -> <control>
    u'\x93' # 0x93 -> <control>
    u'\x94' # 0x94 -> <control>
    u'\x95' # 0x95 -> <control>
    u'\x96' # 0x96 -> <control>
    u'\x97' # 0x97 -> <control>
    u'\x98' # 0x98 -> <control>
    u'\x99' # 0x99 -> <control>
    u'\x9a' # 0x9A -> <control>
    u'\x9b' # 0x9B -> <control>
    u'\x9c' # 0x9C -> <control>
    u'\x9d' # 0x9D -> <control>
    u'\x9e' # 0x9E -> <control>
    u'\x9f' # 0x9F -> <control>
    u'\xa0' # 0xA0 -> NO-BREAK SPACE
    u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
    u'\u02d8' # 0xA2 -> BREVE
    u'\xa3' # 0xA3 -> POUND SIGN
    u'\xa4' # 0xA4 -> CURRENCY SIGN
    u'\ufffe' # 0xA5 -> <undefined>
    u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
    u'\xa7' # 0xA7 -> SECTION SIGN
    u'\xa8' # 0xA8 -> DIAERESIS
    u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
    u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
    u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
    u'\xad' # 0xAD -> SOFT HYPHEN
    u'\ufffe' # 0xAE -> <undefined>
    u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
    u'\xb0' # 0xB0 -> DEGREE SIGN
    u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
    u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
    u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
    u'\xb4' # 0xB4 -> ACUTE ACCENT
    u'\xb5' # 0xB5 -> MICRO SIGN
    u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
    u'\xb7' # 0xB7 -> MIDDLE DOT
    u'\xb8' # 0xB8 -> CEDILLA
    u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
    u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
    u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
    u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
    u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
    u'\ufffe' # 0xBE -> <undefined>
    u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
    u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\ufffe' # 0xC3 -> <undefined>
    u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
    u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
    u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\ufffe' # 0xD0 -> <undefined>
    u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
    u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
    u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
    u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
    u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
    u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
    u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\ufffe' # 0xE3 -> <undefined>
    u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
    u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
    u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
    u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\ufffe' # 0xF0 -> <undefined>
    u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
    u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
    u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf7' # 0xF7 -> DIVISION SIGN
    u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
    u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
    u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
    u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Inverse of decoding_table: maps each unicode character back to its byte.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
mdodsworth/hadoop-common | src/contrib/hod/testing/main.py | 182 | 2928 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Test driver for the HOD test suite (Python 2): discovers test modules in
# the testing/ directory, runs each module's Run<Name>Tests() suite method
# and exits with the number of failed/erroring test modules (0 on success).
import unittest, os, sys, re
# Locate the project root relative to this script so the 'testing' package
# is importable without installation.
myPath = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myPath)
testingDir = os.path.join(rootDirectory, "testing")
sys.path.append(rootDirectory)
from testing.lib import printSeparator, printLine
moduleList = []
allList = []
# Module base names listed here are discovered but not executed.
excludes = [
           ]
# Build a module list by scanning through all files in testingDir
for file in os.listdir(testingDir):
  if(re.search(r".py$", file) and re.search(r"^test", file)):
    # All .py files with names starting in 'test'
    module = re.sub(r"^test","",file)
    module = re.sub(r".py$","",module)
    allList.append(module)
    if module not in excludes:
      moduleList.append(module)
printLine("All testcases - %s" % allList)
printLine("Excluding the testcases - %s" % excludes)
printLine("Executing the testcases - %s" % moduleList)
testsResult = 0
# Now import each of these modules and start calling the corresponding
#testSuite methods
for moduleBaseName in moduleList:
  try:
    module = "testing.test" + moduleBaseName
    suiteCaller = "Run" + moduleBaseName + "Tests"
    printSeparator()
    printLine("Running %s" % suiteCaller)
    # Import the corresponding test cases module
    imported_module = __import__(module , fromlist=[suiteCaller] )
    # Call the corresponding suite method now
    testRes = getattr(imported_module, suiteCaller)()
    testsResult = testsResult + testRes
    printLine("Finished %s. TestSuite Result : %s\n" % \
              (suiteCaller, testRes))
  except ImportError, i:
    # Failed to import a test module
    printLine(i)
    testsResult = testsResult + 1
    pass
  except AttributeError, n:
    # Failed to get suiteCaller from a test module
    printLine(n)
    testsResult = testsResult + 1
    pass
  except Exception, e:
    # Test module suiteCaller threw some exception
    printLine("%s failed. \nReason : %s" % (suiteCaller, e))
    printLine("Skipping %s" % suiteCaller)
    testsResult = testsResult + 1
    pass
if testsResult != 0:
  printSeparator()
  printLine("Total testcases with failure or error : %s" % testsResult)
# Propagate the failure count as the process exit status.
sys.exit(testsResult)
| apache-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/webapp2-2.5.2/webapp2_extras/config.py | 67 | 7380 | # -*- coding: utf-8 -*-
"""
webapp2_extras.config
=====================
Configuration object for webapp2.
This module is deprecated. See :class:`webapp2.WSGIApplication.config`.
:copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
from __future__ import absolute_import
import warnings
import webapp2
# Emitted once at import time: this whole module is superseded by
# webapp2.Config.
warnings.warn(DeprecationWarning(
    'webapp2_extras.config is deprecated. '
    'The WSGIApplication uses webapp2.Config instead.'),
    stacklevel=1)
# Both sentinels are unique object() instances compared with ``is`` so that
# any user-supplied value (including None) can be distinguished from them.
#: Value used for missing default values.
DEFAULT_VALUE = object()
#: Value used for required values.
REQUIRED_VALUE = object()
class Config(dict):
    """A simple configuration dictionary keyed by module name. This is a
    dictionary of dictionaries. It requires all values to be dictionaries
    and applies updates and default values to the inner dictionaries instead
    of the first level one.
    The configuration object can be set as a ``config`` attribute of
    :class:`WSGIApplication`::
        import webapp2
        from webapp2_extras import config as webapp2_config
        my_config = {}
        my_config['my.module'] = {
            'foo': 'bar',
        }
        app = webapp2.WSGIApplication(routes=[
            webapp2.Route('/', name='home', handler=MyHandler)
        ])
        app.config = webapp2_config.Config(my_config)
    Then to read configuration values, get them from the app::
        class MyHandler(RequestHandler):
            def get(self):
                foo = self.app.config['my.module']['foo']
                # ...
    """
    #: Loaded module configurations (list of module names whose defaults
    #: have already been applied).
    loaded = None
    def __init__(self, values=None, defaults=None):
        """Initializes the configuration object.
        :param values:
            A dictionary of configuration dictionaries for modules.
        :param defaults:
            A dictionary of configuration dictionaries for initial default
            values. These modules are marked as loaded.
        """
        self.loaded = []
        if values is not None:
            assert isinstance(values, dict)
            for module, config in values.iteritems():
                self.update(module, config)
        if defaults is not None:
            assert isinstance(defaults, dict)
            for module, config in defaults.iteritems():
                self.setdefault(module, config)
                # Defaults count as loaded so __getitem__ won't try to
                # import the module's own default_config later.
                self.loaded.append(module)
    def __getitem__(self, module):
        """Returns the configuration for a module. If it is not already
        set, loads a ``default_config`` variable from the given module and
        updates the configuration with those default values
        Every module that allows some kind of configuration sets a
        ``default_config`` global variable that is loaded by this function,
        cached and used in case the requested configuration was not defined
        by the user.
        :param module:
            The module name.
        :returns:
            A configuration value.
        """
        if module not in self.loaded:
            # Load default configuration and update config.
            values = webapp2.import_string(module + '.default_config',
                                           silent=True)
            if values:
                self.setdefault(module, values)
            # Marked loaded even when the import failed, so the import is
            # attempted at most once per module.
            self.loaded.append(module)
        try:
            return dict.__getitem__(self, module)
        except KeyError:
            raise KeyError('Module %r is not configured.' % module)
    def __setitem__(self, module, values):
        """Sets a configuration for a module, requiring it to be a dictionary.
        :param module:
            A module name for the configuration, e.g.: `webapp2.ext.i18n`.
        :param values:
            A dictionary of configurations for the module.
        """
        assert isinstance(values, dict), 'Module configuration must be a dict.'
        dict.__setitem__(self, module, SubConfig(module, values))
    def get(self, module, default=DEFAULT_VALUE):
        """Returns a configuration for a module. If default is not provided,
        returns an empty dict if the module is not configured.
        :param module:
            The module name.
        :params default:
            Default value to return if the module is not configured. If not
            set, returns an empty dict.
        :returns:
            A module configuration.
        """
        if default is DEFAULT_VALUE:
            default = {}
        return dict.get(self, module, default)
    def setdefault(self, module, values):
        """Sets a default configuration dictionary for a module.
        :param module:
            The module to set default configuration, e.g.: `webapp2.ext.i18n`.
        :param values:
            A dictionary of configurations for the module.
        :returns:
            The module configuration dictionary.
        """
        assert isinstance(values, dict), 'Module configuration must be a dict.'
        if module not in self:
            dict.__setitem__(self, module, SubConfig(module))
        module_dict = dict.__getitem__(self, module)
        # Merge key by key so user-set values are never overwritten.
        for key, value in values.iteritems():
            module_dict.setdefault(key, value)
        return module_dict
    def update(self, module, values):
        """Updates the configuration dictionary for a module.
        :param module:
            The module to update the configuration, e.g.: `webapp2.ext.i18n`.
        :param values:
            A dictionary of configurations for the module.
        """
        assert isinstance(values, dict), 'Module configuration must be a dict.'
        if module not in self:
            dict.__setitem__(self, module, SubConfig(module))
        dict.__getitem__(self, module).update(values)
    def get_config(self, module, key=None, default=REQUIRED_VALUE):
        """Returns a configuration value for a module and optionally a key.
        Will raise a KeyError if the module is not configured or the key
        doesn't exist and a default is not provided.
        :param module:
            The module name.
        :params key:
            The configuration key.
        :param default:
            Default value to return if the key doesn't exist.
        :returns:
            A module configuration.
        """
        module_dict = self.__getitem__(module)
        if key is None:
            return module_dict
        return module_dict.get(key, default)
class SubConfig(dict):
    """A configuration dict bound to a single module name.

    Lookups raise ``KeyError`` with messages that identify the owning
    module, and values left as the ``REQUIRED_VALUE`` sentinel are treated
    as unset: reading them always raises.
    """
    def __init__(self, module, values=None):
        # An empty tuple stands in for "no initial values".
        dict.__init__(self, values or ())
        self.module = module

    def __getitem__(self, key):
        if key not in self:
            raise KeyError('Module %r does not have the config key %r' %
                           (self.module, key))
        value = dict.__getitem__(self, key)
        if value is REQUIRED_VALUE:
            raise KeyError('Module %r requires the config key %r to be '
                           'set.' % (self.module, key))
        return value

    def get(self, key, default=None):
        value = dict.__getitem__(self, key) if key in self else default
        if value is REQUIRED_VALUE:
            raise KeyError('Module %r requires the config key %r to be '
                           'set.' % (self.module, key))
        return value
| mit |
flexVDI/cerbero | cerbero/hacks.py | 5 | 5184 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import sys
### XML Hacks ###
import re
import StringIO
from xml.dom import minidom
from cerbero.utils import etree
# Keep a reference to the original ElementTree.write so the patched
# version below can delegate to it.
oldwrite = etree.ElementTree.write
def pretify(string, pretty_print=True):
    """Return *string* re-serialized by minidom's pretty printer, with the
    spurious whitespace that toprettyxml() inserts around text nodes
    stripped back out.

    ``pretty_print`` is accepted for interface compatibility but is not
    consulted; the output is always prettified.
    """
    # Matches the newline+tab runs minidom adds between a tag and its text
    # content (in either direction) so they can be removed.
    # See:http://www.hoboes.com/Mimsy/hacks/geektool-taskpaper-and-xml/
    bogus_whitespace = re.compile(
        r'((?<=>)(\n[\t]*)(?=[^<\t]))|(?<=[^>\t])(\n[\t]*)(?=<)')
    pretty = minidom.parseString(string).toprettyxml()
    return bogus_whitespace.sub('', pretty)
def write(self, file_or_filename, encoding=None, pretty_print=False):
    """Replacement for ElementTree.write that adds a ``pretty_print``
    option: the tree is serialized to a buffer via the original write,
    run through pretify() and only then written to the destination.
    """
    if not pretty_print:
        # No prettifying requested: behave exactly like the original.
        return oldwrite(self, file_or_filename, encoding)
    tmpfile = StringIO.StringIO()
    oldwrite(self, tmpfile, encoding)
    tmpfile.seek(0)
    # Accept either an open file-like object or a filename.
    if hasattr(file_or_filename, "write"):
        out_file = file_or_filename
    else:
        out_file = open(file_or_filename, "wb")
    out_file.write(pretify(tmpfile.read()))
    # Only close files we opened ourselves.
    if not hasattr(file_or_filename, "write"):
        out_file.close()
# Monkeypatch ElementTree so every write() gains the pretty_print option.
etree.ElementTree.write = write
### Windows Hacks ###
# On windows, python transforms all environment variables to uppercase,
# but we need lowercase ones to override configure options like
# am_cv_python_platform
environclass = os.environ.__class__
import UserDict
class _Environ(environclass):
    """Case-preserving replacement for os.environ (Python 2).

    Keeps its own dict of the original-case keys while mirroring every
    mutation to the real process environment via os.putenv().
    """
    def __init__(self, environ):
        UserDict.UserDict.__init__(self)
        self.data = {}
        # Copy the current environment, preserving key case.
        for k, v in environ.items():
            self.data[k] = v
    def __setitem__(self, key, item):
        os.putenv(key, item)
        self.data[key] = item
    def __getitem__(self, key):
        return self.data[key]
    def __delitem__(self, key):
        # os.putenv has no delete; clearing the value is the closest match.
        os.putenv(key, '')
        del self.data[key]
    def pop(self, key, *args):
        os.putenv(key, '')
        return self.data.pop(key, *args)
    def has_key(self, key):
        return key in self.data
    def __contains__(self, key):
        return key in self.data
    def get(self, key, failobj=None):
        return self.data.get(key, failobj)
# we don't want backslashes in paths as it breaks shell commands;
# save the originals so the forward-slash wrappers below can delegate.
oldexpanduser = os.path.expanduser
oldabspath = os.path.abspath
oldrealpath = os.path.realpath
def join(*args):
    """Join path components with '/' regardless of platform."""
    separator = '/'
    return separator.join(args)
def expanduser(path):
    """os.path.expanduser variant that returns forward-slash paths."""
    expanded = oldexpanduser(path)
    return expanded.replace('\\', '/')
def abspath(path):
    """os.path.abspath variant that returns forward-slash paths."""
    absolute = oldabspath(path)
    return absolute.replace('\\', '/')
def realpath(path):
    """os.path.realpath variant that returns forward-slash paths."""
    resolved = oldrealpath(path)
    return resolved.replace('\\', '/')
# Install the Windows-specific replacements only on Windows: a
# case-preserving environment and forward-slash path functions.
if sys.platform.startswith('win'):
    os.environ = _Environ(os.environ)
    os.path.join = join
    os.path.expanduser = expanduser
    os.path.abspath = abspath
    os.path.realpath = realpath
import stat
import shutil
from shutil import rmtree as shutil_rmtree
from cerbero.utils.shell import call as shell_call
def rmtree(path, ignore_errors=False, onerror=None):
    '''
    Drop-in replacement for shutil.rmtree that retries stubborn paths.

    shutil.rmtree often fails with access denied. On Windows this happens when
    a file is readonly. On Linux this can happen when a directory doesn't have
    the appropriate permissions (Ex: chmod 200) and many other cases.

    Parameters mirror shutil.rmtree; a caller-supplied onerror takes
    precedence over the forced-removal fallback below.
    '''
    def force_removal(func, path, excinfo):
        '''
        This is the only way to ensure that readonly files are deleted by
        rmtree on Windows. See: http://bugs.python.org/issue19643
        '''
        # Due to the way 'onerror' is implemented in shutil.rmtree, errors
        # encountered while listing directories cannot be recovered from. So if
        # a directory cannot be listed, shutil.rmtree assumes that it is empty
        # and it tries to call os.remove() on it which fails. This is just one
        # way in which this can fail, so for robustness we just call 'rm' if we
        # get an OSError while trying to remove a specific path.
        # See: http://bugs.python.org/issue8523
        try:
            os.chmod(path, stat.S_IWRITE)
            func(path)
        except OSError:
            shell_call('rm -rf ' + path)
    # We try not to use `rm` on Windows because it's about 20-30x slower
    if not onerror:
        shutil_rmtree(path, ignore_errors, onerror=force_removal)
    else:
        shutil_rmtree(path, ignore_errors, onerror)
# Monkeypatch shutil so all callers get the robust version.
shutil.rmtree = rmtree
### OS X Hacks ###
# use cURL to download instead of wget
# (presumably because wget is not available on stock OS X — TODO confirm)
if sys.platform.startswith('darwin'):
    import cerbero.utils.shell as cshell
    del cshell.download
    cshell.download = cshell.download_curl
| lgpl-2.1 |
log2timeline/dfvfs | dfvfs/path/ntfs_path_spec.py | 2 | 2400 | # -*- coding: utf-8 -*-
"""The path NTFS specification implementation."""
from dfvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class NTFSPathSpec(path_spec.PathSpec):
  """NTFS path specification.

  Identifies a file entry inside an NTFS file system either by its location
  or by its MFT entry number, optionally narrowed to a specific data stream
  and $FILE_NAME MFT attribute.

  Attributes:
    data_stream (str): data stream name, where None indicates the default
        data stream.
    location (str): location.
    mft_attribute (int): $FILE_NAME MFT attribute index, where the first
        attribute is indicated by 0.
    mft_entry (int): MFT entry, where the first entry is indicated by 0.
  """

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_NTFS

  def __init__(
      self, data_stream=None, location=None, mft_attribute=None,
      mft_entry=None, parent=None, **kwargs):
    """Initializes a path specification.

    Note that the NTFS path specification must have a parent.

    Args:
      data_stream (Optional[str]): data stream name, where None indicates
          the default data stream.
      location (Optional[str]): location.
      mft_attribute (Optional[int]): $FILE_NAME MFT attribute index, where
          the first attribute is indicated by 0.
      mft_entry (Optional[int]): MFT entry, where the first entry is
          indicated by 0.
      parent (Optional[PathSpec]): parent path specification.

    Raises:
      ValueError: when location and mft_entry, or parent are not set.
    """
    # The file entry must be addressable by location or MFT entry, and a
    # parent path specification is mandatory.
    addressable = location or mft_entry is not None
    if not parent or not addressable:
      raise ValueError('Missing location and MFT entry, or parent value.')

    super(NTFSPathSpec, self).__init__(parent=parent, **kwargs)
    self.data_stream = data_stream
    self.location = location
    self.mft_attribute = mft_attribute
    self.mft_entry = mft_entry

  @property
  def comparable(self):
    """str: comparable representation of the path specification."""
    parts = []
    if self.data_stream:
      parts.append('data stream: {0:s}'.format(self.data_stream))
    if self.location is not None:
      parts.append('location: {0:s}'.format(self.location))
    if self.mft_attribute is not None:
      parts.append('MFT attribute: {0:d}'.format(self.mft_attribute))
    if self.mft_entry is not None:
      parts.append('MFT entry: {0:d}'.format(self.mft_entry))
    return self._GetComparable(sub_comparable_string=', '.join(parts))


factory.Factory.RegisterPathSpec(NTFSPathSpec)
| apache-2.0 |
guybedo/minos | tests/model/parameters_test.py | 1 | 5078 | '''
Created on Feb 7, 2017
@author: julien
'''
import unittest
from keras.layers.core import Dense
from minos.experiment.experiment import ExperimentParameters, Experiment,\
check_experiment_parameters, InvalidParametersException
from minos.experiment.training import Training
from minos.model.parameter import random_param_value, int_param, float_param,\
string_param
from minos.model.parameters import reference_parameters,\
register_custom_activation, register_custom_layer
class ParametersTest(unittest.TestCase):
    """Unit tests for minos experiment/layer parameter handling."""
    def test_parameters(self):
        """Every reference layer parameter is resolvable by name."""
        experiment_parameters = ExperimentParameters(use_default_values=False)
        for layer in reference_parameters['layers'].keys():
            for name, _value in reference_parameters['layers'][layer].items():
                self.assertIsNotNone(
                    experiment_parameters.get_layer_parameter('%s.%s' % (layer, name)),
                    'Parameter %s should exist for layer %s' % (name, layer))
    def test_custom_parameters(self):
        """User-supplied layout/layer parameters override the defaults."""
        experiment_parameters = ExperimentParameters()
        experiment_parameters.layout_parameter('blocks', int_param(1, 10))
        param = experiment_parameters.get_layout_parameter('blocks')
        self.assertTrue(
            1 == param.lo and 10 == param.hi,
            'Should have set values')
        experiment_parameters.layout_parameter('layers', int_param(1, 3))
        param = experiment_parameters.get_layout_parameter('layers')
        self.assertTrue(
            1 == param.lo and 3 == param.hi,
            'Should have set values')
        experiment_parameters.layer_parameter('Dense.activation', string_param(['relu', 'tanh']))
        param = experiment_parameters.get_layer_parameter('Dense.activation')
        self.assertTrue(
            'relu' == param.values[0] and 'tanh' == param.values[1],
            'Should have set values')
    def test_random_value(self):
        """random_param_value respects types, value lists, ranges and
        the optional flag for dict-valued parameters."""
        param = int_param(values=list(range(10)))
        val = random_param_value(param)
        self.assertTrue(
            isinstance(val, int),
            'Should be an int')
        self.assertTrue(
            val in param.values,
            'Value should be in predefined values')
        param = float_param(values=[i * 0.1 for i in range(10)])
        val = random_param_value(param)
        self.assertTrue(
            isinstance(val, float),
            'Should be a float')
        self.assertTrue(
            val in param.values,
            'Value should be in predefined values')
        param = float_param(lo=.5, hi=.7)
        # Sample repeatedly to exercise the random range bounds.
        for _ in range(100):
            val = random_param_value(param)
            self.assertTrue(
                isinstance(val, float),
                'Should be a float')
            self.assertTrue(
                val <= param.hi and val >= param.lo,
                'Value should be in range')
        param = {
            'a': float_param(optional=False),
            'b': float_param(optional=False)}
        for _ in range(10):
            val = random_param_value(param)
            self.assertTrue(
                isinstance(val, dict),
                'Should be a dict')
            self.assertEqual(
                len(param), len(val),
                'Should respect non optional setting')
        param = {
            'a': float_param(optional=True),
            'b': float_param(optional=True)}
        for _ in range(10):
            val = random_param_value(param)
            self.assertTrue(
                isinstance(val, dict),
                'Should be a dict')
            self.assertTrue(
                len(val) >= 0 and len(val) <= len(param),
                'Should respect non optional setting')
    def test_search_parameters(self):
        """An experiment without default values fails parameter checking."""
        experiment = Experiment(
            label='test',
            parameters=ExperimentParameters(use_default_values=False))
        valid_parameters = True
        try:
            check_experiment_parameters(experiment)
        except InvalidParametersException:
            valid_parameters = False
        self.assertFalse(valid_parameters, 'Should have raised an exception')
    def test_custom_definitions(self):
        """Custom activations and layers become visible in the parameters."""
        def custom_activation(x):
            return x
        register_custom_activation('custom_activation', custom_activation)
        register_custom_layer('Dense2', Dense, dict(test='test'))
        experiment_parameters = ExperimentParameters(use_default_values=False)
        custom_params = experiment_parameters.get_layer_parameter('Dense2')
        self.assertIsNotNone(
            custom_params,
            'Should have registered custom layer')
        self.assertTrue(
            'test' in custom_params,
            'Should have registered custom layer params')
        activations = experiment_parameters.get_layer_parameter('Dense.activation')
        self.assertTrue(
            'custom_activation' in activations.values,
            'Should have registered custom_activation')
if __name__ == "__main__":
    # Discover and run all test cases defined in this module.
    unittest.main()
| apache-2.0 |
mer-tools/chromium-trace | trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/dispatch.py | 35 | 14459 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Dispatch WebSocket request.
"""
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import msgutil
from mod_pywebsocket import mux
from mod_pywebsocket import stream
from mod_pywebsocket import util
# Handler source files are named '<resource>_wsh.py'; the pattern matches
# the suffix case-insensitively, the suffix constant is used for stripping.
_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
_SOURCE_SUFFIX = '_wsh.py'
# Names of the functions a handler file must/can define.
_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
    'web_socket_passive_closing_handshake')
class DispatchException(Exception):
    """Exception in dispatching WebSocket request."""
    def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
        super(DispatchException, self).__init__(name)
        # HTTP status code to report back to the client.
        self.status = status
def _default_passive_closing_handshake_handler(request):
    """Default web_socket_passive_closing_handshake handler.

    Returns a (close code, reason) tuple indicating a normal closure.
    """
    return common.STATUS_NORMAL_CLOSURE, ''
def _normalize_path(path):
"""Normalize path.
Args:
path: the path to normalize.
Path is converted to the absolute path.
The input path can use either '\\' or '/' as the separator.
The normalized path always uses '/' regardless of the platform.
"""
path = path.replace('\\', os.path.sep)
path = os.path.realpath(path)
path = path.replace('\\', '/')
return path
def _create_path_to_resource_converter(base_dir):
    """Returns a function that maps a handler source file path to its
    resource string: the base directory prefix is stripped from the head,
    _SOURCE_SUFFIX from the tail, and path separators become '/'.
    Paths outside base_dir or without the suffix map to None.

    Args:
        base_dir: the path to the base directory.
    """
    prefix = _normalize_path(base_dir)
    prefix_length = len(prefix)
    suffix_length = len(_SOURCE_SUFFIX)
    def converter(path):
        if not path.endswith(_SOURCE_SUFFIX):
            return None
        # _normalize_path must not be used because resolving symlink breaks
        # following path check.
        forward = path.replace('\\', '/')
        if not forward.startswith(prefix):
            return None
        return forward[prefix_length:-suffix_length]
    return converter
def _enumerate_handler_file_paths(directory):
    """Yields the path of every WebSocket handler source file found under
    *directory* (files whose name matches _SOURCE_PATH_PATTERN).
    """
    for dir_path, _unused_dirs, file_names in os.walk(directory):
        for file_name in file_names:
            candidate = os.path.join(dir_path, file_name)
            if _SOURCE_PATH_PATTERN.search(candidate):
                yield candidate
class _HandlerSuite(object):
"""A handler suite holder class."""
def __init__(self, do_extra_handshake, transfer_data,
passive_closing_handshake):
self.do_extra_handshake = do_extra_handshake
self.transfer_data = transfer_data
self.passive_closing_handshake = passive_closing_handshake
def _source_handler_file(handler_definition):
    """Source a handler definition string.
    Args:
        handler_definition: a string containing Python statements that define
        handler functions.
    Returns a _HandlerSuite built from the functions the string defines.
    """
    # Execute the handler code in a private namespace and pull the
    # well-known handler functions out of it.
    global_dic = {}
    try:
        exec handler_definition in global_dic
    except Exception:
        raise DispatchException('Error in sourcing handler:' +
                                util.get_stack_trace())
    # The passive closing handshake handler is optional; fall back to the
    # default implementation when it is missing or invalid.
    passive_closing_handshake_handler = None
    try:
        passive_closing_handshake_handler = _extract_handler(
            global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
    except Exception:
        passive_closing_handshake_handler = (
            _default_passive_closing_handshake_handler)
    return _HandlerSuite(
        _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
        _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
        passive_closing_handshake_handler)
def _extract_handler(dic, name):
"""Extracts a callable with the specified name from the given dictionary
dic.
"""
if name not in dic:
raise DispatchException('%s is not defined.' % name)
handler = dic[name]
if not callable(handler):
raise DispatchException('%s is not callable.' % name)
return handler
class Dispatcher(object):
    """Dispatches WebSocket requests.
    This class maintains a map from resource name to handlers.
    """
    def __init__(
            self, root_dir, scan_dir=None,
            allow_handlers_outside_root_dir=True):
        """Construct an instance.
        Args:
            root_dir: The directory where handler definition files are
                      placed.
            scan_dir: The directory where handler definition files are
                      searched. scan_dir must be a directory under root_dir,
                      including root_dir itself. If scan_dir is None,
                      root_dir is used as scan_dir. scan_dir can be useful
                      in saving scan time when root_dir contains many
                      subdirectories.
            allow_handlers_outside_root_dir: Scans handler files even if their
                      canonical path is not under root_dir.
        """
        self._logger = util.get_class_logger(self)
        # Maps resource string -> _HandlerSuite.
        self._handler_suite_map = {}
        # Human-readable warnings collected while sourcing handler files.
        self._source_warnings = []
        if scan_dir is None:
            scan_dir = root_dir
        if not os.path.realpath(scan_dir).startswith(
                os.path.realpath(root_dir)):
            raise DispatchException('scan_dir:%s must be a directory under '
                                    'root_dir:%s.' % (scan_dir, root_dir))
        self._source_handler_files_in_dir(
            root_dir, scan_dir, allow_handlers_outside_root_dir)
    def add_resource_path_alias(self,
                                alias_resource_path, existing_resource_path):
        """Add resource path alias.
        Once added, request to alias_resource_path would be handled by
        handler registered for existing_resource_path.
        Args:
            alias_resource_path: alias resource path
            existing_resource_path: existing resource path
        """
        try:
            handler_suite = self._handler_suite_map[existing_resource_path]
            self._handler_suite_map[alias_resource_path] = handler_suite
        except KeyError:
            raise DispatchException('No handler for: %r' %
                                    existing_resource_path)
    def source_warnings(self):
        """Return warnings in sourcing handlers."""
        return self._source_warnings
    def do_extra_handshake(self, request):
        """Do extra checking in WebSocket handshake.
        Select a handler based on request.uri and call its
        web_socket_do_extra_handshake function.
        Args:
            request: mod_python request.
        Raises:
            DispatchException: when handler was not found
            AbortedByUserException: when user handler abort connection
            HandshakeException: when opening handshake failed
        """
        handler_suite = self.get_handler_suite(request.ws_resource)
        if handler_suite is None:
            raise DispatchException('No handler for: %r' % request.ws_resource)
        do_extra_handshake_ = handler_suite.do_extra_handshake
        try:
            do_extra_handshake_(request)
        except handshake.AbortedByUserException, e:
            # Deliberate abort by the handler: let it propagate untouched.
            raise
        except Exception, e:
            # Any other handler failure is reported as a forbidden handshake.
            util.prepend_message_to_exception(
                '%s raised exception for %s: ' % (
                    _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
                    request.ws_resource),
                e)
            raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
    def transfer_data(self, request):
        """Let a handler transfer_data with a WebSocket client.
        Select a handler based on request.ws_resource and call its
        web_socket_transfer_data function.
        Args:
            request: mod_python request.
        Raises:
            DispatchException: when handler was not found
            AbortedByUserException: when user handler abort connection
        """
        # TODO(tyoshino): Terminate underlying TCP connection if possible.
        try:
            if mux.use_mux(request):
                mux.start(request, self)
            else:
                handler_suite = self.get_handler_suite(request.ws_resource)
                if handler_suite is None:
                    raise DispatchException('No handler for: %r' %
                                            request.ws_resource)
                transfer_data_ = handler_suite.transfer_data
                transfer_data_(request)
            if not request.server_terminated:
                request.ws_stream.close_connection()
        # Catch non-critical exceptions the handler didn't handle.
        except handshake.AbortedByUserException, e:
            self._logger.debug('%s', e)
            raise
        except msgutil.BadOperationException, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
        except msgutil.InvalidFrameException, e:
            # InvalidFrameException must be caught before
            # ConnectionTerminatedException that catches InvalidFrameException.
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
        except msgutil.UnsupportedFrameException, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
        except stream.InvalidUTF8Exception, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(
                common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
        except msgutil.ConnectionTerminatedException, e:
            # Peer went away; nothing further to close.
            self._logger.debug('%s', e)
        except Exception, e:
            util.prepend_message_to_exception(
                '%s raised exception for %s: ' % (
                    _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
                e)
            raise
    def passive_closing_handshake(self, request):
        """Prepare code and reason for responding client initiated closing
        handshake.
        """
        handler_suite = self.get_handler_suite(request.ws_resource)
        if handler_suite is None:
            return _default_passive_closing_handshake_handler(request)
        return handler_suite.passive_closing_handshake(request)
    def get_handler_suite(self, resource):
        """Retrieves two handlers (one for extra handshake processing, and one
        for data transfer) for the given request as a HandlerSuite object.
        """
        # Strip fragment and query components before the map lookup.
        fragment = None
        if '#' in resource:
            resource, fragment = resource.split('#', 1)
        if '?' in resource:
            resource = resource.split('?', 1)[0]
        handler_suite = self._handler_suite_map.get(resource)
        if handler_suite and fragment:
            raise DispatchException('Fragment identifiers MUST NOT be used on '
                                    'WebSocket URIs',
                                    common.HTTP_STATUS_BAD_REQUEST)
        return handler_suite
    def _source_handler_files_in_dir(
            self, root_dir, scan_dir, allow_handlers_outside_root_dir):
        """Source all the handler source files in the scan_dir directory.
        The resource path is determined relative to root_dir.
        """
        # We build a map from resource to handler code assuming that there's
        # only one path from root_dir to scan_dir and it can be obtained by
        # comparing realpath of them.
        # Here we cannot use abspath. See
        # https://bugs.webkit.org/show_bug.cgi?id=31603
        convert = _create_path_to_resource_converter(root_dir)
        scan_realpath = os.path.realpath(scan_dir)
        root_realpath = os.path.realpath(root_dir)
        for path in _enumerate_handler_file_paths(scan_realpath):
            if (not allow_handlers_outside_root_dir and
                (not os.path.realpath(path).startswith(root_realpath))):
                self._logger.debug(
                    'Canonical path of %s is not under root directory' %
                    path)
                continue
            try:
                handler_suite = _source_handler_file(open(path).read())
            except DispatchException, e:
                # Sourcing failed: record a warning and keep scanning.
                self._source_warnings.append('%s: %s' % (path, e))
                continue
            resource = convert(path)
            if resource is None:
                self._logger.debug(
                    'Path to resource conversion on %s failed' % path)
            else:
                self._handler_suite_map[convert(path)] = handler_suite
# vi:sts=4 sw=4 et
| bsd-3-clause |
diorcety/intellij-community | python/lib/Lib/xml/dom/minicompat.py | 139 | 5287 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# isinstance -- version of the isinstance() function that accepts
# tuples as the second parameter regardless of the
# Python version
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# GetattrMagic -- base class used to make _get_<attr> be magically
# invoked when available
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
#
# NewStyle -- base class to cause __slots__ to be honored in
# the new world
#
# True, False -- only for Python 2.2 and earlier
# Public names re-exported via "from xml.dom.minicompat import *"; the
# compatibility shims below may extend this list at import time.
__all__ = ["NodeList", "EmptyNodeList", "NewStyle",
           "StringTypes", "defproperty", "GetattrMagic"]

import xml.dom

# StringTypes is (str,) or (str, unicode) depending on whether this
# interpreter build has the unicode type.
try:
    unicode
except NameError:
    StringTypes = type(''),
else:
    StringTypes = type(''), type(unicode(''))

# define True and False only if not defined as built-ins
try:
    True
except NameError:
    # Pre-boolean Pythons (< 2.2.1): emulate the singletons with ints and
    # export them so "import *" provides them to client modules.
    True = 1
    False = 0
    __all__.extend(["True", "False"])
# Python < 2.2's isinstance() rejects a tuple as the second argument;
# detect that at import time and, if necessary, install a wrapper that
# emulates the newer behavior.  This intentionally shadows the builtin.
try:
    isinstance('', StringTypes)
except TypeError:
    #
    # Wrap isinstance() to make it compatible with the version in
    # Python 2.2 and newer.
    #
    _isinstance = isinstance

    def isinstance(obj, type_or_seq):
        """isinstance() that also accepts a tuple/sequence of types."""
        try:
            return _isinstance(obj, type_or_seq)
        except TypeError:
            # Second argument is a sequence of types: test each in turn.
            for t in type_or_seq:
                if _isinstance(obj, t):
                    return 1
            return 0

    __all__.append("isinstance")
# On interpreters where the builtin list is a subclassable type, NodeList is
# a real list subclass; otherwise fall back to factory functions returning
# plain lists (basic sequence behavior, no item()/length accessors).
if list is type([]):
    class NodeList(list):
        """A list of DOM nodes with the W3C item()/length accessors."""
        __slots__ = ()

        def item(self, index):
            """Return the node at *index*, or None when out of range."""
            if 0 <= index < len(self):
                return self[index]
            return None

        def _get_length(self):
            return len(self)

        def _set_length(self, value):
            raise xml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute 'length'")

        length = property(_get_length, _set_length,
                          doc="The number of nodes in the NodeList.")

        # Pickle as a plain list; __slots__ means there is no instance dict.
        def __getstate__(self):
            return list(self)

        def __setstate__(self, state):
            self[:] = state

    class EmptyNodeList(tuple):
        """An immutable, always-empty NodeList."""
        __slots__ = ()

        def __add__(self, other):
            merged = NodeList()
            merged.extend(other)
            return merged

        def __radd__(self, other):
            merged = NodeList()
            merged.extend(other)
            return merged

        def item(self, index):
            return None

        def _get_length(self):
            return 0

        def _set_length(self, value):
            raise xml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute 'length'")

        length = property(_get_length, _set_length,
                          doc="The number of nodes in the NodeList.")
else:
    def NodeList():
        return []

    def EmptyNodeList():
        return []
# Pre-2.2 Pythons have no property(); emulate read-only properties via
# __getattr__ dispatch to _get_<name> methods (GetattrMagic) and make
# defproperty() a no-op.  On 2.2+ build real property objects instead.
try:
    property
except NameError:
    def defproperty(klass, name, doc):
        # taken care of by the base __getattr__()
        pass

    class GetattrMagic:
        """Dispatches attribute access to _get_<name>() accessor methods."""
        def __getattr__(self, key):
            # Never dispatch private names; avoids accidental recursion.
            if key.startswith("_"):
                raise AttributeError, key

            try:
                get = getattr(self, "_get_" + key)
            except AttributeError:
                raise AttributeError, key

            return get()

    class NewStyle:
        # No new-style classes without property(); plain classic class.
        pass
else:
    def defproperty(klass, name, doc):
        """Install a read-only property *name* on *klass* backed by _get_<name>."""
        get = getattr(klass, ("_get_" + name)).im_func
        def set(self, value, name=name):
            raise xml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute " + repr(name))
        # A _set_<name> accessor would silently be ignored; forbid it.
        assert not hasattr(klass, "_set_" + name), \
               "expected not to find _set_" + name
        prop = property(get, set, doc=doc)
        setattr(klass, name, prop)

    class GetattrMagic:
        # Real properties make the __getattr__ hack unnecessary.
        pass

    NewStyle = object
| apache-2.0 |
onitake/ansible | lib/ansible/modules/files/iso_extract.py | 101 | 6645 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Jeroen Hoekx (@jhoekx)
- Matt Robinson (@ribbons)
- Dag Wieers (@dagwieers)
module: iso_extract
short_description: Extract files from an ISO image
description:
- This module has two possible ways of operation.
- If 7zip is installed on the system, this module extracts files from an ISO
into a temporary directory and copies files to a given destination,
if needed.
- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
mounts the ISO image to a temporary location, and copies files to a given
destination, if needed.
version_added: '2.3'
requirements:
- Either 7z (from I(7zip) or I(p7zip) package)
- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
options:
image:
description:
- The ISO image to extract files from.
required: yes
aliases: [ path, src ]
dest:
description:
- The destination directory to extract files to.
required: yes
files:
description:
- A list of files to extract from the image.
- Extracting directories does not work.
required: yes
force:
description:
- If C(yes), which will replace the remote file when contents are different than the source.
- If C(no), the file will only be extracted and copied if the destination does not already exist.
type: bool
default: 'yes'
aliases: [ thirsty ]
version_added: '2.4'
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
default: '7z'
version_added: '2.4'
notes:
- Only the file checksum (content) is taken into account when extracting files
from the ISO image. If C(force=no), only checks the presence of the file.
- In Ansible v2.3 this module was using C(mount) and C(umount) commands only,
requiring root access. This is no longer needed with the introduction of 7zip
for extraction.
'''
EXAMPLES = r'''
- name: Extract kernel and ramdisk from a LiveCD
iso_extract:
image: /tmp/rear-test.iso
dest: /tmp/virt-rear/
files:
- isolinux/kernel
- isolinux/initrd.cgz
'''
RETURN = r'''
#
'''
import os.path
import shutil
import tempfile
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point for the iso_extract Ansible module.

    Extracts the requested files from an ISO image, either with 7z (when
    available) or by loop-mounting the image, and copies each file whose
    SHA1 checksum differs from the one already in the destination directory.
    Exits via module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(type='path', required=True, aliases=['path', 'src']),
            dest=dict(type='path', required=True),
            files=dict(type='list', required=True),
            force=dict(type='bool', default=True, aliases=['thirsty']),
            executable=dict(type='path'),  # No default on purpose
        ),
        supports_check_mode=True,
    )
    image = module.params['image']
    dest = module.params['dest']
    files = module.params['files']
    force = module.params['force']
    executable = module.params['executable']

    result = dict(
        changed=False,
        dest=dest,
        image=image,
    )

    # We want to know if the user provided it or not, so we set default here
    if executable is None:
        executable = '7z'

    binary = module.get_bin_path(executable, None)

    # When executable was provided and binary not found, warn user !
    if module.params['executable'] is not None and not binary:
        module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)

    if not os.path.exists(dest):
        module.fail_json(msg="Directory '%s' does not exist" % dest)

    # BUGFIX: previously only os.path.dirname(image) was checked, so a
    # missing ISO slipped past validation and surfaced later as a cryptic
    # 7z/mount failure; check the image file itself, as the message says.
    if not os.path.exists(image):
        module.fail_json(msg="ISO image '%s' does not exist" % image)

    result['files'] = []
    extract_files = list(files)

    if not force:
        # Check if we have to process any files based on existence
        for f in files:
            dest_file = os.path.join(dest, os.path.basename(f))
            if os.path.exists(dest_file):
                result['files'].append(dict(
                    checksum=None,
                    dest=dest_file,
                    src=f,
                ))
                extract_files.remove(f)

    if not extract_files:
        module.exit_json(**result)

    tmp_dir = tempfile.mkdtemp()

    # Use 7zip when we have a binary, otherwise try to mount
    if binary:
        cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
    else:
        cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)

    rc, out, err = module.run_command(cmd)
    if rc != 0:
        result.update(dict(
            cmd=cmd,
            rc=rc,
            stderr=err,
            stdout=out,
        ))
        shutil.rmtree(tmp_dir)

        if binary:
            module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
        else:
            module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)

    try:
        for f in extract_files:
            tmp_src = os.path.join(tmp_dir, f)
            if not os.path.exists(tmp_src):
                module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)

            src_checksum = module.sha1(tmp_src)

            dest_file = os.path.join(dest, os.path.basename(f))
            if os.path.exists(dest_file):
                dest_checksum = module.sha1(dest_file)
            else:
                dest_checksum = None

            result['files'].append(dict(
                checksum=src_checksum,
                dest=dest_file,
                src=f,
            ))

            # Copy only when content differs (or the destination is absent);
            # in check mode report the change without writing.
            if src_checksum != dest_checksum:
                if not module.check_mode:
                    shutil.copy(tmp_src, dest_file)

                result['changed'] = True
    finally:
        # Always clean up: unmount (when we mounted) and drop the temp dir.
        if not binary:
            module.run_command('umount "%s"' % tmp_dir)

        shutil.rmtree(tmp_dir)

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
anksp21/Community-Zenpacks | ZenPacks.vmware.VirtualMachines/ZenPacks/vmware/VirtualMachines/VMDevice.py | 3 | 1658 | from Globals import InitializeClass
from Products.ZenModel.Device import Device
from Products.ZenModel.ZenossSecurity import ZEN_VIEW
from Products.ZenRelations.RelSchema import *
import copy
class VMDevice(Device):
    "VMWare ESX Server"

    # Each VMDevice contains VirtualMachine components; the reverse side of
    # the containment relation is the virtual machine's 'host'.
    _relations = Device._relations + (
        ('virtualmachines', ToManyCont(ToOne,
            "ZenPacks.vmware.VirtualMachines.VirtualMachine", "host")),
    )

    # Zope factory/UI configuration: the tabs shown on this device's page
    # and the permission each one requires.
    factory_type_information = (
        {
            'immediate_view' : 'deviceStatus',
            'actions'        :
            (
                { 'id'            : 'status'
                , 'name'          : 'Status'
                , 'action'        : 'deviceStatus'
                , 'permissions'   : (ZEN_VIEW, )
                },
                { 'id'            : 'osdetail'
                , 'name'          : 'OS'
                , 'action'        : 'deviceOsDetail'
                , 'permissions'   : (ZEN_VIEW, )
                },
                { 'id'            : 'virtualmachineData'
                , 'name'          : 'Virtual Machines'
                , 'action'        : 'virtualmachineData'
                , 'permissions'   : (ZEN_VIEW,)
                },
                { 'id'            : 'hwdetail'
                , 'name'          : 'Hardware'
                , 'action'        : 'deviceHardwareDetail'
                , 'permissions'   : (ZEN_VIEW, )
                },
                { 'id'            : 'events'
                , 'name'          : 'Events'
                , 'action'        : 'viewEvents'
                , 'permissions'   : (ZEN_VIEW, )
                },
                { 'id'            : 'perfServer'
                , 'name'          : 'Perf'
                , 'action'        : 'viewDevicePerformance'
                , 'permissions'   : (ZEN_VIEW, )
                },
                { 'id'            : 'edit'
                , 'name'          : 'Edit'
                , 'action'        : 'editDevice'
                , 'permissions'   : ("Change Device",)
                },
            )
        },
    )

# Register the class with Zope's security machinery.
InitializeClass(VMDevice)
| gpl-2.0 |
herow/planning_qgis | python/ext-libs/owslib/coverage/wcs110.py | 29 | 18511 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
##########NOTE: Does not conform to new interfaces yet #################
from wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.util import openURL, testXMLValue
from urllib import urlencode
from urllib2 import urlopen
from owslib.etree import etree
import os, errno
from owslib.coverage import wcsdecoder
from owslib.crs import Crs
import logging
from owslib.util import log
def ns(tag):
    """Qualify *tag* with the WCS 1.1 XML namespace (Clark notation)."""
    return '{http://www.opengis.net/wcs/1.1}%s' % tag
class WebCoverageService_1_1_0(WCSBase):
    """Abstraction for OGC Web Coverage Service (WCS), version 1.1.0
    Implements IWebCoverageService.
    """
    def __getitem__(self, name):
        ''' check contents dictionary to allow dict like access to service layers'''
        if name in self.__getattribute__('contents').keys():
            return self.__getattribute__('contents')[name]
        else:
            raise KeyError, "No content named %s" % name

    def __init__(self, url, xml, cookies):
        """Parse a GetCapabilities response into service metadata.

        url -- service endpoint URL
        xml -- saved capabilities document to parse instead of fetching
        cookies -- cookies to send with subsequent requests
        """
        self.version = '1.1.0'
        self.url = url
        self.cookies = cookies
        # initialize from saved capability document or access the server
        reader = WCSCapabilitiesReader(self.version)
        if xml:
            self._capabilities = reader.readString(xml)
        else:
            self._capabilities = reader.read(self.url)

        # check for exceptions
        se = self._capabilities.find('{http://www.opengis.net/ows/1.1}Exception')
        if se is not None:
            err_message = str(se.text).strip()
            raise ServiceException(err_message, xml)

        #build metadata objects:
        #serviceIdentification metadata: servers differ on which OWS
        #namespace they use, so try the wcs-specific one first.
        elem = self._capabilities.find('{http://www.opengis.net/wcs/1.1/ows}ServiceIdentification')
        if elem is None:
            elem = self._capabilities.find('{http://www.opengis.net/ows/1.1}ServiceIdentification')
        self.identification = ServiceIdentification(elem)

        #serviceProvider
        elem = self._capabilities.find('{http://www.opengis.net/ows/1.1}ServiceProvider')
        self.provider = ServiceProvider(elem)

        #serviceOperations
        self.operations = []
        for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1/ows}OperationsMetadata/{http://www.opengis.net/wcs/1.1/ows}Operation/'):
            self.operations.append(Operation(elem))

        # exceptions - ***********TO DO *************
        self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]

        # serviceContents: our assumption is that services use a top-level layer
        # as a metadata organizer, nothing more.
        self.contents = {}
        top = self._capabilities.find('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary')
        for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary/{http://www.opengis.net/wcs/1.1}CoverageSummary'):
            cm = ContentMetadata(elem, top, self)
            self.contents[cm.id] = cm

        if self.contents == {}:
            #non-hierarchical: no nested CoverageSummary elements were found.
            top = None
            for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary'):
                cm = ContentMetadata(elem, top, self)
                #make the describeCoverage requests to populate the supported formats/crs attributes
                self.contents[cm.id] = cm

    def items(self):
        '''supports dict-like items() access'''
        items = []
        for item in self.contents:
            items.append((item, self.contents[item]))
        return items

    #TO DECIDE: Offer repackaging of coverageXML/Multipart MIME output?
    #def getData(self, directory='outputdir', outputfile='coverage.nc',  **kwargs):
        #u=self.getCoverageRequest(**kwargs)
        ##create the directory if it doesn't exist:
        #try:
            #os.mkdir(directory)
        #except OSError, e:
            ## Ignore directory exists error
            #if e.errno <> errno.EEXIST:
                #raise
        ##elif wcs.version=='1.1.0':
        ##Could be multipart mime or XML Coverages document, need to use the decoder...
        #decoder=wcsdecoder.WCSDecoder(u)
        #x=decoder.getCoverages()
        #if type(x) is wcsdecoder.MpartMime:
            #filenames=x.unpackToDir(directory)
            ##print 'Files from 1.1.0 service written to %s directory'%(directory)
        #else:
            #filenames=x
        #return filenames

    #TO DO: Handle rest of the WCS 1.1.0 keyword parameters e.g. GridCRS etc.
    def getCoverage(self, identifier=None, bbox=None, time=None, format=None, store=False, rangesubset=None, gridbaseCRS=None, gridtype=None, gridCS=None, gridorigin=None, gridoffsets=None, method='Get', **kwargs):
        """Request and return a coverage from the WCS as a file-like object
        note: additional **kwargs helps with multi-version implementation
        core keyword arguments should be supported cross version
        example:
        cvg=wcs.getCoverageRequest(identifier=['TuMYrRQ4'], time=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='application/netcdf', store='true')
        is equivalent to:
        http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIMESEQUENCE=2792-06-01T00:00:00.0&FORMAT=application/netcdf
        if store = true, returns a coverages XML file
        if store = false, returns a multipart mime
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug('WCS 1.1.0 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, rangesubset=%s, gridbaseCRS=%s, gridtype=%s, gridCS=%s, gridorigin=%s, gridoffsets=%s, method=%s, other_arguments=%s'%(identifier, bbox, time, format, rangesubset, gridbaseCRS, gridtype, gridCS, gridorigin, gridoffsets, method, str(kwargs)))
        # Resolve the GetCoverage HTTP endpoint from the capabilities,
        # falling back to the service URL.
        if method == 'Get':
            method = '{http://www.opengis.net/wcs/1.1/ows}Get'
        try:
            base_url = next((m.get('url') for m in self.getOperationByName('GetCoverage').methods if m.get('type').lower() == method.lower()))
        except StopIteration:
            base_url = self.url

        #process kwargs
        request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
        assert len(identifier) > 0
        request['identifier'] = identifier
        #request['identifier'] = ','.join(identifier)
        if bbox:
            request['boundingbox'] = ','.join([repr(x) for x in bbox])
        if time:
            request['timesequence'] = ','.join(time)
        request['format'] = format
        request['store'] = store

        #rangesubset: untested - require a server implementation
        if rangesubset:
            request['RangeSubset'] = rangesubset

        #GridCRS structure: untested - require a server implementation
        if gridbaseCRS:
            request['gridbaseCRS'] = gridbaseCRS
        if gridtype:
            request['gridtype'] = gridtype
        if gridCS:
            request['gridCS'] = gridCS
        if gridorigin:
            request['gridorigin'] = gridorigin
        if gridoffsets:
            request['gridoffsets'] = gridoffsets

        #anything else e.g. vendor specific parameters must go through kwargs
        if kwargs:
            for kw in kwargs:
                request[kw] = kwargs[kw]

        #encode and request
        data = urlencode(request)
        u = openURL(base_url, data, method, self.cookies)
        return u

    def getOperationByName(self, name):
        """Return a named operation item."""
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError, "No operation named %s" % name
class Operation(object):
    """Abstraction for operation metadata
    Implements IOperationMetadata.

    Attributes:
      name -- the operation name (e.g. 'GetCoverage')
      formatOptions -- allowed Parameter values advertised for the operation
      methods -- list of dicts with 'type' (HTTP verb element tag) and 'url'
    """
    def __init__(self, elem):
        self.name = elem.get('name')
        self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')]
        # BUGFIX: methods used to be a dict keyed by the verb tag, but
        # getCoverage() iterates .methods expecting dicts supporting
        # m.get('type') / m.get('url') -- iterating the dict yielded the
        # string keys and crashed with AttributeError.  Store a list of
        # dicts matching that usage instead.
        self.methods = []
        for verb in elem.findall('{http://www.opengis.net/wcs/1.1/ows}DCP/{http://www.opengis.net/wcs/1.1/ows}HTTP/*'):
            url = verb.attrib['{http://www.w3.org/1999/xlink}href']
            self.methods.append({'type': verb.tag, 'url': url})
class ServiceIdentification(object):
    """ Abstraction for ServiceIdentification Metadata
    implements IServiceIdentificationMetadata"""
    def __init__(self, elem):
        self.service = "WCS"
        self.version = "1.1.0"
        # Title/Abstract may appear in either the generic OWS namespace or
        # the WCS-specific OWS namespace; try the generic one, then fall back.
        self.title = testXMLValue(elem.find('{http://www.opengis.net/ows}Title'))
        if self.title is None:  # may have used the wcs ows namespace:
            self.title = testXMLValue(elem.find('{http://www.opengis.net/wcs/1.1/ows}Title'))

        self.abstract = testXMLValue(elem.find('{http://www.opengis.net/ows}Abstract'))
        if self.abstract is None:  # may have used the wcs ows namespace:
            self.abstract = testXMLValue(elem.find('{http://www.opengis.net/wcs/1.1/ows}Abstract'))
        # BUGFIX: a second unconditional read of the generic-namespace
        # Abstract used to follow here, resetting self.abstract to None and
        # clobbering the wcs-ows fallback above; it has been removed.

        self.keywords = [f.text for f in elem.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword')]
        #self.link = elem.find('{http://www.opengis.net/wcs/1.1}Service/{http://www.opengis.net/wcs/1.1}OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')

        fees = elem.find('{http://www.opengis.net/wcs/1.1/ows}Fees')
        self.fees = fees.text if fees is not None else None

        constraints = elem.find('{http://www.opengis.net/wcs/1.1/ows}AccessConstraints')
        self.accessConstraints = constraints.text if constraints is not None else None
class ServiceProvider(object):
    """ Abstraction for ServiceProvider metadata
    implements IServiceProviderMetadata """
    def __init__(self, elem):
        name_node = elem.find('{http://www.opengis.net/ows}ProviderName')
        self.name = name_node.text if name_node is not None else None
        #self.contact=ServiceContact(elem.find('{http://www.opengis.net/ows}ServiceContact'))
        self.contact = ContactMetadata(elem)
        # no obvious definitive place for url in wcs, repeat provider name?
        self.url = self.name
class ContactMetadata(object):
    ''' implements IContactMetadata

    Each attribute is the text of the corresponding OWS contact element,
    or None when the element is absent.
    '''
    def __init__(self, elem):
        # The original repeated the same try/except AttributeError pattern
        # for every field; collapsed into the _findtext helper (same paths,
        # same None-on-missing behavior).
        ows = '{http://www.opengis.net/ows}'
        addr = ows + 'ServiceContact/' + ows + 'ContactInfo/' + ows + 'Address/'
        self.name = self._findtext(elem, ows + 'ServiceContact/' + ows + 'IndividualName')
        self.organization = self._findtext(elem, ows + 'ProviderName')
        self.address = self._findtext(elem, addr + ows + 'DeliveryPoint')
        self.city = self._findtext(elem, addr + ows + 'City')
        self.region = self._findtext(elem, addr + ows + 'AdministrativeArea')
        self.postcode = self._findtext(elem, addr + ows + 'PostalCode')
        self.country = self._findtext(elem, addr + ows + 'Country')
        self.email = self._findtext(elem, addr + ows + 'ElectronicMailAddress')

    @staticmethod
    def _findtext(elem, path):
        """Return the text of the first match for *path*, or None."""
        node = elem.find(path)
        return node.text if node is not None else None
class ContentMetadata(object):
    """Abstraction for WCS ContentMetadata
    Implements IContentMetadata
    """
    def __init__(self, elem, parent, service):
        """Initialize from a wcs:CoverageSummary element.

        elem -- the CoverageSummary element for this coverage
        parent -- enclosing top-level CoverageSummary (or None when the
                  Contents section is flat); consulted for inherited metadata
        service -- owning service instance, used for lazy DescribeCoverage
        """
        #TODO - examine the parent for bounding box info.
        self._service = service
        self._elem = elem
        self._parent = parent
        self.id = self._checkChildAndParent('{http://www.opengis.net/wcs/1.1}Identifier')
        self.description = self._checkChildAndParent('{http://www.opengis.net/wcs/1.1}Description')
        self.title = self._checkChildAndParent('{http://www.opengis.net/ows}Title')
        self.abstract = self._checkChildAndParent('{http://www.opengis.net/ows}Abstract')

        #keywords.
        self.keywords = []
        for kw in elem.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword'):
            if kw is not None:
                self.keywords.append(kw.text)

        #also inherit any keywords from parent coverage summary (if there is one)
        if parent is not None:
            for kw in parent.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword'):
                if kw is not None:
                    self.keywords.append(kw.text)

        self.boundingBox = None  # needed for iContentMetadata harmonisation
        self.boundingBoxWGS84 = None
        b = elem.find('{http://www.opengis.net/ows}WGS84BoundingBox')
        if b is not None:
            lc = b.find('{http://www.opengis.net/ows}LowerCorner').text
            uc = b.find('{http://www.opengis.net/ows}UpperCorner').text
            self.boundingBoxWGS84 = (
                float(lc.split()[0]), float(lc.split()[1]),
                float(uc.split()[0]), float(uc.split()[1]),
            )

        # bboxes - other CRS.  BUGFIX: this loop previously read the corners
        # and the 'crs' attribute from 'b' (the WGS84 element above) instead
        # of the BoundingBox being iterated, so the crs lookup always raised
        # (and was swallowed) and boundingboxes stayed empty.
        self.boundingboxes = []
        for bbox in elem.findall('{http://www.opengis.net/ows}BoundingBox'):
            if bbox is not None:
                try:
                    lc = bbox.find('{http://www.opengis.net/ows}LowerCorner').text
                    uc = bbox.find('{http://www.opengis.net/ows}UpperCorner').text
                    boundingBox = (
                        float(lc.split()[0]), float(lc.split()[1]),
                        float(uc.split()[0]), float(uc.split()[1]),
                        bbox.attrib['crs'])
                    self.boundingboxes.append(boundingBox)
                except (AttributeError, KeyError, IndexError, ValueError):
                    # Skip malformed BoundingBox elements (missing corners,
                    # missing crs attribute, or non-numeric coordinates).
                    pass

        #others not used but needed for iContentMetadata harmonisation
        self.styles = None
        self.crsOptions = None

        #SupportedCRS
        self.supportedCRS = []
        for crs in elem.findall('{http://www.opengis.net/wcs/1.1}SupportedCRS'):
            self.supportedCRS.append(Crs(crs.text))

        #SupportedFormats
        self.supportedFormats = []
        for format in elem.findall('{http://www.opengis.net/wcs/1.1}SupportedFormat'):
            self.supportedFormats.append(format.text)

    #grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
    def _getGrid(self):
        # TODO: convert the commented-out WCS 1.0 logic below to 1.1.
        grid = None
        #if not hasattr(self, 'descCov'):
            #self.descCov=self._service.getDescribeCoverage(self.id)
        #gridelem= self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}RectifiedGrid')
        #if gridelem is not None:
            #grid=RectifiedGrid(gridelem)
        #else:
            #gridelem=self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}Grid')
            #grid=Grid(gridelem)
        return grid
    grid = property(_getGrid, None)

    #time limits/postions require a describeCoverage request therefore only resolve when requested
    def _getTimeLimits(self):
        """Return [start, end] from the coverage's TimePeriod (lazy DescribeCoverage)."""
        timelimits = []
        for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageDescription/')+ns('Domain/')+ns('TemporalDomain/')+ns('TimePeriod/')):
            subelems = elem.getchildren()
            timelimits = [subelems[0].text, subelems[1].text]
        return timelimits
    timelimits = property(_getTimeLimits, None)

    #TODO timepositions property
    def _getTimePositions(self):
        return []
    timepositions = property(_getTimePositions, None)

    def _checkChildAndParent(self, path):
        ''' checks child coverage summary, and if item not found checks higher level coverage summary'''
        # AttributeError covers both "find() returned None" and "no parent";
        # the original bare excepts hid unrelated errors.
        try:
            value = self._elem.find(path).text
        except AttributeError:
            try:
                value = self._parent.find(path).text
            except AttributeError:
                value = None
        return value
| gpl-2.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/trace.py | 153 | 29890 | #!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
# cPickle is the C-accelerated pickle on Python 2; fall back to pure pickle.
try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Install/remove the trace function on all threads when threading is
# available; otherwise fall back to sys.settrace alone.
try:
    import threading
except ImportError:
    _settrace = sys.settrace

    def _unsettrace():
        sys.settrace(None)
else:
    def _settrace(func):
        """Set *func* as the trace function for new threads and this one."""
        threading.settrace(func)
        sys.settrace(func)

    def _unsettrace():
        """Remove the trace function everywhere it was installed."""
        sys.settrace(None)
        threading.settrace(None)
def usage(outfile):
    """Write the command-line help text (with the program name filled in)."""
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]

Meta-options:
--help                Display this help then exit.
--version             Output version information then exit.

Otherwise, exactly one of the following three options must be given:
-t, --trace           Print each line to sys.stdout before it is executed.
-c, --count           Count the number of times each line is executed
                      and write the counts to <module>.cover for each
                      module executed, in the module's directory.
                      See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs       Keep track of which functions are executed at least
                      once and write the results to sys.stdout after the
                      program exits.
-T, --trackcalls      Keep track of caller/called pairs and write the
                      results to sys.stdout after the program exits.
-r, --report          Generate a report from a counts file; do not execute
                      any code. `--file' must specify the results file to
                      read, which must have been created in a previous run
                      with `--count --file=FILE'.

Modifiers:
-f, --file=<file>     File to accumulate counts over several runs.
-R, --no-report       Do not generate the coverage report files.
                      Useful if you want to accumulate over several runs.
-C, --coverdir=<dir>  Directory where the report files. The coverage
                      report for <package>.<module> is written to file
                      <dir>/<package>/<module>.cover.
-m, --missing         Annotate executable lines that were not executed
                      with '>>>>>> '.
-s, --summary         Write a brief summary on stdout for each file.
                      (Can only be used with --count or --report.)
-g, --timing          Prefix each line with the time since the program started.
                      Only used while tracing.

Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
                      (if it is a package). Accepts comma separated
                      list of module names
--ignore-dir=<dir>    Ignore files in the given directory (multiple
                      directories can be joined by os.pathsep).
""" % sys.argv[0])
# Marker comment: source lines containing this are excluded from coverage.
PRAGMA_NOCOVER = "#pragma NO COVER"

# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
    """Decides which (filename, modulename) pairs tracing should skip.

    Verdicts are memoized per module name in self._ignore.
    """

    def __init__(self, modules=None, dirs=None):
        self._mods = modules or []
        self._dirs = [os.path.normpath(d) for d in (dirs or [])]
        # '<string>' (exec'd code) is always ignored; the dict doubles as
        # the memoization cache for names().
        self._ignore = {'<string>': 1}

    def names(self, filename, modulename):
        """Return 1 if the module should be ignored, else 0 (memoized)."""
        try:
            return self._ignore[modulename]
        except KeyError:
            pass
        verdict = self._decide(filename, modulename)
        self._ignore[modulename] = verdict
        return verdict

    def _decide(self, filename, modulename):
        # Module-name based ignores: an exact match, or a proper submodule
        # of an ignored package ("Spam" ignores "Spam.Eggs" but not
        # "Spamcache").
        for mod in self._mods:
            if modulename == mod or modulename.startswith(mod + '.'):
                return 1

        # No filename means a built-in module: always ignore.
        if filename is None:
            return 1

        # Directory-based ignores: the file lives under an ignored dir.
        # '+ os.sep' ensures d is a parent *directory* rather than a mere
        # string prefix (d='/usr/local' must not match '/usr/local.py').
        for d in self._dirs:
            if filename.startswith(d + os.sep):
                return 1

        return 0
def modname(path):
    """Return a plausible module name for the path (basename, extension dropped)."""
    stem, _ext = os.path.splitext(os.path.basename(path))
    return stem
def fullmodname(path):
    """Return a plausible dotted module name for the path.

    A bare filename cannot uniquely identify a module inside a package, so
    look for the longest sys.path entry that is a parent of *path* and treat
    the remainder as package.module components.
    """
    comparepath = os.path.normcase(path)
    longest = ""
    for entry in sys.path:
        entry = os.path.normcase(entry)
        if comparepath.startswith(entry) and comparepath[len(entry)] == os.sep:
            if len(entry) > len(longest):
                longest = entry

    base = path[len(longest) + 1:] if longest else path
    # the drive letter is never part of the module name
    _drive, base = os.path.splitdrive(base)
    base = base.replace(os.sep, ".")
    if os.altsep:
        base = base.replace(os.altsep, ".")
    stem, _ext = os.path.splitext(base)
    return stem.lstrip(".")
class CoverageResults:
    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        """Hold and merge trace results.

        counts -- maps (filename, lineno) -> execution count
        calledfuncs -- maps (filename, modulename, funcname) -> 1
        callers -- maps (caller triple, callee triple) -> 1
        infile -- pickle file to merge previous results from (best effort)
        outfile -- pickle file results may later be written to
        """
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy() # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                counts, calledfuncs, callers = \
                        pickle.load(open(self.infile, 'rb'))
                self.update(self.__class__(counts, calledfuncs, callers))
            # Merging is best-effort: a missing or corrupt counts file is
            # reported to stderr, not fatal.
            except (IOError, EOFError, ValueError), err:
                print >> sys.stderr, ("Skipping counts file %r: %s"
                                      % (self.infile, err))
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts.keys():
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs.keys():
calledfuncs[key] = 1
for key in other_callers.keys():
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
@param coverdir
"""
if self.calledfuncs:
print
print "functions called:"
calls = self.calledfuncs.keys()
calls.sort()
for filename, modulename, funcname in calls:
print ("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname))
if self.callers:
print
print "calling relationships:"
calls = self.callers.keys()
calls.sort()
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
if pfile != lastfile:
print
print "***", pfile, "***"
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print " -->", cfile
lastcfile = cfile
print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts.keys():
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.iteritems():
# skip some "files" we don't care about...
if filename == "<string>":
continue
if filename.startswith("<doctest "):
continue
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = find_executable_linenos(filename)
else:
lnotab = {}
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count)
if summary and n_lines:
percent = 100 * n_hits // n_lines
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
mods = sums.keys()
mods.sort()
print "lines cov% module (path)"
for m in mods:
n_lines, percent, modulename, filename = sums[m]
print "%5d %3d%% %s (%s)" % sums[m]
if self.outfile:
# try and store counts and module info into self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except IOError, err:
print >> sys.stderr, "Can't save counts files because %s" % err
def write_results_file(self, path, lines, lnotab, lines_hit):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w")
except IOError, err:
print >> sys.stderr, ("trace: Could not open %r for writing: %s"
"- skipping" % (path, err))
return 0, 0
n_lines = 0
n_hits = 0
for i, line in enumerate(lines):
lineno = i + 1
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(lines[i].expandtabs(8))
outfile.close()
return n_hits, n_lines
def find_lines_from_code(code, strs):
    """Return a dict whose keys are the line numbers from *code*'s
    line number table, excluding any line listed in *strs*."""
    return {line: 1
            for _offset, line in dis.findlinestarts(code)
            if line not in strs}
def find_lines(code, strs):
    """Return lineno dict for all code objects reachable from code."""
    # This scope's own line numbers first...
    linenos = find_lines_from_code(code, strs)
    # ...then recurse into every nested code object (functions, classes,
    # lambdas) stashed in the constants.
    nested = (const for const in code.co_consts if inspect.iscode(const))
    for child in nested:
        linenos.update(find_lines(child, strs))
    return linenos
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to 1.  There is an entry for each line
    that contains only a string or a part of a triple-quoted string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    # Use a with-block so the file is closed even when tokenizing raises
    # (e.g. a source file with a syntax/tokenize error); the original
    # leaked the handle in that case.
    with open(filename) as f:
        for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    # Mark every line the (possibly triple-quoted) string spans.
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
def find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    # "rU" = universal-newline read mode (Python 2).
    try:
        prog = open(filename, "rU").read()
    except IOError, err:
        # Unreadable source file: warn and report no executable lines.
        print >> sys.stderr, ("Not printing coverage data for %r: %s"
                              % (filename, err))
        return {}
    code = compile(prog, filename, "exec")
    # Exclude docstring-only lines so they aren't counted as executable.
    strs = find_strings(filename)
    return find_lines(code, strs)
class Trace:
    """Drives sys.settrace-based tracing: line counting, line printing,
    called-function listing, and caller/callee tracking, depending on
    the constructor flags.  Results are retrieved via results()."""
    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
                 timing=False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.blabbed = {} # for debugging
        self.pathtobasename = {} # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        if timing:
            self.start_time = time.time()
        # Select the global/local trace hooks matching the requested mode.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # Ahem -- do nothing?  Okay.
            self.donothing = 1
    def run(self, cmd):
        """Trace *cmd* (a code string/object) in __main__'s namespace."""
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)
    def runctx(self, cmd, globals=None, locals=None):
        """Trace *cmd* executed with the given globals/locals dicts."""
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            exec cmd in globals, locals
        finally:
            # Always unhook, even if the traced code raised.
            if not self.donothing:
                _unsettrace()
    def runfunc(self, func, *args, **kw):
        """Trace a single call of func(*args, **kw); return its result."""
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result
    def file_module_function_of(self, frame):
        # Build a (filename, modulename, funcname) triple for the frame,
        # attempting (via gc) to qualify methods with their class name.
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = modname(filename)
        else:
            modulename = None
        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                         if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                             if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                                   if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)
        return filename, modulename, funcname
    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.

        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1
    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.

        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1
    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.

        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print (" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name))
                        return self.localtrace
            else:
                # No __file__ (e.g. exec'd code): don't trace into it.
                return None
    def localtrace_trace_and_count(self, frame, why, arg):
        # Per-line hook: both print the executing line and count it.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace
    def localtrace_trace(self, frame, why, arg):
        # Per-line hook: print the executing line (no counting).
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace
    def localtrace_count(self, frame, why, arg):
        # Per-line hook: count executions only.
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace
    def results(self):
        """Return the gathered data as a CoverageResults instance."""
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
    """Command-line entry point: parse options, then either produce a
    report from an existing counts file or trace the given script."""
    import getopt
    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
                                        ["help", "version", "trace", "count",
                                         "report", "no-report", "summary",
                                         "file=", "missing",
                                         "ignore-module=", "ignore-dir=",
                                         "coverdir=", "listfuncs",
                                         "trackcalls", "timing"])
    except getopt.error, msg:
        sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n"
                         % sys.argv[0])
        sys.exit(1)
    # Option defaults.
    trace = 0
    count = 0
    report = 0
    no_report = 0
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False
    timing = False
    for opt, val in opts:
        if opt == "--help":
            usage(sys.stdout)
            sys.exit(0)
        if opt == "--version":
            sys.stdout.write("trace 2.0\n")
            sys.exit(0)
        if opt == "-T" or opt == "--trackcalls":
            countcallers = True
            continue
        if opt == "-l" or opt == "--listfuncs":
            listfuncs = True
            continue
        if opt == "-g" or opt == "--timing":
            timing = True
            continue
        if opt == "-t" or opt == "--trace":
            trace = 1
            continue
        if opt == "-c" or opt == "--count":
            count = 1
            continue
        if opt == "-r" or opt == "--report":
            report = 1
            continue
        if opt == "-R" or opt == "--no-report":
            no_report = 1
            continue
        if opt == "-f" or opt == "--file":
            counts_file = val
            continue
        if opt == "-m" or opt == "--missing":
            missing = 1
            continue
        if opt == "-C" or opt == "--coverdir":
            coverdir = val
            continue
        if opt == "-s" or opt == "--summary":
            summary = 1
            continue
        if opt == "--ignore-module":
            for mod in val.split(","):
                ignore_modules.append(mod.strip())
            continue
        if opt == "--ignore-dir":
            # Each path may expand $prefix / $exec_prefix and env vars.
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                # should I also call expanduser? (after all, could use $HOME)
                s = s.replace("$prefix",
                              os.path.join(sys.prefix, "lib",
                                           "python" + sys.version[:3]))
                s = s.replace("$exec_prefix",
                              os.path.join(sys.exec_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)
            continue
        assert 0, "Should never get here"
    # Validate mutually-exclusive / required option combinations.
    if listfuncs and (count or trace):
        _err_exit("cannot specify both --listfuncs and (--trace or --count)")
    if not (count or trace or report or listfuncs or countcallers):
        _err_exit("must specify one of --trace, --count, --report, "
                  "--listfuncs, or --trackcalls")
    if report and no_report:
        _err_exit("cannot specify both --report and --no-report")
    if report and not counts_file:
        _err_exit("--report requires a --file")
    if no_report and len(prog_argv) == 0:
        _err_exit("missing name of file to run")
    # everything is ready
    if report:
        # Report-only mode: read counts from file, write results.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Run-and-trace mode: pretend the traced script was run directly.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]
        t = Trace(count, trace, countfuncs=listfuncs,
                  countcallers=countcallers, ignoremods=ignore_modules,
                  ignoredirs=ignore_dirs, infile=counts_file,
                  outfile=counts_file, timing=timing)
        try:
            with open(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            # try to emulate __main__ namespace as much as possible
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
                '__cached__': None,
            }
            t.runctx(code, globs, globs)
        except IOError, err:
            _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
        except SystemExit:
            pass
        results = t.results()
        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
| gpl-3.0 |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/django/contrib/gis/tests/relatedapp/models.py | 20 | 1525 | from django.contrib.gis.db import models
from django.contrib.localflavor.us.models import USStateField
class Location(models.Model):
    """A bare geographic point; GeoManager enables spatial lookups."""
    point = models.PointField()
    objects = models.GeoManager()
    def __unicode__(self): return self.point.wkt
class City(models.Model):
    """A named city with a US state, linked to a Location point."""
    name = models.CharField(max_length=50)
    state = USStateField()
    location = models.ForeignKey(Location)
    objects = models.GeoManager()
    def __unicode__(self): return self.name
class AugmentedLocation(Location):
    """Multi-table-inheritance child of Location adding free-form text."""
    extra_text = models.TextField(blank=True)
    objects = models.GeoManager()
class DirectoryEntry(models.Model):
    """A listing that points at an AugmentedLocation (inherited model FK)."""
    listing_text = models.CharField(max_length=50)
    location = models.ForeignKey(AugmentedLocation)
    objects = models.GeoManager()
class Parcel(models.Model):
    """A land parcel with geometry columns in two SRIDs (4326 default
    and 2276) to exercise transform behavior; center2 also exercises
    a custom db_column name."""
    name = models.CharField(max_length=30)
    city = models.ForeignKey(City)
    center1 = models.PointField()
    # Throwing a curveball w/`db_column` here.
    center2 = models.PointField(srid=2276, db_column='mycenter')
    border1 = models.PolygonField()
    border2 = models.PolygonField(srid=2276)
    objects = models.GeoManager()
    def __unicode__(self): return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(models.Model):
    """Non-geographic model that still uses GeoManager."""
    name = models.CharField(max_length=100)
    objects = models.GeoManager()
class Book(models.Model):
    """Non-geographic model with a nullable FK to Author (GeoManager-managed)."""
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author, related_name='books', null=True)
    objects = models.GeoManager()
| mit |
dhruvsrivastava/OJ | flask/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_select.py | 78 | 5729 | from .. import fixtures, config
from ..assertions import eq_
from sqlalchemy import util
from sqlalchemy import Integer, String, select, func, bindparam
from sqlalchemy import testing
from ..schema import Table, Column
class OrderByLabelTest(fixtures.TablesTest):
    """Test the dialect sends appropriate ORDER BY expressions when
    labels are used.

    This essentially exercises the "supports_simple_order_by_label"
    setting.
    """
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # Two int and two string columns so both arithmetic and string
        # concatenation labels can be ordered on.
        Table("some_table", metadata,
              Column('id', Integer, primary_key=True),
              Column('x', Integer),
              Column('y', Integer),
              Column('q', String(50)),
              Column('p', String(50))
              )
    @classmethod
    def insert_data(cls):
        config.db.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
                {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
                {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
            ]
        )
    def _assert_result(self, select, result):
        # Run the statement against the configured backend and compare rows.
        eq_(
            config.db.execute(select).fetchall(),
            result
        )
    def test_plain(self):
        # ORDER BY a plain labeled column.
        table = self.tables.some_table
        lx = table.c.x.label('lx')
        self._assert_result(
            select([lx]).order_by(lx),
            [(1, ), (2, ), (3, )]
        )
    def test_composed_int(self):
        # ORDER BY a label of an arithmetic expression.
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label('lx')
        self._assert_result(
            select([lx]).order_by(lx),
            [(3, ), (5, ), (7, )]
        )
    def test_composed_multiple(self):
        # Two composed labels, one ascending and one descending.
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label('lx')
        ly = (func.lower(table.c.q) + table.c.p).label('ly')
        self._assert_result(
            select([lx, ly]).order_by(lx, ly.desc()),
            [(3, util.u('q1p3')), (5, util.u('q2p2')), (7, util.u('q3p1'))]
        )
    def test_plain_desc(self):
        table = self.tables.some_table
        lx = table.c.x.label('lx')
        self._assert_result(
            select([lx]).order_by(lx.desc()),
            [(3, ), (2, ), (1, )]
        )
    def test_composed_int_desc(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label('lx')
        self._assert_result(
            select([lx]).order_by(lx.desc()),
            [(7, ), (5, ), (3, )]
        )
    def test_group_by_composed(self):
        # The same label used in both GROUP BY and ORDER BY.
        table = self.tables.some_table
        expr = (table.c.x + table.c.y).label('lx')
        stmt = select([func.count(table.c.id), expr]).group_by(expr).order_by(expr)
        self._assert_result(
            stmt,
            [(1, 3), (1, 5), (1, 7)]
        )
class LimitOffsetTest(fixtures.TablesTest):
    """Exercise LIMIT/OFFSET rendering: plain values, literal-bind
    compilation, and bound parameters (per backend capability flags)."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table("some_table", metadata,
              Column('id', Integer, primary_key=True),
              Column('x', Integer),
              Column('y', Integer))
    @classmethod
    def insert_data(cls):
        config.db.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ]
        )
    def _assert_result(self, select, result, params=()):
        eq_(
            config.db.execute(select, params).fetchall(),
            result
        )
    def test_simple_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2),
            [(1, 1, 2), (2, 2, 3)]
        )
    @testing.requires.offset
    def test_simple_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(2),
            [(3, 3, 4), (4, 4, 5)]
        )
    @testing.requires.offset
    def test_simple_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2).offset(1),
            [(2, 2, 3), (3, 3, 4)]
        )
    @testing.requires.offset
    def test_limit_offset_nobinds(self):
        """test that 'literal binds' mode works - no bound params."""
        table = self.tables.some_table
        stmt = select([table]).order_by(table.c.id).limit(2).offset(1)
        # Compile to a plain SQL string with the limit/offset inlined.
        sql = stmt.compile(
            dialect=config.db.dialect,
            compile_kwargs={"literal_binds": True})
        sql = str(sql)
        self._assert_result(
            sql,
            [(2, 2, 3), (3, 3, 4)]
        )
    @testing.requires.bound_limit_offset
    def test_bound_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(bindparam('l')),
            [(1, 1, 2), (2, 2, 3)],
            params={"l": 2}
        )
    @testing.requires.bound_limit_offset
    def test_bound_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(bindparam('o')),
            [(3, 3, 4), (4, 4, 5)],
            params={"o": 2}
        )
    @testing.requires.bound_limit_offset
    def test_bound_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).
            limit(bindparam("l")).offset(bindparam("o")),
            [(2, 2, 3), (3, 3, 4)],
            params={"l": 2, "o": 1}
        )
| bsd-3-clause |
indeedops/dd-agent | tests/checks/integration/test_windows_service.py | 8 | 1127 | # 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
# Check config targeting the local machine ('.'); the last service name
# intentionally does not exist so the check must report it CRITICAL.
INSTANCE = {
    'host': '.',
    'services': ['EventLog', 'Dnscache', 'NonExistingService'],
}
# Config with an unreachable host: every service check should go CRITICAL.
INVALID_HOST_INSTANCE = {
    'host': 'nonexistinghost',
    'services': ['EventLog'],
}
@attr('windows')
@attr(requires='windows')
class WindowsServiceTest(AgentCheckTest):
    """Integration tests for the windows_service check (Windows-only)."""
    CHECK_NAME = 'windows_service'
    SERVICE_CHECK_NAME = 'windows_service.state'
    def test_basic_check(self):
        # Existing services report OK; the nonexistent one reports CRITICAL.
        self.run_check({'instances': [INSTANCE]})
        self.assertServiceCheckOK(self.SERVICE_CHECK_NAME, tags=['service:EventLog'], count=1)
        self.assertServiceCheckOK(self.SERVICE_CHECK_NAME, tags=['service:Dnscache'], count=1)
        self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME, tags=['service:NonExistingService'], count=1)
        self.coverage_report()
    def test_invalid_host(self):
        # An unreachable host makes the service check CRITICAL, tagged with the host.
        self.run_check({'instances': [INVALID_HOST_INSTANCE]})
        self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME, tags=['host:nonexistinghost', 'service:EventLog'], count=1)
        self.coverage_report()
| bsd-3-clause |
maelnor/nova | nova/tests/api/openstack/compute/contrib/test_evacuate.py | 3 | 11001 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
    """Stand-in for compute API calls: accept anything, report success."""
    del args, kwargs  # signature-compatible with any API method; unused
    return True
def fake_compute_api_get(self, context, instance_id, want_objects=False,
                         **kwargs):
    """Fake for compute_api.API.get.

    'BAD_UUID' simulates a missing instance; any other id yields an
    ACTIVE fake instance on host1.
    """
    # BAD_UUID is something that does not exist
    if instance_id == 'BAD_UUID':
        raise exception.InstanceNotFound(instance_id=instance_id)
    return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
                                           task_state=None, host='host1',
                                           vm_state=vm_states.ACTIVE)
def fake_service_get_by_compute_host(self, context, host):
    """Fake for HostAPI.service_get_by_compute_host.

    The sentinel name 'bad-host' simulates an unknown compute host;
    every other name yields a minimal compute-service record.
    """
    if host == 'bad-host':
        raise exception.ComputeHostNotFound(host=host)
    service_record = {
        'host_name': host,
        'service': 'compute',
        'zone': 'nova'
    }
    return service_record
class EvacuateTestV21(test.NoDBTestCase):
    """Tests for the v2.1 evacuate server action: parameter validation,
    admin enforcement, password handling, and error mapping.  Compute
    API calls are stubbed so no real compute service is touched."""
    _methods = ('resize', 'evacuate')
    fake_url = '/v2/fake'
    def setUp(self):
        super(EvacuateTestV21, self).setUp()
        # Stub out all compute-facing calls so requests never leave the API.
        self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
        self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.UUID = uuid.uuid4()
        for _method in self._methods:
            self.stubs.Set(compute_api.API, _method, fake_compute_api)
    def _fake_wsgi_app(self, ctxt):
        return fakes.wsgi_app_v21(fake_auth_context=ctxt)
    def _gen_resource_with_app(self, json_load, is_admin=True, uuid=None):
        # Build a POST .../servers/<uuid>/action request carrying the
        # evacuate payload and run it through the WSGI app.
        ctxt = context.get_admin_context()
        ctxt.user_id = 'fake'
        ctxt.project_id = 'fake'
        ctxt.is_admin = is_admin
        app = self._fake_wsgi_app(ctxt)
        req = webob.Request.blank('%s/servers/%s/action' % (self.fake_url,
                                                            uuid or self.UUID))
        req.method = 'POST'
        base_json_load = {'evacuate': json_load}
        req.body = jsonutils.dumps(base_json_load)
        req.content_type = 'application/json'
        return req.get_response(app)
    def _fake_update(self, inst, context, instance, task_state,
                     expected_task_state):
        return None
    def test_evacuate_with_valid_instance(self):
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 200)
    def test_evacuate_with_invalid_instance(self):
        # Unknown instance UUID maps to HTTP 404.
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'},
                                          uuid='BAD_UUID')
        self.assertEqual(res.status_int, 404)
    def test_evacuate_with_active_service(self):
        # Evacuating to a host whose service is still up is a 400.
        def fake_evacuate(*args, **kwargs):
            raise exception.ComputeServiceInUse("Service still in use")
        self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 400)
    def test_evacuate_instance_with_no_target(self):
        # v2.1 allows omitting the target host (scheduler picks one).
        res = self._gen_resource_with_app({'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(200, res.status_int)
    def test_evacuate_instance_without_on_shared_storage(self):
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 400)
    def test_evacuate_instance_with_invalid_characters_host(self):
        host = 'abc!#'
        res = self._gen_resource_with_app({'host': host,
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(400, res.status_int)
    def test_evacuate_instance_with_too_long_host(self):
        host = 'a' * 256
        res = self._gen_resource_with_app({'host': host,
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(400, res.status_int)
    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'foo',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(400, res.status_int)
    def test_evacuate_instance_with_bad_target(self):
        res = self._gen_resource_with_app({'host': 'bad-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 404)
    def test_evacuate_instance_with_target(self):
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 200)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual("MyNewPass", resp_json['adminPass'])
    def test_evacuate_shared_and_pass(self):
        # Supplying a password is invalid when storage is shared.
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'True',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 400)
    def test_evacuate_not_shared_pass_generated(self):
        # No password given on non-shared storage: one is generated.
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'False'})
        self.assertEqual(res.status_int, 200)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
    def test_evacuate_shared(self):
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'True'})
        self.assertEqual(res.status_int, 200)
    def test_not_admin(self):
        # Evacuate is admin-only: non-admin context gets 403.
        res = self._gen_resource_with_app({'host': 'my-host',
                                           'onSharedStorage': 'True'},
                                          is_admin=False)
        self.assertEqual(res.status_int, 403)
    def test_evacuate_to_same_host(self):
        # The fake instance lives on host1; evacuating there is rejected.
        res = self._gen_resource_with_app({'host': 'host1',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 400)
    def test_evacuate_instance_with_empty_host(self):
        res = self._gen_resource_with_app({'host': '',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(400, res.status_int)
    def test_evacuate_instance_with_underscore_in_hostname(self):
        # NOTE: The hostname grammar in RFC952 does not allow for
        # underscores in hostnames. However, we should test that it
        # is supported because it sometimes occurs in real systems.
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual("MyNewPass", resp_json['adminPass'])
    def test_evacuate_disable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(False)
    def test_evacuate_enable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(True)
    def _test_evacuate_enable_instance_password_conf(self, enable_pass):
        # adminPass appears in the response only when the config flag is on.
        self.flags(enable_instance_password=enable_pass)
        self.stubs.Set(compute_api.API, 'update', self._fake_update)
        res = self._gen_resource_with_app({'host': 'my_host',
                                           'onSharedStorage': 'False'})
        self.assertEqual(res.status_int, 200)
        resp_json = jsonutils.loads(res.body)
        if enable_pass:
            self.assertIn('adminPass', resp_json)
        else:
            self.assertIsNone(resp_json.get('adminPass'))
class EvacuateTestV2(EvacuateTestV21):
    """Re-run the evacuate tests against the legacy v2 API.

    v2 requires a target host and does no schema validation, so the
    validation-specific v2.1 tests are overridden to no-ops.
    """
    def setUp(self):
        super(EvacuateTestV2, self).setUp()
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Evacuate'])
    def _fake_wsgi_app(self, ctxt):
        return fakes.wsgi_app(fake_auth_context=ctxt)
    def test_evacuate_instance_with_no_target(self):
        # Unlike v2.1, v2 requires a target host.
        res = self._gen_resource_with_app({'onSharedStorage': 'False',
                                           'adminPass': 'MyNewPass'})
        self.assertEqual(400, res.status_int)
    def test_evacuate_instance_with_too_long_host(self):
        pass
    def test_evacuate_instance_with_invalid_characters_host(self):
        pass
    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        pass
    def test_evacuate_disable_password_return(self):
        pass
    def test_evacuate_enable_password_return(self):
        pass
| apache-2.0 |
nwjs/chromium.src | tools/flakiness/is_flaky_test.py | 94 | 2089 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for is_flaky."""
import is_flaky
import subprocess
import sys
import threading
import unittest
class IsFlakyTest(unittest.TestCase):
  """Unit tests for is_flaky.main with subprocess.check_call stubbed out.

  Each queued value in check_call_results is returned for one simulated
  test run (0 = pass, nonzero = fail); an empty queue means all passes.
  """

  def setUp(self):
    # Replace subprocess.check_call with a recording stub and install
    # canned command-line options.
    self.original_subprocess_check_call = subprocess.check_call
    subprocess.check_call = self.mock_check_call
    self.check_call_calls = []
    self.check_call_results = []
    is_flaky.load_options = self.mock_load_options

  def tearDown(self):
    # Restore the real subprocess.check_call.
    subprocess.check_call = self.original_subprocess_check_call

  def mock_check_call(self, command, stdout, stderr):
    self.check_call_calls.append(command)
    if self.check_call_results:
      return self.check_call_results.pop(0)
    else:
      return 0

  def mock_load_options(self):
    # 10 retries with a 30% flakiness threshold: >3 failures = flaky.
    class MockOptions():
      jobs = 2
      retries = 10
      threshold = 0.3
      command = ['command', 'param1', 'param2']
    return MockOptions()

  def testExecutesTestCorrectNumberOfTimes(self):
    is_flaky.main()
    self.assertEqual(len(self.check_call_calls), 10)

  def testExecutesTestWithCorrectArguments(self):
    is_flaky.main()
    for call in self.check_call_calls:
      self.assertEqual(call, ['command', 'param1', 'param2'])

  def testReturnsNonFlakyForAllSuccesses(self):
    self.check_call_results = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ret_code = is_flaky.main()
    self.assertEqual(ret_code, 0)

  def testReturnsNonFlakyForAllFailures(self):
    # Consistent failure is "broken", not "flaky".
    self.check_call_results = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    ret_code = is_flaky.main()
    self.assertEqual(ret_code, 0)

  def testReturnsNonFlakyForSmallNumberOfFailures(self):
    # 2/10 failures is below the 0.3 threshold.
    self.check_call_results = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    ret_code = is_flaky.main()
    self.assertEqual(ret_code, 0)

  def testReturnsFlakyForLargeNumberOfFailures(self):
    # 4/10 failures exceeds the 0.3 threshold.
    self.check_call_results = [1, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    ret_code = is_flaky.main()
    self.assertEqual(ret_code, 1)
self.assertEqual(ret_code, 1)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
borosnborea/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/setuptools/command/build_py.py | 55 | 9596 | from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import fnmatch
import textwrap
import io
import distutils.errors
import itertools
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter, filterfalse
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    # Fallback when lib2to3 support is unavailable: 2to3 conversion
    # becomes a no-op so build_py still works on pure-Py3 trees.
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(orig.build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages

    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.

    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """

    def finalize_options(self):
        """Pull package_data/exclude_package_data off the distribution."""
        orig.build_py.finalize_options(self)
        self.package_data = self.distribution.package_data
        self.exclude_package_data = (self.distribution.exclude_package_data or
                                     {})
        # Drop any cached value so __getattr__ recomputes data_files lazily.
        if 'data_files' in self.__dict__:
            del self.__dict__['data_files']
        self.__updated_files = []
        self.__doctests_2to3 = []

    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return

        if self.py_modules:
            self.build_modules()

        if self.packages:
            self.build_packages()
            self.build_package_data()

        # NOTE(review): __updated_files is converted twice (doctests=False
        # then doctests=True) before the doctest list -- looks intentional
        # upstream, but confirm before relying on it.
        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)

        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))

    def __getattr__(self, attr):
        "lazily compute data files"
        if attr == 'data_files':
            self.data_files = self._get_data_files()
            return self.data_files
        return orig.build_py.__getattr__(self, attr)

    def build_module(self, module, module_file, package):
        """Copy one module into the build tree, tracking it for 2to3."""
        if six.PY2 and isinstance(package, six.string_types):
            # avoid errors on Python 2 when unicode is passed (#190)
            package = package.split('.')
        outfile, copied = orig.build_py.build_module(self, module, module_file,
                                                     package)
        if copied:
            self.__updated_files.append(outfile)
        return outfile, copied

    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        return list(map(self._get_pkg_data_files, self.packages or ()))

    def _get_pkg_data_files(self, package):
        """Build the (package, src_dir, build_dir, filenames) tuple for one package."""
        # Locate package source directory
        src_dir = self.get_package_dir(package)

        # Compute package build directory
        build_dir = os.path.join(*([self.build_lib] + package.split('.')))

        # Strip directory from globbed filenames
        filenames = [
            os.path.relpath(file, src_dir)
            for file in self.find_data_files(package, src_dir)
        ]
        return package, src_dir, build_dir, filenames

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        patterns = self._get_platform_patterns(
            self.package_data,
            package,
            src_dir,
        )
        globs_expanded = map(glob, patterns)
        # flatten the expanded globs into an iterable of matches
        globs_matches = itertools.chain.from_iterable(globs_expanded)
        glob_files = filter(os.path.isfile, globs_matches)
        # Manifest-discovered files come first, then glob matches.
        files = itertools.chain(
            self.manifest_files.get(package, []),
            glob_files,
        )
        return self.exclude_data_files(package, src_dir, files)

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                # Files registered as 2to3 doctests get converted later.
                if (copied and
                        srcfile in self.distribution.convert_2to3_doctests):
                    self.__doctests_2to3.append(outf)

    def analyze_manifest(self):
        """Map manifest (egg-info) files to their owning packages."""
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package

        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d, f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk up the directory tree until we hit a known package dir.
            while d and d != prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f == oldf:
                    continue  # it's a module, not data
                mf.setdefault(src_dirs[d], []).append(path)

    def get_data_files(self):
        pass  # Lazily compute data files in _get_data_files() function.

    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            # Memoized: each package is checked at most once.
            return self.packages_checked[package]
        except KeyError:
            pass

        init_py = orig.build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py

        if not init_py or not self.distribution.namespace_packages:
            return init_py

        # Only validate packages that are (or contain) declared namespaces.
        for pkg in self.distribution.namespace_packages:
            if pkg == package or pkg.startswith(package + '.'):
                break
        else:
            return init_py

        with io.open(init_py, 'rb') as f:
            contents = f.read()
        if b'declare_namespace' not in contents:
            raise distutils.errors.DistutilsError(
                "Namespace package problem: %s is a namespace package, but "
                "its\n__init__.py does not call declare_namespace()! Please "
                'fix it.\n(See the setuptools manual under '
                '"Namespace Packages" for details.)\n"' % (package,)
            )
        return init_py

    def initialize_options(self):
        self.packages_checked = {}
        orig.build_py.initialize_options(self)

    def get_package_dir(self, package):
        """Resolve the package dir, honoring an alternate src_root if set."""
        res = orig.build_py.get_package_dir(self, package)
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res

    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        files = list(files)
        patterns = self._get_platform_patterns(
            self.exclude_package_data,
            package,
            src_dir,
        )
        match_groups = (
            fnmatch.filter(files, pattern)
            for pattern in patterns
        )
        # flatten the groups of matches into an iterable of matches
        matches = itertools.chain.from_iterable(match_groups)
        bad = set(matches)
        keepers = (
            fn
            for fn in files
            if fn not in bad
        )
        # ditch dupes
        return list(_unique_everseen(keepers))

    @staticmethod
    def _get_platform_patterns(spec, package, src_dir):
        """
        yield platfrom-specific path patterns (suitable for glob
        or fn_match) from a glob-based spec (such as
        self.package_data or self.exclude_package_data)
        matching package in src_dir.
        """
        # The '' key holds patterns applying to every package.
        raw_patterns = itertools.chain(
            spec.get('', []),
            spec.get(package, []),
        )
        return (
            # Each pattern has to be converted to a platform-specific path
            os.path.join(src_dir, convert_path(pattern))
            for pattern in raw_patterns
        )
# from Python docs
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def assert_relative(path):
    """Return *path* unchanged if it is relative; raise otherwise."""
    if not os.path.isabs(path):
        return path
    from distutils.errors import DistutilsSetupError
    template = textwrap.dedent("""
        Error: setup script specifies an absolute path:

            %s

        setup() arguments must *always* be /-separated paths relative to the
        setup.py directory, *never* absolute paths.
        """).lstrip()
    raise DistutilsSetupError(template % path)
| gpl-3.0 |
codekaki/odoo | addons/project_gtd/project_gtd.py | 53 | 5207 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class project_gtd_context(osv.osv):
    """GTD context: the place/tool in which a task can be processed."""
    _name = "project.gtd.context"
    _description = "Context"
    _columns = {
        'name': fields.char('Context', size=64, required=True, select=1, translate=1),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of contexts."),
    }
    _defaults = {
        'sequence': 1
    }
    # Records sort by sequence first, then name.
    _order = "sequence, name"

project_gtd_context()
class project_gtd_timebox(osv.osv):
    """GTD timebox: a time-lapse during which tasks are to be treated."""
    _name = "project.gtd.timebox"
    _order = "sequence"
    _columns = {
        'name': fields.char('Timebox', size=64, required=True, select=1, translate=1),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of timebox."),
        'icon': fields.selection(tools.icons, 'Icon', size=64),
    }

project_gtd_timebox()
class project_task(osv.osv):
    """Extend project.task with GTD timebox/context fields and helpers."""
    _inherit = "project.task"
    _columns = {
        'timebox_id': fields.many2one('project.gtd.timebox', "Timebox",help="Time-laps during which task has to be treated"),
        'context_id': fields.many2one('project.gtd.context', "Context",help="The context place where user has to treat task"),
    }

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Reset GTD fields on duplication so copies start unscheduled."""
        if context is None:
            context = {}
        if not default:
            default = {}
        default['timebox_id'] = False
        default['context_id'] = False
        return super(project_task,self).copy_data(cr, uid, id, default, context)

    def _get_context(self, cr, uid, context=None):
        # Default to the first context (ordered by sequence, name), if any.
        ids = self.pool.get('project.gtd.context').search(cr, uid, [], context=context)
        return ids and ids[0] or False

    _defaults = {
        'context_id': _get_context
    }

    def next_timebox(self, cr, uid, ids, *args):
        """Advance each task to the next timebox; no-op on the last one."""
        timebox_obj = self.pool.get('project.gtd.timebox')
        timebox_ids = timebox_obj.search(cr,uid,[])
        if not timebox_ids: return True
        for task in self.browse(cr,uid,ids):
            timebox = task.timebox_id.id
            if not timebox:
                # Unscheduled tasks start at the first timebox.
                self.write(cr, uid, task.id, {'timebox_id': timebox_ids[0]})
            elif timebox_ids.index(timebox) != len(timebox_ids)-1:
                index = timebox_ids.index(timebox)
                self.write(cr, uid, task.id, {'timebox_id': timebox_ids[index+1]})
        return True

    def prev_timebox(self, cr, uid, ids, *args):
        """Move each task to the previous timebox; clear it from the first."""
        timebox_obj = self.pool.get('project.gtd.timebox')
        timebox_ids = timebox_obj.search(cr,uid,[])
        for task in self.browse(cr,uid,ids):
            timebox = task.timebox_id.id
            if timebox:
                if timebox_ids.index(timebox):
                    index = timebox_ids.index(timebox)
                    self.write(cr, uid, task.id, {'timebox_id': timebox_ids[index - 1]})
                else:
                    # Already at the first timebox: unschedule the task.
                    self.write(cr, uid, task.id, {'timebox_id': False})
        return True

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Inject one search filter per timebox when the 'gtd' context is set."""
        if not context: context = {}
        res = super(project_task,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
        search_extended = False
        timebox_obj = self.pool.get('project.gtd.timebox')
        if (res['type'] == 'search') and context.get('gtd', False):
            tt = timebox_obj.browse(cr, uid, timebox_obj.search(cr,uid,[]), context=context)
            search_extended =''
            for time in tt:
                if time.icon:
                    icon = time.icon
                else :
                    icon=""
                # NOTE(review): the filter XML is built by string
                # concatenation; a timebox name containing quote characters
                # would break the generated view -- confirm names are
                # trusted admin input.
                search_extended += '''<filter domain="[('timebox_id','=', ''' + str(time.id) + ''')]" icon="''' + icon + '''" string="''' + time.name + '''" context="{'user_invisible': True}"/>\n'''
            search_extended +='''<separator orientation="vertical"/>'''

            # Splice the generated filters in at the placeholder separator.
            res['arch'] = tools.ustr(res['arch']).replace('<separator name="gtdsep"/>', search_extended)

        return res

project_task()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jnovinger/django | tests/custom_migration_operations/operations.py | 518 | 2171 | from django.db.migrations.operations.base import Operation
class TestOperation(Operation):
    """Minimal no-op Operation used to exercise migration (de)serialization."""

    def __init__(self):
        pass

    def deconstruct(self):
        # (name, args, kwargs) triple consumed by the migration writer.
        return (
            self.__class__.__name__,
            [],
            {}
        )

    @property
    def reversible(self):
        return True

    def state_forwards(self, app_label, state):
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def state_backwards(self, app_label, state):
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass
class CreateModel(TestOperation):
    # No-op operation that shares a name with Django's built-in CreateModel
    # (presumably to test custom-operation name resolution -- TODO confirm).
    pass
class ArgsOperation(TestOperation):
    """Operation deconstructing to two positional arguments."""

    def __init__(self, arg1, arg2):
        self.arg1, self.arg2 = arg1, arg2

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [self.arg1, self.arg2],
            {}
        )
class KwargsOperation(TestOperation):
    """Operation deconstructing to keyword arguments, omitting None values."""

    def __init__(self, kwarg1=None, kwarg2=None):
        self.kwarg1, self.kwarg2 = kwarg1, kwarg2

    def deconstruct(self):
        kwargs = {}
        # Only non-None kwargs are serialized.
        if self.kwarg1 is not None:
            kwargs['kwarg1'] = self.kwarg1
        if self.kwarg2 is not None:
            kwargs['kwarg2'] = self.kwarg2
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
class ArgsKwargsOperation(TestOperation):
    """Operation deconstructing to both positional and keyword arguments."""

    def __init__(self, arg1, arg2, kwarg1=None, kwarg2=None):
        self.arg1, self.arg2 = arg1, arg2
        self.kwarg1, self.kwarg2 = kwarg1, kwarg2

    def deconstruct(self):
        kwargs = {}
        # Only non-None kwargs are serialized.
        if self.kwarg1 is not None:
            kwargs['kwarg1'] = self.kwarg1
        if self.kwarg2 is not None:
            kwargs['kwarg2'] = self.kwarg2
        return (
            self.__class__.__name__,
            [self.arg1, self.arg2],
            kwargs,
        )
class ExpandArgsOperation(TestOperation):
    """Operation whose single argument is expanded by the migration writer."""
    # The writer expands the iterable bound to 'arg' during serialization.
    serialization_expand_args = ['arg']

    def __init__(self, arg):
        self.arg = arg

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [self.arg],
            {}
        )
| bsd-3-clause |
battlecat/Spirit | spirit/comment/flag/migrations/0001_initial.py | 12 | 2062 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for comment flagging: one Flag row per (user, comment)
    # report, plus a single aggregated CommentFlag per flagged comment for
    # moderators to act on.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('spirit_comment', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CommentFlag',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('is_closed', models.BooleanField(default=False)),
                ('comment', models.OneToOneField(to='spirit_comment.Comment')),
                ('moderator', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
                'verbose_name_plural': 'comments flags',
                'ordering': ['-date', '-pk'],
                'verbose_name': 'comment flag',
            },
        ),
        migrations.CreateModel(
            name='Flag',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('reason', models.IntegerField(choices=[(0, 'Spam'), (1, 'Other')], verbose_name='reason')),
                ('body', models.TextField(verbose_name='body', blank=True)),
                ('comment', models.ForeignKey(to='spirit_comment.Comment')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'flags',
                'ordering': ['-date', '-pk'],
                'verbose_name': 'flag',
            },
        ),
        # A user may flag a given comment at most once.
        migrations.AlterUniqueTogether(
            name='flag',
            unique_together=set([('user', 'comment')]),
        ),
    ]
| mit |
napsternxg/gensim | gensim/test/test_scripts.py | 4 | 6136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vimig Socrates <vimig.socrates@gmail.com> heavily influenced from @AakaashRao
# Copyright (C) 2018 Manos Stergiadis <em.stergiadis@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the output of gensim.scripts.
"""
from __future__ import unicode_literals
import json
import logging
import os.path
import unittest
import numpy as np
from gensim import utils
from gensim.scripts.segment_wiki import segment_all_articles, segment_and_write_all_articles
from gensim.test.utils import datapath, get_tmpfile
from gensim.scripts.word2vec2tensor import word2vec2tensor
from gensim.models import KeyedVectors
class TestSegmentWiki(unittest.TestCase):
    """Tests for gensim.scripts.segment_wiki against a bundled wiki dump."""

    def setUp(self):
        self.fname = datapath('enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2')
        self.expected_title = 'Anarchism'
        self.expected_section_titles = [
            'Introduction',
            'Etymology and terminology',
            'History',
            'Anarchist schools of thought',
            'Internal issues and debates',
            'Topics of interest',
            'Criticisms',
            'References',
            'Further reading',
            'External links'
        ]

    def tearDown(self):
        # remove all temporary test files
        fname = get_tmpfile('script.tst')
        extensions = ['', '.json']
        for ext in extensions:
            try:
                os.remove(fname + ext)
            except OSError:
                pass

    def test_segment_all_articles(self):
        """First article of the dump: title, sections, text and interlinks."""
        title, sections, interlinks = next(segment_all_articles(self.fname, include_interlinks=True))

        # Check title
        self.assertEqual(title, self.expected_title)

        # Check section titles
        section_titles = [s[0] for s in sections]
        self.assertEqual(section_titles, self.expected_section_titles)

        # Check text
        first_section_text = sections[0][1]
        first_sentence = "'''Anarchism''' is a political philosophy that advocates self-governed societies"
        self.assertTrue(first_sentence in first_section_text)

        # Check interlinks
        self.assertEqual(len(interlinks), 685)
        self.assertTrue(interlinks[0] == ("political philosophy", "political philosophy"))
        self.assertTrue(interlinks[1] == ("self-governance", "self-governed"))
        self.assertTrue(interlinks[2] == ("stateless society", "stateless societies"))

    def test_generator_len(self):
        """The generator yields one item per article in the dump."""
        expected_num_articles = 106
        num_articles = sum(1 for x in segment_all_articles(self.fname))

        self.assertEqual(num_articles, expected_num_articles)

    def test_json_len(self):
        """The written JSON-lines file has one line per article."""
        tmpf = get_tmpfile('script.tst.json')
        segment_and_write_all_articles(self.fname, tmpf, workers=1)

        expected_num_articles = 106
        with utils.open(tmpf, 'rb') as f:
            num_articles = sum(1 for line in f)
        self.assertEqual(num_articles, expected_num_articles)

    def test_segment_and_write_all_articles(self):
        """The first written JSON line round-trips title/sections/interlinks."""
        tmpf = get_tmpfile('script.tst.json')
        segment_and_write_all_articles(self.fname, tmpf, workers=1, include_interlinks=True)

        # Get the first line from the text file we created.
        with open(tmpf) as f:
            first = next(f)

        # decode JSON line into a Python dictionary object
        article = json.loads(first)
        title, section_titles, interlinks = article['title'], article['section_titles'], article['interlinks']

        self.assertEqual(title, self.expected_title)
        self.assertEqual(section_titles, self.expected_section_titles)

        # Check interlinks
        # JSON has no tuples, only lists. So, we convert lists to tuples explicitly before comparison.
        self.assertEqual(len(interlinks), 685)
        self.assertEqual(tuple(interlinks[0]), ("political philosophy", "political philosophy"))
        self.assertEqual(tuple(interlinks[1]), ("self-governance", "self-governed"))
        self.assertEqual(tuple(interlinks[2]), ("stateless society", "stateless societies"))
class TestWord2Vec2Tensor(unittest.TestCase):
    """Tests for gensim.scripts.word2vec2tensor TSV conversion."""

    def setUp(self):
        self.datapath = datapath('word2vec_pre_kv_c')
        self.output_folder = get_tmpfile('w2v2t_test')
        # word2vec2tensor writes these three TSV files next to output_folder.
        self.metadata_file = self.output_folder + '_metadata.tsv'
        self.tensor_file = self.output_folder + '_tensor.tsv'
        self.vector_file = self.output_folder + '_vector.tsv'

    def testConversion(self):
        """Converted TSV files must agree with the source word2vec model."""
        word2vec2tensor(word2vec_model_path=self.datapath, tensor_filename=self.output_folder)

        with utils.open(self.metadata_file, 'rb') as f:
            metadata = f.readlines()
        with utils.open(self.tensor_file, 'rb') as f:
            vectors = f.readlines()

        # check if number of words and vector size in tensor file line up with word2vec
        with utils.open(self.datapath, 'rb') as f:
            first_line = f.readline().strip()

        number_words, vector_size = map(int, first_line.split(b' '))
        self.assertTrue(len(metadata) == len(vectors) == number_words,
            ('Metadata file %s and tensor file %s imply different number of rows.'
                % (self.metadata_file, self.tensor_file)))

        # grab metadata and vectors from written file
        metadata = [word.strip() for word in metadata]
        vectors = [vector.replace(b'\t', b' ') for vector in vectors]

        # get the originaly vector KV model
        orig_model = KeyedVectors.load_word2vec_format(self.datapath, binary=False)

        # check that the KV model and tensor files have the same values key-wise
        for word, vector in zip(metadata, vectors):
            word_string = word.decode("utf8")
            vector_string = vector.decode("utf8")
            vector_array = np.array(list(map(float, vector_string.split())))
            np.testing.assert_almost_equal(orig_model[word_string], vector_array, decimal=5)
# Allow running this test file directly with verbose logging.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| gpl-3.0 |
mdkent/percona-xtrabackup | test/python/testtools/testsuite.py | 42 | 2921 | # Copyright (c) 2009 testtools developers. See LICENSE for details.
"""Test suites and related things."""
__metaclass__ = type
__all__ = [
'ConcurrentTestSuite',
'iterate_tests',
]
try:
from Queue import Queue
except ImportError:
from queue import Queue
import threading
import unittest
import testtools
def iterate_tests(test_suite_or_case):
"""Iterate through all of the test cases in 'test_suite_or_case'."""
try:
suite = iter(test_suite_or_case)
except TypeError:
yield test_suite_or_case
else:
for test in suite:
for subtest in iterate_tests(test):
yield subtest
class ConcurrentTestSuite(unittest.TestSuite):
    """A TestSuite whose run() calls out to a concurrency strategy."""

    def __init__(self, suite, make_tests):
        """Create a ConcurrentTestSuite to execute suite.

        :param suite: A suite to run concurrently.
        :param make_tests: A helper function to split the tests in the
            ConcurrentTestSuite into some number of concurrently executing
            sub-suites. make_tests must take a suite, and return an iterable
            of TestCase-like object, each of which must have a run(result)
            method.
        """
        super(ConcurrentTestSuite, self).__init__([suite])
        self.make_tests = make_tests

    def run(self, result):
        """Run the tests concurrently.

        This calls out to the provided make_tests helper, and then serialises
        the results so that result only sees activity from one TestCase at
        a time.

        ConcurrentTestSuite provides no special mechanism to stop the tests
        returned by make_tests, it is up to the make_tests to honour the
        shouldStop attribute on the result object they are run with, which will
        be set if an exception is raised in the thread which
        ConcurrentTestSuite.run is called in.
        """
        tests = self.make_tests(self)
        try:
            threads = {}
            queue = Queue()
            result_semaphore = threading.Semaphore(1)
            for test in tests:
                # Each sub-suite reports through a forwarding result that
                # serialises writes to the shared result via the semaphore.
                process_result = testtools.ThreadsafeForwardingResult(result,
                    result_semaphore)
                reader_thread = threading.Thread(
                    target=self._run_test, args=(test, process_result, queue))
                threads[test] = reader_thread, process_result
                reader_thread.start()
            # _run_test posts each finished test on the queue; join and
            # discard its worker thread as they complete.
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            # On any failure here (including KeyboardInterrupt), ask the
            # remaining sub-suites to stop, then propagate.
            for thread, process_result in threads.values():
                process_result.stop()
            raise

    def _run_test(self, test, process_result, queue):
        # Runs in a worker thread; always announce completion on the queue
        # so run() cannot deadlock waiting for this test.
        try:
            test.run(process_result)
        finally:
            queue.put(test)
| gpl-2.0 |
newemailjdm/scipy | scipy/linalg/blas.py | 16 | 6609 | """
Low-level BLAS functions
========================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
=================
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
======================
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
======================
.. autosummary::
:toctree: generated/
cgemv
cgerc
cgeru
chemv
ctrmv
csyr
cher
cher2
dgemv
dger
dsymv
dtrmv
dsyr
dsyr2
sgemv
sger
ssymv
strmv
ssyr
ssyr2
zgemv
zgerc
zgeru
zhemv
ztrmv
zsyr
zher
zher2
BLAS Level 3 functions
======================
.. autosummary::
:toctree: generated/
cgemm
chemm
cherk
cher2k
csymm
csyrk
csyr2k
dgemm
dsymm
dsyrk
dsyr2k
sgemm
ssymm
ssyrk
ssyr2k
zgemm
zhemm
zherk
zher2k
zsymm
zsyrk
zsyr2k
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# 'd' will be default for 'i',..
# Map NumPy dtype characters to BLAS prefix characters; unlisted dtypes
# (e.g. integers) fall back to 'd' at the lookup site.
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}

# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
               'cdot': 'cdotc', 'zdot': 'zdotc',
               'cger': 'cgerc', 'zger': 'zgerc',
               'sdotc': 'sdot', 'sdotu': 'sdot',
               'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
    """Find best-matching BLAS/LAPACK type.

    Arrays are used to determine the optimal prefix of BLAS routines.

    Parameters
    ----------
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of BLAS
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.

    Returns
    -------
    prefix : str
        BLAS/LAPACK prefix character.
    dtype : dtype
        Inferred Numpy data type.
    prefer_fortran : bool
        Whether to prefer Fortran order routines over C order.

    """
    dtype = _np.dtype(dtype)
    prefer_fortran = False

    if arrays:
        # use the most generic type in arrays
        dtypes = [ar.dtype for ar in arrays]
        # np.find_common_type was deprecated in NumPy 1.25 and removed in
        # NumPy 2.0; np.result_type is the documented replacement.
        dtype = _np.result_type(*dtypes)
        try:
            index = dtypes.index(dtype)
        except ValueError:
            index = 0
        if arrays[index].flags['FORTRAN']:
            # prefer Fortran for leading array with column major order
            prefer_fortran = True

    # Dtypes without dedicated BLAS routines (e.g. integers) fall back to
    # double precision.
    prefix = _type_conv.get(dtype.char, 'd')

    return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
               lib_name, fmodule, cmodule,
               fmodule_name, cmodule_name, alias):
    """
    Return available BLAS/LAPACK functions.

    Used also in lapack.py. See get_blas_funcs for docstring.
    """

    funcs = []
    # When a single name string is passed, return a bare function rather
    # than a one-element list.
    unpack = False
    dtype = _np.dtype(dtype)
    module1 = (cmodule, cmodule_name)
    module2 = (fmodule, fmodule_name)

    if isinstance(names, str):
        names = (names,)
        unpack = True

    prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)

    if prefer_fortran:
        # Column-major leading array: look in the Fortran wrapper module first.
        module1, module2 = module2, module1

    for i, name in enumerate(names):
        func_name = prefix + name
        # Resolve naming irregularities (e.g. cnrm2 -> scnrm2).
        func_name = alias.get(func_name, func_name)
        func = getattr(module1[0], func_name, None)
        module_name = module1[1]
        if func is None:
            # Fall back to the other (C/Fortran) wrapper module.
            func = getattr(module2[0], func_name, None)
            module_name = module2[1]
        if func is None:
            raise ValueError(
                '%s function %s could not be found' % (lib_name, func_name))
        # Annotate the wrapper so callers can inspect how it was resolved.
        func.module_name, func.typecode = module_name, prefix
        func.dtype = dtype
        func.prefix = prefix  # Backward compatibility
        funcs.append(func)

    if unpack:
        return funcs[0]
    else:
        return funcs
def get_blas_funcs(names, arrays=(), dtype=None):
    """Return available BLAS function objects from names.

    Arrays are used to determine the optimal prefix of BLAS routines.

    Parameters
    ----------
    names : str or sequence of str
        Name(s) of BLAS functions without type prefix.

    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of BLAS
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.

    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.

    Returns
    -------
    funcs : list
        List containing the found function(s).

    Notes
    -----
    This routine automatically chooses between Fortran/C
    interfaces. Fortran code is used whenever possible for arrays with
    column major order. In all other cases, C code is preferred.

    In BLAS, the naming convention is that all functions start with a
    type prefix, which depends on the type of the principal
    matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
    types {float32, float64, complex64, complex128} respectively.
    The code and the dtype are stored in attributes `typecode` and `dtype`
    of the returned functions.
    """
    # Thin wrapper binding this module's fblas/cblas wrapper modules and
    # alias table to the shared resolver.
    return _get_funcs(names, arrays, dtype,
                      "BLAS", _fblas, _cblas, "fblas", "cblas",
                      _blas_alias)
| bsd-3-clause |
Jandersoft/angular | scripts/ci/travis_after_all.py | 179 | 3535 | import os
import json
import time
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)

# Names of the environment variables this script reads.
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'

build_id = os.getenv(TRAVIS_BUILD_ID)
# Seconds between Travis API polls; defaults to 5.
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))

#assume, first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')

if not os.getenv(TRAVIS_JOB_NUMBER):
    # seems even for builds with only one job, this won't get here
    log.fatal("Don't use defining leader for build without matrix")
    exit(1)
elif is_leader(os.getenv(TRAVIS_JOB_NUMBER)):
    # Leader falls through and waits for the other jobs below.
    log.info("This is a leader")
else:
    #since python is subprocess, env variables are exported back via file
    with open(".to_export_back", "w") as export_var:
        export_var.write("BUILD_MINION=YES")
    log.info("This is a minion")
    exit(0)
class MatrixElement(object):
    """One job row parsed from the Travis build-matrix JSON."""

    def __init__(self, json_raw):
        self.allow_failure = json_raw['allow_failure']
        # finished_at is null while the job is still running.
        self.is_finished = json_raw['finished_at'] is not None
        # Travis result 0 means success.
        self.is_succeeded = json_raw['result'] == 0
        self.number = json_raw['number']
        self.is_leader = is_leader(self.number)
def matrix_snapshot():
    """
    :return: Matrix List
    """
    # Fetches the current build state from the Travis API. Despite the
    # variable name, the leader's own row is NOT filtered out here --
    # callers filter on el.is_leader themselves.
    response = urllib2.build_opener().open("https://api.travis-ci.org/builds/{0}".format(build_id)).read()
    raw_json = json.loads(response)
    matrix_without_leader = [MatrixElement(element) for element in raw_json["matrix"]]
    return matrix_without_leader
def wait_others_to_finish():
    """Poll the Travis API until every non-leader, non-allow-failure job
    in this build has finished."""

    def others_finished():
        """
        Dumps others to finish
        Leader cannot finish, it is working now
        :return: tuple(True or False, List of not finished jobs)
        """
        snapshot = matrix_snapshot()
        finished = [el.is_finished for el in snapshot if not (el.is_leader or el.allow_failure)]
        # all() replaces the bare `reduce` builtin, which only exists on
        # Python 2 (NameError on Python 3) and, without an initializer,
        # raises TypeError on an empty job list; all([]) is simply True.
        return all(finished), [el.number for el in snapshot if
                               not el.is_leader and not el.is_finished]

    while True:
        finished, waiting_list = others_finished()
        if finished:
            break
        log.info("Leader waits for minions {0}...".format(waiting_list))  # just in case do not get "silence timeout"
        time.sleep(polling_interval)
try:
    wait_others_to_finish()

    final_snapshot = matrix_snapshot()
    log.info("Final Results: {0}".format([(e.number, e.is_succeeded, e.allow_failure) for e in final_snapshot]))

    BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
    others_snapshot = [el for el in final_snapshot if not (el.is_leader or el.allow_failure)]
    # all() replaces the bare `reduce` builtin, which only exists on
    # Python 2 (NameError on Python 3) and, without an initializer, raised
    # TypeError on an empty snapshot (sending the script into the fatal
    # handler instead of reporting a status).
    if all(e.is_succeeded for e in others_snapshot):
        os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
    elif all(not e.is_succeeded for e in others_snapshot):
        log.error("Others Failed")
        os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
    else:
        log.warn("Others Unknown")
        os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
    #since python is subprocess, env variables are exported back via file
    with open(".to_export_back", "w") as export_var:
        export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))

except Exception as e:
    log.fatal(e)
| apache-2.0 |
suutari/shoop | shuup_tests/core/test_sales_unit.py | 1 | 1391 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from decimal import Decimal
import pytest
from django.test import override_settings
from shuup.core.models import SalesUnit
def test_sales_unit_decimals():
    """Quantity step, fraction support and rounding all follow `decimals`."""
    whole_unit = SalesUnit(decimals=0)
    tenth_unit = SalesUnit(decimals=1)
    assert whole_unit.quantity_step == 1
    assert not whole_unit.allow_fractions
    assert tenth_unit.quantity_step == Decimal("0.1")
    assert tenth_unit.allow_fractions
    assert SalesUnit(decimals=10).quantity_step == Decimal("0.0000000001")
    # Rounding honours the unit's precision.
    assert SalesUnit(decimals=2).round("1.509") == Decimal("1.51")
    assert SalesUnit(decimals=0).round("1.5") == Decimal("2")
@pytest.mark.django_db
@override_settings(**{"LANGUAGES": (("en", "en"), ("fi", "fi"))})
def test_sales_unit_str():
    """str() prefers the translated name, then the identifier, then ''."""
    unit = SalesUnit()
    # Neither identifier nor translations are set yet.
    assert str(unit) == ""
    # The identifier is the fallback when no name exists.
    unit.identifier = "test"
    assert str(unit) == "test"
    # A translated name takes precedence over the identifier.
    for lang in ("en", "fi"):
        unit.set_current_language(lang)
        unit.name = lang
        assert str(unit) == lang
    unit.set_current_language("en")
    assert unit.name == "en"
    # test fallback
    unit.set_current_language("ja")
    assert unit.name == "en"
| agpl-3.0 |
sencha/chromium-spacewalk | build/android/pylib/utils/apk_helper.py | 52 | 2208 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing utilities for apk packages."""
import os.path
import re
from pylib import cmd_helper
from pylib import constants
# Path to the `aapt` tool shipped with the Android SDK build tools.
_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')
# Matches attribute lines in `aapt dump xmltree` output, capturing the
# attribute name and its raw string value, e.g.
#   A: android:name(0x01010003)="value" (Raw: "value")
_MANIFEST_ATTRIBUTE_RE = re.compile(
    r'\s*A: ([^\(\)= ]*)\([^\(\)= ]*\)="(.*)" \(Raw: .*\)$')
# Matches element ("E:") and namespace ("N:") lines, capturing the tag name.
_MANIFEST_ELEMENT_RE = re.compile(r'\s*(?:E|N): (\S*) .*$')
def GetPackageName(apk_path):
  """Returns the package name of the apk."""
  pattern = re.compile(r'package: .*name=\'(\S*)\'')
  output = cmd_helper.GetCmdOutput([_AAPT_PATH, 'dump', 'badging', apk_path])
  for line in output.split('\n'):
    match = pattern.match(line)
    if match:
      return match.group(1)
  raise Exception('Failed to determine package name of %s' % apk_path)
def _ParseManifestFromApk(apk_path):
  # Parse `aapt dump xmltree AndroidManifest.xml` output into nested dicts:
  # elements become dicts keyed by tag name, attributes become lists of
  # string values.  Nesting depth is derived from aapt's indentation.
  aapt_cmd = [_AAPT_PATH, 'dump', 'xmltree', apk_path, 'AndroidManifest.xml']
  aapt_output = cmd_helper.GetCmdOutput(aapt_cmd).split('\n')
  parsed_manifest = {}
  # Stack of open element dicts; the root is the manifest dict itself.
  node_stack = [parsed_manifest]
  # NOTE(review): one indent unit per nesting level in aapt output --
  # confirm this string matches the actual unit aapt emits.
  indent = ' '
  # Skip the first line (the "N: android=..." header precedes the tree).
  for line in aapt_output[1:]:
    if len(line) == 0:
      continue
    # Count leading indent units to find this line's depth in the tree.
    indent_depth = 0
    while line[(len(indent) * indent_depth):].startswith(indent):
      indent_depth += 1
    # Pop back to the parent at this depth, then attach to it.
    node_stack = node_stack[:indent_depth]
    node = node_stack[-1]
    m = _MANIFEST_ELEMENT_RE.match(line[len(indent) * indent_depth:])
    if m:
      # New element: create (or reuse) its dict and push it on the stack.
      if not m.group(1) in node:
        node[m.group(1)] = {}
      node_stack += [node[m.group(1)]]
      continue
    m = _MANIFEST_ATTRIBUTE_RE.match(line[len(indent) * indent_depth:])
    if m:
      # Attribute of the current element; values accumulate in a list.
      if not m.group(1) in node:
        node[m.group(1)] = []
      node[m.group(1)].append(m.group(2))
      continue
  return parsed_manifest
def GetInstrumentationName(
    apk_path, default='android.test.InstrumentationTestRunner'):
  """Returns the name of the Instrumentation in the apk."""
  try:
    manifest = _ParseManifestFromApk(apk_path)
    instrumentation = manifest['manifest']['instrumentation']
    return instrumentation['android:name'][0]
  except KeyError:
    # Manifest declares no instrumentation; fall back to the default runner.
    return default
| bsd-3-clause |
hubsaysnuaa/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/LoginTest.py | 384 | 1320 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
# `<>` was removed in Python 3; `!=` is equivalent and valid in both 2 and 3.
if __name__ != "package":
    from ServerParameter import *
    from lib.gui import *
class LoginTest:
    """Opens the server-connection dialog if no login session is active."""
    def __init__(self):
        # `loginstatus` and `Change` come from the star-imports above
        # (ServerParameter / lib.gui).  NOTE(review): Change(None) presumably
        # opens the connection dialog with no parent window -- confirm
        # against Change()'s signature.
        if not loginstatus:
            Change(None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
barseghyanartur/django-authority | authority/managers.py | 13 | 2352 | from django.db import models
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class PermissionManager(models.Manager):
    """Manager with helpers for querying and deleting per-object permissions."""

    def get_content_type(self, obj):
        """Return the ContentType for `obj`'s model."""
        return ContentType.objects.get_for_model(obj)

    def get_for_model(self, obj):
        """Return all permissions attached to `obj`'s model class."""
        return self.filter(content_type=self.get_content_type(obj))

    def for_object(self, obj, approved=True):
        """Return (approved) permissions attached to the object instance."""
        return self.get_for_model(obj).select_related(
            'user', 'creator', 'group', 'content_type'
        ).filter(object_id=obj.id, approved=approved)

    def for_user(self, user, obj, check_groups=True):
        """Return permissions `user` holds on `obj`'s model, optionally
        including permissions granted via the user's groups."""
        perms = self.get_for_model(obj)
        if not check_groups:
            return perms.select_related('user', 'creator').filter(user=user)
        # Hacking user to user__pk to workaround deepcopy bug:
        # http://bugs.python.org/issue2460
        # Which is triggered by django's deepcopy which backports that fix in
        # Django 1.2
        return perms.select_related('user', 'user__groups', 'creator').filter(
            Q(user__pk=user.pk) | Q(group__in=user.groups.all()))

    def user_permissions(
            self, user, perm, obj, approved=True, check_groups=True):
        """Return `user`'s permissions with codename `perm` on `obj`'s model."""
        return self.for_user(
            user,
            obj,
            check_groups,
        ).filter(
            codename=perm,
            approved=approved,
        )

    def group_permissions(self, group, perm, obj, approved=True):
        """
        Get objects that have Group perm permission on
        """
        return self.get_for_model(obj).select_related(
            'user', 'group', 'creator').filter(group=group, codename=perm,
                                               approved=approved)

    def delete_objects_permissions(self, obj):
        """
        Delete permissions related to an object instance
        """
        perms = self.for_object(obj)
        perms.delete()

    def delete_user_permissions(self, user, perm, obj, check_groups=False):
        """
        Remove granular permission perm from user on an object instance
        """
        # BUG FIX: the caller's check_groups flag was previously ignored
        # (hard-coded to False), so group-derived permissions were never
        # considered when testing whether anything needs deleting.
        user_perms = self.user_permissions(user, perm, obj,
                                           check_groups=check_groups)
        if not user_perms.filter(object_id=obj.id):
            return
        # NOTE(review): the deletion query below uses user_permissions()'s
        # default check_groups=True, as in the original code -- confirm this
        # asymmetry with the existence check above is intentional.
        perms = self.user_permissions(user, perm, obj).filter(object_id=obj.id)
        perms.delete()
| bsd-3-clause |
jeremypogue/ansible | lib/ansible/parsing/dataloader.py | 3 | 17974 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import os
import json
import subprocess
import tempfile
from yaml import YAMLError
from ansible.compat.six import text_type, string_types
from ansible.errors import AnsibleFileNotFound, AnsibleParserError, AnsibleError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils.basic import is_executable
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.vault import VaultLib
from ansible.parsing.quoting import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DataLoader():

    '''
    The DataLoader class is used to load and parse YAML or JSON content,
    either from a given file name or from a string that was previously
    read in through other means. A Vault password can be specified, and
    any vault-encrypted files will be decrypted.

    Data read from files will also be cached, so the file will never be
    read from disk more than once.

    Usage:

        dl = DataLoader()
        # optionally: dl.set_vault_password('foo')
        ds = dl.load('...')
        ds = dl.load_from_file('/path/to/file')
    '''

    def __init__(self):
        self._basedir = '.'
        self._FILE_CACHE = dict()
        self._tempfiles = set()

        # initialize the vault stuff with an empty password
        self.set_vault_password(None)

    def set_vault_password(self, vault_password):
        '''Set the vault password and (re)create the VaultLib using it.'''
        self._vault_password = vault_password
        self._vault = VaultLib(password=vault_password)

    def load(self, data, file_name='<string>', show_content=True):
        '''
        Creates a python datastructure from the given data, which can be either
        a JSON or YAML string.
        '''
        new_data = None
        try:
            # we first try to load this data as JSON
            new_data = json.loads(data)
        except:
            # must not be JSON, let the rest try
            if isinstance(data, AnsibleUnicode):
                # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
                # they are unable to cope with our subclass.
                # Unwrap and re-wrap the unicode so we can keep track of line
                # numbers
                in_data = text_type(data)
            else:
                in_data = data
            try:
                new_data = self._safe_load(in_data, file_name=file_name)
            except YAMLError as yaml_exc:
                self._handle_error(yaml_exc, file_name, show_content)

            if isinstance(data, AnsibleUnicode):
                new_data = AnsibleUnicode(new_data)
                new_data.ansible_pos = data.ansible_pos

        return new_data

    def load_from_file(self, file_name):
        ''' Loads data from a file, which can contain either JSON or YAML.  '''

        file_name = self.path_dwim(file_name)

        # if the file has already been read in and cached, we'll
        # return those results to avoid more file/vault operations
        if file_name in self._FILE_CACHE:
            parsed_data = self._FILE_CACHE[file_name]
        else:
            # read the file contents and load the data structure from them
            (file_data, show_content) = self._get_file_contents(file_name)
            parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)

            # cache the file contents for next time
            self._FILE_CACHE[file_name] = parsed_data

        # return a deep copy here, so the cache is not affected
        return copy.deepcopy(parsed_data)

    def path_exists(self, path):
        '''Check whether the (dwim'd) path exists on disk.'''
        path = self.path_dwim(path)
        return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))

    def is_file(self, path):
        '''Check whether the (dwim'd) path is a regular file (or /dev/null).'''
        path = self.path_dwim(path)
        return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull

    def is_directory(self, path):
        '''Check whether the (dwim'd) path is a directory.'''
        path = self.path_dwim(path)
        return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))

    def list_directory(self, path):
        '''List the entries of the (dwim'd) directory path.'''
        path = self.path_dwim(path)
        return os.listdir(path)

    def is_executable(self, path):
        '''is the given path executable?'''
        path = self.path_dwim(path)
        return is_executable(path)

    def _safe_load(self, stream, file_name=None):
        ''' Implements yaml.safe_load(), except using our custom loader class.  '''

        loader = AnsibleLoader(stream, file_name, self._vault_password)
        try:
            return loader.get_single_data()
        finally:
            try:
                loader.dispose()
            except AttributeError:
                pass  # older versions of yaml don't have dispose function, ignore

    def _get_file_contents(self, file_name):
        '''
        Reads the file contents from the given file name, and will decrypt them
        if they are found to be vault-encrypted.
        '''
        if not file_name or not isinstance(file_name, string_types):
            raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))

        b_file_name = to_bytes(file_name)
        if not self.path_exists(b_file_name) or not self.is_file(b_file_name):
            raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % file_name)

        show_content = True
        try:
            with open(b_file_name, 'rb') as f:
                data = f.read()
                if self._vault.is_encrypted(data):
                    data = self._vault.decrypt(data, filename=b_file_name)
                    # never echo decrypted vault content back in error output
                    show_content = False

            data = to_text(data, errors='surrogate_or_strict')
            return (data, show_content)

        except (IOError, OSError) as e:
            raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))

    def _handle_error(self, yaml_exc, file_name, show_content):
        '''
        Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
        file name/position where a YAML exception occurred, and raises an AnsibleParserError
        to display the syntax exception information.
        '''

        # if the YAML exception contains a problem mark, use it to construct
        # an object the error class can use to display the faulty line
        err_obj = None
        if hasattr(yaml_exc, 'problem_mark'):
            err_obj = AnsibleBaseYAMLObject()
            err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)

        raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)

    def get_basedir(self):
        ''' returns the current basedir '''
        return self._basedir

    def set_basedir(self, basedir):
        ''' sets the base directory, used to find files when a relative path is given '''

        if basedir is not None:
            self._basedir = to_text(basedir)

    def path_dwim(self, given):
        '''
        make relative paths work like folks expect.
        '''

        given = unquote(given)
        given = to_text(given, errors='surrogate_or_strict')

        if given.startswith(u"/"):
            return os.path.abspath(given)
        elif given.startswith(u"~"):
            return os.path.abspath(os.path.expanduser(given))
        else:
            basedir = to_text(self._basedir, errors='surrogate_or_strict')
            return os.path.abspath(os.path.join(basedir, given))

    def path_dwim_relative(self, path, dirname, source):
        '''
        find one file in either a role or playbook dir with or without
        explicitly named dirname subdirs

        Used in action plugins and lookups to find supplemental files that
        could be in either place.
        '''

        search = []
        isrole = False

        # I have full path, nothing else needs to be looked at
        if source.startswith('~') or source.startswith(os.path.sep):
            search.append(self.path_dwim(source))
        else:
            # base role/play path + templates/files/vars + relative filename
            search.append(os.path.join(path, dirname, source))
            basedir = unfrackpath(path)

            # is it a role and if so make sure you get correct base path
            if path.endswith('tasks') and os.path.exists(to_bytes(os.path.join(path, 'main.yml'), errors='surrogate_or_strict')) \
                    or os.path.exists(to_bytes(os.path.join(path, 'tasks/main.yml'), errors='surrogate_or_strict')):
                isrole = True
                if path.endswith('tasks'):
                    basedir = unfrackpath(os.path.dirname(path))

            cur_basedir = self._basedir
            self.set_basedir(basedir)
            # resolved base role/play path + templates/files/vars + relative filename
            search.append(self.path_dwim(os.path.join(basedir, dirname, source)))
            self.set_basedir(cur_basedir)

            if isrole and not source.endswith(dirname):
                # look in role's tasks dir w/o dirname
                search.append(self.path_dwim(os.path.join(basedir, 'tasks', source)))

            # try to create absolute path for loader basedir + templates/files/vars + filename
            search.append(self.path_dwim(os.path.join(dirname, source)))
            search.append(self.path_dwim(os.path.join(basedir, source)))

            # try to create absolute path for loader basedir + filename
            search.append(self.path_dwim(source))

        for candidate in search:
            if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
                break

        # NOTE: if nothing exists, the last candidate is returned anyway
        return candidate

    def path_dwim_relative_stack(self, paths, dirname, source):
        '''
        find one file in first path in stack taking roles into account and adding play basedir as fallback

        :arg paths: A list of text strings which are the paths to look for the filename in.
        :arg dirname: A text string representing a directory.  The directory
            is prepended to the source to form the path to search for.
        :arg source: A text string which is the filename to search for
        :rtype: A text string
        :returns: An absolute path to the filename ``source``
        '''
        b_dirname = to_bytes(dirname)
        b_source = to_bytes(source)

        result = None
        if not source:
            display.warning('Invalid request to find a file that matches an empty string or "null" value')
        elif source.startswith('~') or source.startswith(os.path.sep):
            # path is absolute, no relative needed, check existence and return source
            test_path = unfrackpath(b_source)
            if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
                result = test_path
        else:
            search = []
            for path in paths:
                upath = unfrackpath(path)
                b_upath = to_bytes(upath, errors='surrogate_or_strict')
                b_mydir = os.path.dirname(b_upath)

                # if path is in role and 'tasks' not there already, add it into the search
                if b_upath.endswith(b'tasks') and os.path.exists(os.path.join(b_upath, b'main.yml')) \
                        or os.path.exists(os.path.join(b_upath, b'tasks/main.yml')) \
                        or os.path.exists(os.path.join(b_mydir, b'tasks/main.yml')):
                    if b_mydir.endswith(b'tasks'):
                        search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
                        search.append(os.path.join(b_mydir, b_source))
                    else:
                        search.append(os.path.join(b_upath, b_dirname, b_source))
                        search.append(os.path.join(b_upath, b'tasks', b_source))
                elif b_dirname not in b_source.split(b'/'):
                    # don't add dirname if user already is using it in source
                    search.append(os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b_source))

            # always append basedir as last resort
            search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
            search.append(os.path.join(to_bytes(self.get_basedir()), b_source))

            display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
            for b_candidate in search:
                display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
                if os.path.exists(b_candidate):
                    result = to_text(b_candidate)
                    break

        return result

    def read_vault_password_file(self, vault_password_file):
        """
        Read a vault password from a file or if executable, execute the script and
        retrieve password from STDOUT
        """

        this_path = os.path.realpath(to_bytes(os.path.expanduser(vault_password_file), errors='surrogate_or_strict'))
        if not os.path.exists(to_bytes(this_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound("The vault password file %s was not found" % this_path)

        if self.is_executable(this_path):
            try:
                # STDERR not captured to make it easier for users to prompt for input in their scripts
                p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
            except OSError as e:
                # BUG FIX: this_path is a single path string; the previous
                # ' '.join(this_path) space-separated every character of it.
                raise AnsibleError("Problem running vault password script %s (%s)."
                                   " If this is not a script, remove the executable bit from the file." % (this_path, to_native(e)))
            stdout, stderr = p.communicate()
            self.set_vault_password(stdout.strip('\r\n'))
        else:
            try:
                f = open(this_path, "rb")
                self.set_vault_password(f.read().strip())
                f.close()
            except (OSError, IOError) as e:
                raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))

    def _create_content_tempfile(self, content):
        ''' Create a tempfile containing defined content '''
        fd, content_tempfile = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        content = to_bytes(content)
        try:
            f.write(content)
        except Exception as err:
            os.remove(content_tempfile)
            raise Exception(err)
        finally:
            f.close()
        return content_tempfile

    def get_real_file(self, file_path):
        """
        If the file is vault encrypted return a path to a temporary decrypted file
        If the file is not encrypted then the path is returned
        Temporary files are cleanup in the destructor
        """

        if not file_path or not isinstance(file_path, string_types):
            raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))

        b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
        if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
            raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % to_native(file_path))

        if not self._vault:
            self._vault = VaultLib(password="")

        real_path = self.path_dwim(file_path)

        try:
            with open(to_bytes(real_path), 'rb') as f:
                if self._vault.is_encrypted_file(f):
                    # if the file is encrypted and no password was specified,
                    # the decrypt call would throw an error, but we check first
                    # since the decrypt function doesn't know the file name
                    data = f.read()
                    if not self._vault_password:
                        raise AnsibleParserError("A vault password must be specified to decrypt %s" % file_path)

                    data = self._vault.decrypt(data, filename=real_path)
                    # Make a temp file
                    real_path = self._create_content_tempfile(data)
                    self._tempfiles.add(real_path)

            return real_path

        except (IOError, OSError) as e:
            raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)))

    def cleanup_tmp_file(self, file_path):
        """
        Removes any temporary files created from a previous call to
        get_real_file. file_path must be the path returned from a
        previous call to get_real_file.
        """
        if file_path in self._tempfiles:
            os.unlink(file_path)
            self._tempfiles.remove(file_path)

    def cleanup_all_tmp_files(self):
        '''Remove every tempfile created by get_real_file, best-effort.'''
        for f in self._tempfiles:
            try:
                self.cleanup_tmp_file(f)
            except:
                pass  # TODO: this should at least warn
| gpl-3.0 |
bob-the-hamster/commandergenius | project/jni/python/src/PC/VS8.0/build_ssl.py | 32 | 9154 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras=None):
    """Return absolute paths of every `filename` found on PATH (plus extras),
    in discovery order, without duplicates."""
    directories = os.environ["PATH"].split(os.pathsep)
    if extras:
        directories = directories + list(extras)
    found = []
    for directory in directories:
        candidate = os.path.abspath(os.path.join(directory, filename))
        if os.path.isfile(candidate) and candidate not in found:
            found.append(candidate)
    return found
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
    """Return the first perl in `perls` whose Win32 module loads, else None.

    cygwin perl does *not* work for building OpenSSL; ActivePerl does, and
    loading the "Win32" package is the simplest way to tell them apart.
    """
    for candidate in perls:
        pipe = os.popen(candidate + ' -e "use Win32;"')
        pipe.read()
        # os.popen().close() returns None when the command exited with 0.
        if pipe.close() is None:
            return candidate
    print("Can not find a suitable PERL:")
    if perls:
        print(" the following perl interpreters were found:")
        for p in perls:
            print(" ", p)
        print(" None of these versions appear suitable for building OpenSSL")
    else:
        print(" NO perl interpreters were found on this machine at all!")
        print(" Please install ActivePerl and ensure it appears on your path")
    return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
    # Collect every "openssl-*" directory under each source root.
    candidates = []
    for s in sources:
        try:
            # note: do not abspath s; the build will fail if any
            # higher up directory name has spaces in it.
            fnames = os.listdir(s)
        except os.error:
            fnames = []
        for fname in fnames:
            fqn = os.path.join(s, fname)
            if os.path.isdir(fqn) and fname.startswith("openssl-"):
                candidates.append(fqn)
    # Now we have all the candidates, locate the best.
    best_parts = []
    best_name = None
    for c in candidates:
        parts = re.split("[.-]", os.path.basename(c))[1:]
        # eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
        if len(parts) >= 4:
            continue
        # NOTE(review): version parts are compared as *strings*, so e.g.
        # "10" < "9" lexicographically -- acceptable for the 0.9.x series
        # this script targets, but would misorder double-digit components.
        if parts > best_parts:
            best_parts = parts
            best_name = c
    if best_name is not None:
        print("Found an SSL directory at '%s'" % (best_name,))
    else:
        print("Could not find an SSL directory in '%s'" % (sources,))
    sys.stdout.flush()
    return best_name
def create_makefile64(makefile, m32):
    """Create and fix makefile for 64bit
    Replace 32 with 64bit directories
    """
    if not os.path.isfile(m32):
        return
    replacements = [
        ("=tmp32", "=tmp64"),
        ("=out32", "=out64"),
        ("=inc32", "=inc64"),
        # force 64 bit machine
        ("MKLIB=lib", "MKLIB=lib /MACHINE:X64"),
        ("LFLAGS=", "LFLAGS=/MACHINE:X64 "),
        # don't link against the lib on 64bit systems
        ("bufferoverflowu.lib", ""),
    ]
    with open(m32) as fin, open(makefile, 'w') as fout:
        for line in fin:
            for old, new in replacements:
                line = line.replace(old, new)
            fout.write(line)
    # The 32-bit makefile is consumed; remove it so it cannot be reused.
    os.unlink(m32)
def fix_makefile(makefile):
    """Fix some stuff in all makefiles
    """
    if not os.path.isfile(makefile):
        return
    with open(makefile) as fin:
        original_lines = fin.readlines()
    with open(makefile, 'w') as fout:
        for line in original_lines:
            # Perl is not required when using pre-generated makefiles, so
            # drop the PERL= assignment entirely.
            if line.startswith("PERL="):
                continue
            if line.startswith("CP="):
                line = "CP=copy\n"
            elif line.startswith("MKDIR="):
                line = "MKDIR=mkdir\n"
            elif line.startswith("CFLAG="):
                # Disable the patent/licence-encumbered algorithms unless
                # the flags already do so.
                flags = line.strip()
                for algo in ("RC5", "MDC2", "IDEA"):
                    noalgo = " -DOPENSSL_NO_%s" % algo
                    if noalgo not in flags:
                        flags = flags + noalgo
                line = flags + '\n'
            fout.write(line)
def run_configure(configure, do_script):
    """Run OpenSSL's Configure with the given target, then the ms do_* script."""
    for command in ("perl Configure " + configure, do_script):
        print(command)
        os.system(command)
def main():
    # Build OpenSSL for the platform given on the command line:
    #   argv[1] = "Release" | "Debug", argv[2] = "Win32" | "x64", "-a" = rebuild all
    build_all = "-a" in sys.argv
    if sys.argv[1] == "Release":
        debug = False
    elif sys.argv[1] == "Debug":
        debug = True
    else:
        raise ValueError(str(sys.argv))

    # Map the platform onto OpenSSL's configure target, the batch file that
    # generates assembler sources, and the nmake makefile to use.
    if sys.argv[2] == "Win32":
        arch = "x86"
        configure = "VC-WIN32"
        do_script = "ms\\do_nasm"
        makefile="ms\\nt.mak"
        m32 = makefile
    elif sys.argv[2] == "x64":
        arch="amd64"
        configure = "VC-WIN64A"
        do_script = "ms\\do_win64a"
        makefile = "ms\\nt64.mak"
        m32 = makefile.replace('64', '')
        #os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
    else:
        raise ValueError(str(sys.argv))

    make_flags = ""
    if build_all:
        make_flags = "-a"
    # NOTE(review): make_flags is computed but never passed to nmake below --
    # confirm whether "-a" (rebuild all) is supposed to be honored.
    # perl should be on the path, but we also look in "\perl" and "c:\\perl"
    # as "well known" locations
    perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
    perl = find_working_perl(perls)
    if perl is None:
        print("No Perl installation was found. Existing Makefiles are used.")
    # NOTE(review): this runs even when perl is None (prints "... at 'None'")
    print("Found a working perl at '%s'" % (perl,))
    sys.stdout.flush()
    # Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
    ssl_dir = find_best_ssl_dir(("..\\..\\..",))
    if ssl_dir is None:
        sys.exit(1)

    old_cd = os.getcwd()
    try:
        os.chdir(ssl_dir)
        # rebuild makefile when we do the role over from 32 to 64 build
        if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
            os.unlink(m32)

        # If the ssl makefiles do not exist, we invoke Perl to generate them.
        # Due to a bug in this script, the makefile sometimes ended up empty
        # Force a regeneration if it is.
        if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
            if perl is None:
                print("Perl is required to build the makefiles!")
                sys.exit(1)

            print("Creating the makefiles...")
            sys.stdout.flush()
            # Put our working Perl at the front of our path
            os.environ["PATH"] = os.path.dirname(perl) + \
                                            os.pathsep + \
                                            os.environ["PATH"]
            run_configure(configure, do_script)
            if debug:
                print("OpenSSL debug builds aren't supported.")
            #if arch=="x86" and debug:
            #    # the do_masm script in openssl doesn't generate a debug
            #    # build makefile so we generate it here:
            #    os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
            if arch == "amd64":
                create_makefile64(makefile, m32)
            fix_makefile(makefile)
            # Preserve the per-arch generated headers so we can switch arches
            # without reconfiguring.
            shutil.copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
            shutil.copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)

        # Now run make.
        if arch == "amd64":
            # The 64-bit build needs uptable.asm assembled up front.
            rc = os.system(r"ml64 -c -Foms\uptable.obj ms\uptable.asm")
            if rc:
                print("ml64 assembler has failed.")
                sys.exit(rc)

        # Restore the headers for the arch being built.
        shutil.copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
        shutil.copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")

        #makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
        makeCommand = "nmake /nologo -f \"%s\"" % makefile
        print("Executing ssl makefiles:", makeCommand)
        sys.stdout.flush()
        rc = os.system(makeCommand)
        if rc:
            print("Executing "+makefile+" failed")
            print(rc)
            sys.exit(rc)
    finally:
        os.chdir(old_cd)
    # NOTE(review): rc is only bound if the nmake step ran; if an exception
    # escaped the try block above, this line would raise NameError.
    sys.exit(rc)
# Script entry point.
if __name__=='__main__':
    main()
| lgpl-2.1 |
thomasrogers03/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py | 124 | 3119 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
class TestDelegate(MessagePumpDelegate):
    """Delegate stub that records every callback into `log` for inspection."""
    def __init__(self):
        # Ordered record of delegate method invocations.
        self.log = []

    def schedule(self, interval, callback):
        # Keep the callback so the test can fire the pump manually.
        self.callback = callback
        self.log.append("schedule")

    def message_available(self, message):
        self.log.append("message_available: %s" % message)

    def final_message_delivered(self):
        self.log.append("final_message_delivered")
class MessagePumpTest(unittest.TestCase):
    """Checks the schedule/deliver cycle of MessagePump via a recording delegate."""

    def test_basic(self):
        queue = ThreadedMessageQueue()
        delegate = TestDelegate()
        pump = MessagePump(delegate, queue)

        # Constructing the pump schedules the first wakeup.
        expected = ["schedule"]
        self.assertEqual(delegate.log, expected)

        # An empty wakeup reschedules; queued messages are delivered on the
        # next wakeup, followed by another schedule.
        delegate.callback()
        queue.post("Hello")
        queue.post("There")
        delegate.callback()
        expected += [
            "schedule",
            "message_available: Hello",
            "message_available: There",
            "schedule",
        ]
        self.assertEqual(delegate.log, expected)

        # After stop(), remaining messages are flushed and the delegate is
        # told the final message has been delivered (no reschedule).
        queue.post("More")
        queue.post("Messages")
        queue.stop()
        delegate.callback()
        expected += [
            "message_available: More",
            "message_available: Messages",
            "final_message_delivered",
        ]
        self.assertEqual(delegate.log, expected)
| bsd-3-clause |
mcsalgado/ansible | test/units/playbook/test_block.py | 228 | 2451 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.compat.tests import unittest
class TestBlock(unittest.TestCase):
    """Unit tests for loading ansible.playbook.block.Block from datastructures."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_construct_empty_block(self):
        b = Block()

    def test_construct_block_with_role(self):
        pass

    def test_load_block_simple(self):
        # NOTE: 'otherwise' is not currently supported by Block, so it is
        # intentionally omitted here.
        ds = dict(block=[], rescue=[], always=[])
        b = Block.load(ds)
        for section in (b.block, b.rescue, b.always):
            self.assertEqual(section, [])

    def test_load_block_with_tasks(self):
        ds = dict(
            block=[dict(action='block')],
            rescue=[dict(action='rescue')],
            always=[dict(action='always')],
        )
        b = Block.load(ds)
        # Each section should have been parsed into exactly one Task.
        for section in (b.block, b.rescue, b.always):
            self.assertEqual(len(section), 1)
            self.assertIsInstance(section[0], Task)

    def test_load_implicit_block(self):
        # A bare task list is wrapped into an implicit block.
        ds = [dict(action='foo')]
        b = Block.load(ds)
        self.assertEqual(len(b.block), 1)
        self.assertIsInstance(b.block[0], Task)
| gpl-3.0 |
DelazJ/QGIS | cmake/FindQsci.py | 77 | 2612 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Larry Shaffer <larry@dakotacarto.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Larry Shaffer <larry@dakotacarto.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Larry Shaffer <larry@dakotacarto.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Larry Shaffer <larry@dakotacarto.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Find QScintilla2 PyQt4/PyQt5 module version.
.. note:: Redistribution and use is allowed according to the terms of the BSD
license. For details see the accompanying COPYING-CMAKE-SCRIPTS file.
"""
__author__ = 'Larry Shaffer (larry@dakotacarto.com)'
__date__ = '22/10/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import sys

VER = ""
# BUG FIX: the original guard was `len(sys.argv) > 0`, which is always true
# when run as a script (argv[0] is the script path), so the no-argument
# fallback below was dead code and `sys.argv[1]` raised IndexError whenever
# no PyQt version argument was supplied. Check for an actual argument instead.
if len(sys.argv) > 1:
    # An explicit PyQt major version was requested on the command line:
    # "4" selects PyQt4; anything else selects PyQt5.
    if sys.argv[1] == "4":
        from PyQt4.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
    else:
        from PyQt5.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
else:
    # No version requested: probe PyQt4 first, then fall back to PyQt5;
    # leave VER empty if neither binding is installed.
    try:
        from PyQt4.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
    except ImportError:
        try:
            from PyQt5.Qsci import QSCINTILLA_VERSION_STR
            VER = QSCINTILLA_VERSION_STR
        except ImportError:
            pass
# Emitted in a fixed "key:value" form so the CMake caller can parse it.
print("qsci_version_str:%s" % VER)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.