| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (15 classes) |
|---|---|---|---|---|---|
favedit/MoCross | Source/Library/LibFreeType/source/tools/chktrcmp.py | 381 | 3826 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly into the public domain.
import sys
import os
import re
# Accumulators filled by the scanning passes below.
SRC_FILE_LIST = []       # unused placeholder (kept for backward compatibility)
USED_COMPONENT = {}      # component name -> list of "file:line" use sites
KNOWN_COMPONENT = {}     # component name -> "file:line" of its FT_TRACE_DEF
# Default search locations; both can be overridden on the command line.
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
# NOTE: this is a Python 2 script (print statements throughout).
for i in range( 1, len( sys.argv ) ):
    if sys.argv[i].startswith( "--help" ):
        print "Usage: %s [option]" % sys.argv[0]
        print "Search used-but-defined and defined-but-not-used trace_XXX macros"
        print ""
        print " --help:"
        print " Show this help"
        print ""
        print " --src-dirs=dir1:dir2:..."
        print " Specify the directories of C source files to be checked"
        print " Default is %s" % ":".join( SRC_FILE_DIRS )
        print ""
        print " --def-files=file1:file2:..."
        print " Specify the header files including FT_TRACE_DEF()"
        print " Default is %s" % ":".join( TRACE_DEF_FILES )
        print ""
        exit(0)
    # Colon-separated lists override the defaults declared above.
    if sys.argv[i].startswith( "--src-dirs=" ):
        SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
    elif sys.argv[i].startswith( "--def-files=" ):
        TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
# Raw strings avoid invalid-escape warnings ('\.') on modern Pythons.
c_pathname_pat = re.compile( r'^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( r'^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )

for d in SRC_FILE_DIRS:
    for ( p, dlst, flst ) in os.walk( d ):
        for f in flst:
            if c_pathname_pat.match( f ) is None:
                continue
            src_pathname = os.path.join( p, f )
            line_num = 0
            # 'with' guarantees the file handle is closed even on error.
            with open( src_pathname, 'r' ) as src_file:
                for src_line in src_file:
                    line_num += 1
                    src_line = src_line.strip()
                    if trace_use_pat.match( src_line ) is not None:
                        # Stripping the matched "#define FT_COMPONENT trace_"
                        # prefix leaves just the component name.
                        component_name = trace_use_pat.sub( '', src_line )
                        use_site = "%s:%d" % ( src_pathname, line_num )
                        USED_COMPONENT.setdefault( component_name, [] ).append( use_site )
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
# Opening pattern matches everything up to and including "FT_TRACE_DEF(";
# closing pattern removes everything from the first ')' or whitespace on.
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
for f in TRACE_DEF_FILES:
    line_num = 0
    for hdr_line in open( f, 'r' ):
        line_num = line_num + 1
        hdr_line = hdr_line.strip()
        if trace_def_pat_opn.match( hdr_line ) != None:
            # What survives both substitutions is the bare component name.
            component_name = trace_def_pat_opn.sub( '', hdr_line )
            component_name = trace_def_pat_cls.sub( '', component_name )
            if component_name in KNOWN_COMPONENT:
                # Duplicate definition: report both sites.
                print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
                    ( component_name, KNOWN_COMPONENT[component_name], line_num )
            else:
                KNOWN_COMPONENT[component_name] = "%s:%d" % \
                    ( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
# Python 2 idiom: keys() returns a list, sorted in place.
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in KNOWN_COMPONENT:
        print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )

print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in USED_COMPONENT:
        # "any" is skipped — presumably a catch-all component; confirm
        # against FreeType's fttrace.h conventions.
        if c != "any":
            print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| apache-2.0 |
kashif/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 59 | 35604 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
# Toy corpora used as fixtures throughout this module: a "junk food"
# document set and a "not junk food" set, plus their concatenation.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)

NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)

ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Preprocessor fixture: strip accents, then fold to upper case."""
    stripped = strip_accents_unicode(s)
    return stripped.upper()
def strip_eacute(s):
    """Replace every e-acute character with a plain ASCII 'e'."""
    return 'e'.join(s.split('\xe9'))
def split_tokenize(s):
    """Tokenizer fixture: split on runs of arbitrary whitespace."""
    tokens = s.split()
    return tokens
def lazy_analyze(s):
    """Analyzer fixture that ignores the document and yields one fixed token."""
    feature = 'the_ultimate_feature'
    return [feature]
def test_strip_accents():
    """strip_accents_unicode maps accented characters to their base forms."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)

    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)

    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = '\u0627'  # simple halef
    assert_equal(strip_accents_unicode(a), expected)

    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)


def test_to_ascii():
    """strip_accents_ascii transliterates to ASCII, dropping the rest."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)

    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)

    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = ''  # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)

    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
    """Default word analyzer: lowercases, strips accents, drops 1-char tokens;
    also honors custom preprocessor/tokenizer callables."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)

        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)

        # input='file' makes the analyzer read from a file-like object
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)

        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)

        # with custom tokenizer
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)


def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) emits all unigrams followed by all bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()

    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    """Bytes in the wrong encoding raise UnicodeDecodeError under the
    default decode_error='strict'."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')

    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)

    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)


def test_char_ngram_analyzer():
    """analyzer='char' slides n-grams over the raw character stream."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()

    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)

    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)

    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)


def test_char_wb_ngram_analyzer():
    """analyzer='char_wb' pads n-grams with spaces at word boundaries."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()

    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)

    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)

    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary is accepted in several container types."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())

    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # mapping inputs keep their explicit term -> index assignment
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))


def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary survives when the vectorizer sits in a Pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """A vocabulary mapping two terms to the same index must be rejected.

    The original test had no failure path: if no ValueError was raised the
    test passed vacuously (vocabulary validation is deferred until fit, so
    the bare constructor call never raises). Trigger fit explicitly and
    fail if nothing raises, matching this file's empty-vocabulary test.
    """
    vocab = {"pizza": 0, "beer": 0}
    try:
        vect = CountVectorizer(vocabulary=vocab)
        vect.fit(["pasta_siziliana"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary whose indices do not cover 0..N-1 must be rejected.

    As with the repeated-indices test above in spirit: the original
    try/except passed vacuously when no exception was raised, because
    validation happens at fit time, not construction. Call fit and fail
    explicitly when no ValueError surfaces.
    """
    vocab = {"pizza": 1, "beer": 2}
    try:
        vect = CountVectorizer(vocabulary=vocab)
        vect.fit(["pasta_verdura"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
    """get_stop_words resolves 'english', rejects unknown names, and
    accepts an explicit list."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))


def test_countvectorizer_empty_vocabulary():
    """Fitting with an empty (or entirely stop-worded) vocabulary raises."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())

    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())


def test_fit_countvectorizer_twice():
    """Refitting on a different corpus rebuilds the vocabulary from scratch."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """smooth_idf=True keeps tf-idf non-negative and rows l2-normalized,
    even with all-zero feature columns."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())

    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])

    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())


def test_tfidf_no_smoothing():
    """Without smoothing, an all-zero feature column triggers a
    divide-by-zero RuntimeWarning during fit_transform."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())

    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])

    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')

    # Probe whether this numpy build emits div-by-zero warnings at all;
    # if it does not, the assertion below cannot be meaningful, so skip.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1

    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")


def test_sublinear_tf():
    """sublinear_tf replaces tf with a sub-linear transform: the output
    grows with tf but stays below it (value 1 maps to exactly 1)."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks for CountVectorizer / TfidfTransformer /
    TfidfVectorizer on the toy corpus.

    Fixes over the original:
    - the "bad analyzer" check assigned a string to the bound method
      ``v3.set_params`` instead of calling it, so the analyzer was never
      changed and the assertion only passed because the previously-set bad
      ``strip_accents`` raised first; now set_params is actually called
      (and strip_accents reset) so the invalid analyzer itself raises.
    - comment typo: the second vectorizer is v2, not v1.
    """
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1

    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)

    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)

    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()

        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)

        # stop word from the fixed list
        assert_false("the" in vocabulary)

        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)

        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)

    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))

    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))

    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)

    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)

    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)

    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)

    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')

    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)

    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)

    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)

    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)

    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)

    # error with bad analyzer type (strip_accents reset so the analyzer
    # value itself, not the stale strip_accents, triggers the error)
    v3.set_params(strip_accents=None, analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    """Attribute writes on TfidfVectorizer propagate to its inner
    TfidfTransformer (exposed here via the private ``_tfidf``)."""
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    """HashingVectorizer: shape/dtype, value bounds under l2 and l1 norms,
    and per-row normalization.

    Fix over the original: both normalization loops checked ``X[0]``
    instead of ``X[i]``, so only the first row was ever verified.
    """
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)

    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)

    # Check that the rows are normalized (every row, not just the first)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)

    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)

    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)

    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)

    # Check that the rows are normalized (every row, not just the first)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    """get_feature_names returns terms sorted and aligned with vocabulary_."""
    cv = CountVectorizer(max_df=0.5)

    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)

    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)

    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)

    # feature name position must match its vocabulary index
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))


def test_vectorizer_max_features():
    """max_features keeps only the top-N terms; the rest are recorded
    in stop_words_."""
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )

    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])

    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)


def test_count_vectorizer_max_features():
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)

    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)

    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()

    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())

    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    """Terms above max_df are dropped from the vocabulary and recorded
    in stop_words_ (both float-fraction and absolute-count forms)."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)

    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)

    vect.max_df = 1  # integer 1: absolute count, not a fraction
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)


def test_vectorizer_min_df():
    """Terms below min_df are dropped from the vocabulary and recorded
    in stop_words_ (both absolute-count and float-fraction forms)."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)

    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)  # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)

    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)  # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True records presence/absence instead of term counts;
    dtype is configurable."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)

    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)

    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)


def test_hashed_binary_occurrences():
    """Same count/binary behavior for the stateless HashingVectorizer."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)

    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)

    # check the ability to change the dtype
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
    """inverse_transform recovers each document's unique analyzed terms,
    for both sparse and dense inputs."""
    # raw documents
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)

        # Test that inverse_transform also works with numpy arrays
        transformed_data = transformed_data.toarray()
        inversed_data2 = vectorizer.inverse_transform(transformed_data)
        for terms, terms2 in zip(inversed_data, inversed_data2):
            assert_array_equal(np.sort(terms), np.sort(terms2))


def test_count_vectorizer_pipeline_grid_selection():
    """Grid-search over a CountVectorizer+LinearSVC pipeline reaches 100%
    accuracy on the held-out part of the toy corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)

    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])

    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
    """Grid-search over a TfidfVectorizer+LinearSVC pipeline (including a
    norm hyper-parameter) reaches 100% accuracy on held-out data."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])

    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)


def test_vectorizer_pipeline_cross_validation():
    """The tfidf+SVC pipeline scores perfectly under 3-fold CV on the toy
    corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])

    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    """CountVectorizer and HashingVectorizer handle non-ASCII text and
    agree on the extracted token counts (no hash collisions here)."""
    # tests that the count vectorizer works with cyrillic.
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")

    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert_equal(X_counted.shape, (1, 15))

    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    assert_equal(X_hashed.shape, (1, 2 ** 20))

    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)

    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))


def test_tfidf_vectorizer_with_fixed_vocabulary():
    """fit_transform and transform agree with a fixed vocabulary."""
    # non regression smoke test for inheritance issues
    vocabulary = ['pizza', 'celeri']
    vect = TfidfVectorizer(vocabulary=vocabulary)
    X_1 = vect.fit_transform(ALL_FOOD_DOCS)
    X_2 = vect.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(X_1.toarray(), X_2.toarray())
    assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
    """A dict vocabulary must survive a pickle round-trip unchanged."""
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for _ in range(100):
        words = choice(vocab_words, size=5, replace=False, random_state=rng)
        # word -> index mapping, identical to indexing 0..4 in draw order
        vocab_dict = {word: index for index, word in enumerate(words)}
        vectorizer = CountVectorizer(vocabulary=vocab_dict)
        roundtripped = pickle.loads(pickle.dumps(vectorizer))
        vectorizer.fit(ALL_FOOD_DOCS)
        roundtripped.fit(ALL_FOOD_DOCS)
        assert_equal(vectorizer.get_feature_names(),
                     roundtripped.get_feature_names())
def test_stop_words_removal():
    """Neither nulling nor deleting ``stop_words_`` may change transform."""
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
    )
    for vectorizer in fitted_vectorizers:
        reference = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        vectorizer.stop_words_ = None
        with_none = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        delattr(vectorizer, 'stop_words_')
        with_deleted = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        assert_array_equal(with_none, reference)
        assert_array_equal(with_deleted, reference)
def test_pickling_transformer():
    """TfidfTransformer must survive a pickle round-trip."""
    X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    transformer = TfidfTransformer().fit(X)
    restored = pickle.loads(pickle.dumps(transformer))
    assert_equal(type(restored), transformer.__class__)
    assert_array_equal(
        restored.fit_transform(X).toarray(),
        transformer.fit_transform(X).toarray())
def test_non_unique_vocab():
    """Duplicate entries in a user vocabulary must be rejected at fit time."""
    duplicated_vocab = ['a', 'b', 'c', 'a', 'a']
    vectorizer = CountVectorizer(vocabulary=duplicated_vocab)
    assert_raises(ValueError, vectorizer.fit, [])
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when pandas loads a text column with missing
    # values from a csv file; the vectorizer must raise a clear error.
    def fit_with_nan():
        HashingVectorizer().fit_transform(
            ['hello world', np.nan, 'hello hello'])
    assert_raise_message(
        ValueError,
        "np.nan is an invalid document, expected byte or unicode string.",
        fit_with_nan)
def test_tfidfvectorizer_binary():
    """Non-regression: TfidfVectorizer used to ignore its "binary" param."""
    vectorizer = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(vectorizer.binary)
    docs = ['hello world', 'hello hello']
    fitted = vectorizer.fit_transform(docs).toarray()
    assert_array_equal(fitted.ravel(), [1, 1, 1, 0])
    transformed = vectorizer.transform(docs).toarray()
    assert_array_equal(transformed.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    """The public ``idf_`` property mirrors the internal transformer's."""
    vectorizer = TfidfVectorizer(use_idf=True)
    vectorizer.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    """clone() must carry a user-supplied vocabulary over to the copy."""
    original = TfidfVectorizer(vocabulary=["the"])
    cloned = clone(original)
    original.fit(ALL_FOOD_DOCS)
    cloned.fit(ALL_FOOD_DOCS)
    assert_equal(cloned.vocabulary_, original.vocabulary_)
| bsd-3-clause |
christabor/flask_jsondash | tests/test_model_factories.py | 1 | 4810 | import os
import json
from click.testing import CliRunner
from flask_jsondash import model_factories
from flask_jsondash.settings import CHARTS_CONFIG
from conftest import read
_db = model_factories.adapter
def test_get_random_group():
    """A random group is a chart-config dict drawn from CHARTS_CONFIG."""
    group = model_factories.get_random_group()
    assert isinstance(group, dict)
    assert 'charts' in group
    assert group in CHARTS_CONFIG.values()
def test_get_random_chart():
    """A random chart drawn from a random group comes back as a tuple."""
    group = model_factories.get_random_group()
    chart = model_factories.get_random_chart(group)
    assert isinstance(chart, tuple)
def test_make_fake_dashboard():
    """A fake dashboard is a dict carrying the requested name."""
    dashboard = model_factories.make_fake_dashboard(name='Foo', max_charts=4)
    assert isinstance(dashboard, dict)
    assert dashboard.get('name') == 'Foo'
def test_make_fake_chart_data():
    """Fake chart data is a tuple whose second item is a JSON config dict."""
    chart_data = model_factories.make_fake_chart_data(name='Foo')
    assert isinstance(chart_data, tuple)
    config = json.loads(chart_data[1])
    assert isinstance(config, dict)
    assert config.get('name') == 'Foo'
def test_insert_dashboards(monkeypatch):
    """--records N must issue exactly N db create calls."""
    created = []
    monkeypatch.setattr(_db, 'create', lambda *a, **kw: created.append(a))
    runner = CliRunner()
    result = runner.invoke(model_factories.insert_dashboards,
                           ['--max-charts', 5, '--records', 5])
    assert result.exit_code == 0
    assert len(created) == 5
def test_delete_all(monkeypatch):
    """delete_all() returns None once the db layer is stubbed out."""
    monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
    assert model_factories.delete_all() is None
def test_load_fixtures(monkeypatch):
    """Loading the example fixtures creates one record per fixture file."""
    created = []
    monkeypatch.setattr(_db, 'create', lambda *a, **kw: created.append(a))
    runner = CliRunner()
    result = runner.invoke(model_factories.insert_dashboards,
                           ['--fixtures', 'example_app/examples/config'])
    assert result.exit_code == 0
    assert len(created) == 19  # Changed as new examples are added.
def test_dump_fixtures_empty(monkeypatch, tmpdir):
    """Dumping an empty db reports there is nothing to dump."""
    monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: [])
    runner = CliRunner()
    dump_dir = tmpdir.mkdir('dumped_fixtures_test')
    result = runner.invoke(model_factories.insert_dashboards,
                           ['--dump', dump_dir.strpath])
    assert 'Nothing to dump.' in result.output
    assert result.exit_code == 0
    assert os.listdir(dump_dir.strpath) == []
def test_dump_fixtures(monkeypatch, tmpdir):
    """Dumping a populated db writes one fixture file per record."""
    records = [
        model_factories.make_fake_dashboard(name=i, max_charts=1)
        for i in range(10)]
    # Also ensure any _id key is popped off before dumping.
    for record in records:
        record.update(_id='foo')
    monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
    runner = CliRunner()
    dump_dir = tmpdir.mkdir('dumped_fixtures_test')
    result = runner.invoke(model_factories.insert_dashboards,
                           ['--dump', dump_dir.strpath])
    assert 'Saving db as fixtures to:' in result.output
    assert result.exit_code == 0
    assert len(os.listdir(dump_dir.strpath)) == len(records)
def test_dump_fixtures_delete(monkeypatch, tmpdir):
    """--dump together with --delete writes the fixtures, then empties the db.

    Fix: removed a dead local ``delete_all()`` helper that was never called
    and whose ``global records`` statement referred to a module-level name
    that does not exist (``records`` below is a function local).
    """
    records = [
        model_factories.make_fake_dashboard(name=i, max_charts=1)
        for i in range(10)]
    monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
    monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
    runner = CliRunner()
    tmp = tmpdir.mkdir('dumped_fixtures_test')
    args = ['--dump', tmp.strpath, '--delete']
    result = runner.invoke(model_factories.insert_dashboards, args)
    assert 'Saving db as fixtures to:' in result.output
    assert result.exit_code == 0
    assert len(os.listdir(tmp.strpath)) == 10
    assert len(read()) == 0
def test_dump_fixtures_delete_bad_path_show_errors_no_exception(monkeypatch):
    """Dumping to an unwritable path reports errors instead of raising.

    Fix: removed a dead local ``delete_all()`` helper that was never called
    and whose ``global records`` statement referred to a module-level name
    that does not exist (``records`` below is a function local).
    """
    records = [
        model_factories.make_fake_dashboard(name=i, max_charts=1)
        for i in range(1)]
    monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
    monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
    runner = CliRunner()
    args = ['--dump', '/fakepath/', '--delete']
    result = runner.invoke(model_factories.insert_dashboards, args)
    assert 'Saving db as fixtures to:' in result.output
    assert result.exit_code == 0
    assert len(read()) == 0
    err_msg = "The following records could not be dumped: ['//fakepath/"
    assert err_msg in result.output
def test_delete_all_cli(monkeypatch):
    """The --delete CLI flag wipes all records and announces it."""
    monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
    assert model_factories.delete_all() is None
    runner = CliRunner()
    result = runner.invoke(model_factories.insert_dashboards, ['--delete'])
    assert 'Deleting all records!' in result.output
    assert result.exit_code == 0
| mit |
vipul-sharma20/oh-mainline | mysite/customs/migrations/0027_auto__add_rounduptrackermodel__add_roundupquerymodel.py | 17 | 13511 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds RoundupTrackerModel and RoundupQueryModel.

    Auto-generated by South; do not hand-edit the frozen ORM below except to
    regenerate it.
    """

    def forwards(self, orm):
        """Apply the migration: create the two Roundup tables."""
        # Adding model 'RoundupTrackerModel'
        db.create_table('customs_rounduptrackermodel', (
            ('trackermodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['customs.TrackerModel'], unique=True, primary_key=True)),
            ('tracker_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),
            ('base_url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=200)),
            ('closed_status', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('bitesized_field', self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True)),
            ('bitesized_text', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True)),
            ('documentation_field', self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True)),
            ('documentation_text', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True)),
            ('as_appears_in_distribution', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True)),
        ))
        db.send_create_signal('customs', ['RoundupTrackerModel'])

        # Adding model 'RoundupQueryModel'
        db.create_table('customs_roundupquerymodel', (
            ('trackerquerymodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['customs.TrackerQueryModel'], unique=True, primary_key=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=400)),
            ('description', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True)),
            ('tracker', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['customs.RoundupTrackerModel'])),
        ))
        db.send_create_signal('customs', ['RoundupQueryModel'])

    def backwards(self, orm):
        """Revert the migration: drop the two Roundup tables."""
        # Deleting model 'RoundupTrackerModel'
        db.delete_table('customs_rounduptrackermodel')

        # Deleting model 'RoundupQueryModel'
        db.delete_table('customs_roundupquerymodel')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in migration history. Generated, not meant to be edited.
    models = {
        'customs.bugzillaquerymodel': {
            'Meta': {'object_name': 'BugzillaQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'query_type': ('django.db.models.fields.CharField', [], {'default': "'xml'", 'max_length': '20'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.BugzillaTrackerModel']"}),
            'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '400'})
        },
        'customs.bugzillatrackermodel': {
            'Meta': {'object_name': 'BugzillaTrackerModel', '_ormbases': ['customs.TrackerModel']},
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'bitesized_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'bug_project_name_format': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'documentation_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'query_url_type': ('django.db.models.fields.CharField', [], {'default': "'xml'", 'max_length': '20'}),
            'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'customs.googlequerymodel': {
            'Meta': {'object_name': 'GoogleQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.GoogleTrackerModel']"}),
            'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'customs.googletrackermodel': {
            'Meta': {'object_name': 'GoogleTrackerModel', '_ormbases': ['customs.TrackerModel']},
            'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'bitesized_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'documentation_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'google_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'customs.recentmessagefromcia': {
            'Meta': {'object_name': 'RecentMessageFromCIA'},
            'branch': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'committer_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'module': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'time_received': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'customs.roundupquerymodel': {
            'Meta': {'object_name': 'RoundupQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.RoundupTrackerModel']"}),
            'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '400'})
        },
        'customs.rounduptrackermodel': {
            'Meta': {'object_name': 'RoundupTrackerModel', '_ormbases': ['customs.TrackerModel']},
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'bitesized_field': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'closed_status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'documentation_field': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'customs.tracbugtimes': {
            'Meta': {'object_name': 'TracBugTimes'},
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'latest_timeline_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'blank': 'True'}),
            'timeline': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.TracTimeline']"})
        },
        'customs.trackermodel': {
            'Meta': {'object_name': 'TrackerModel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_connections': ('django.db.models.fields.IntegerField', [], {'default': '8', 'blank': 'True'})
        },
        'customs.trackerquerymodel': {
            'Meta': {'object_name': 'TrackerQueryModel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'})
        },
        'customs.tracquerymodel': {
            'Meta': {'object_name': 'TracQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.TracTrackerModel']"}),
            'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '400'})
        },
        'customs.tractimeline': {
            'Meta': {'object_name': 'TracTimeline'},
            'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'})
        },
        'customs.tractrackermodel': {
            'Meta': {'object_name': 'TracTrackerModel', '_ormbases': ['customs.TrackerModel']},
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'bitesized_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'bug_project_name_format': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'documentation_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'old_trac': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'customs.webresponse': {
            'Meta': {'object_name': 'WebResponse'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_headers': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['customs']
| agpl-3.0 |
clairityproject/backend | data/migrations/0003_auto__add_field_node_offline__del_field_latest_dylos_bin_4__del_field_.py | 1 | 18935 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``data`` app.

    Adds ``Node.offline`` and replaces the per-bin Dylos / Alphasense columns
    on ``Latest`` with aggregated particle counts and named gas readings.
    Auto-generated by South; do not hand-edit the frozen ORM below except to
    regenerate it.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'Node.offline'
        db.add_column(u'data_node', 'offline',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Deleting field 'Latest.dylos_bin_4'
        db.delete_column(u'data_latest', 'dylos_bin_4')

        # Deleting field 'Latest.dylos_bin_3'
        db.delete_column(u'data_latest', 'dylos_bin_3')

        # Deleting field 'Latest.dylos_bin_2'
        db.delete_column(u'data_latest', 'dylos_bin_2')

        # Deleting field 'Latest.dylos_bin_1'
        db.delete_column(u'data_latest', 'dylos_bin_1')

        # Deleting field 'Latest.alphasense_3'
        db.delete_column(u'data_latest', 'alphasense_3')

        # Deleting field 'Latest.alphasense_2'
        db.delete_column(u'data_latest', 'alphasense_2')

        # Deleting field 'Latest.alphasense_1'
        db.delete_column(u'data_latest', 'alphasense_1')

        # Deleting field 'Latest.alphasense_7'
        db.delete_column(u'data_latest', 'alphasense_7')

        # Deleting field 'Latest.alphasense_6'
        db.delete_column(u'data_latest', 'alphasense_6')

        # Deleting field 'Latest.alphasense_5'
        db.delete_column(u'data_latest', 'alphasense_5')

        # Deleting field 'Latest.alphasense_4'
        db.delete_column(u'data_latest', 'alphasense_4')

        # Deleting field 'Latest.alphasense_8'
        db.delete_column(u'data_latest', 'alphasense_8')

        # Adding field 'Latest.big_particles'
        db.add_column(u'data_latest', 'big_particles',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.small_particles'
        db.add_column(u'data_latest', 'small_particles',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.no'
        db.add_column(u'data_latest', 'no',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.no2'
        db.add_column(u'data_latest', 'no2',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.co'
        db.add_column(u'data_latest', 'co',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.o3'
        db.add_column(u'data_latest', 'o3',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration (restores the old per-bin columns)."""
        # Deleting field 'Node.offline'
        db.delete_column(u'data_node', 'offline')

        # Adding field 'Latest.dylos_bin_4'
        db.add_column(u'data_latest', 'dylos_bin_4',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.dylos_bin_3'
        db.add_column(u'data_latest', 'dylos_bin_3',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.dylos_bin_2'
        db.add_column(u'data_latest', 'dylos_bin_2',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.dylos_bin_1'
        db.add_column(u'data_latest', 'dylos_bin_1',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_3'
        db.add_column(u'data_latest', 'alphasense_3',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_2'
        db.add_column(u'data_latest', 'alphasense_2',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_1'
        db.add_column(u'data_latest', 'alphasense_1',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_7'
        db.add_column(u'data_latest', 'alphasense_7',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_6'
        db.add_column(u'data_latest', 'alphasense_6',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_5'
        db.add_column(u'data_latest', 'alphasense_5',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_4'
        db.add_column(u'data_latest', 'alphasense_4',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Latest.alphasense_8'
        db.add_column(u'data_latest', 'alphasense_8',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)

        # Deleting field 'Latest.big_particles'
        db.delete_column(u'data_latest', 'big_particles')

        # Deleting field 'Latest.small_particles'
        db.delete_column(u'data_latest', 'small_particles')

        # Deleting field 'Latest.no'
        db.delete_column(u'data_latest', 'no')

        # Deleting field 'Latest.no2'
        db.delete_column(u'data_latest', 'no2')

        # Deleting field 'Latest.co'
        db.delete_column(u'data_latest', 'co')

        # Deleting field 'Latest.o3'
        db.delete_column(u'data_latest', 'o3')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in migration history. Generated, not meant to be edited.
    models = {
        u'data.alphasense': {
            'Meta': {'object_name': 'Alphasense'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'alphasense_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_5': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_6': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_7': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_8': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'co': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'no': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'no2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'o3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'data.datapoint': {
            'Meta': {'object_name': 'DataPoint'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'alphasense_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_5': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_6': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_7': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'alphasense_8': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'data.dylos': {
            'Meta': {'object_name': 'Dylos'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'big_particles': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dylos_bin_4': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'small_particles': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'data.latest': {
            'Meta': {'object_name': 'Latest'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'big_particles': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'co': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'indoor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'no': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'no2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_index': 'True'}),
            'o3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'small_particles': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'data.met': {
            'Meta': {'object_name': 'Met'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'data.node': {
            'Meta': {'object_name': 'Node'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'indoor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_index': 'True'}),
            'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'data.sensordetail': {
            'Meta': {'object_name': 'SensorDetail'},
            'co_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'co_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'co_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'co_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'co_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'co_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'co_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'no2_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no2_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no2_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no2_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'no2_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no2_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'no2_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'no_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'no_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'no_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'o3_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'o3_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'o3_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'o3_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'o3_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'o3_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'o3_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['data']
h3biomed/ansible | lib/ansible/modules/utilities/logic/async_wrapper.py | 17 | 11406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
import multiprocessing
from ansible.module_utils._text import to_text
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# pipe for communication between forked process and parent
ipc_watcher, ipc_notifier = multiprocessing.Pipe()
def notice(msg):
    """Record *msg* in syslog at NOTICE priority (the wrapper's trace log)."""
    syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
    """Detach the current process from its controlling terminal.

    Classic double-fork daemonization: the first fork lets the original
    parent exit, setsid() starts a new session, and the second fork ensures
    the daemon can never reacquire a controlling terminal. Finally the
    standard streams are redirected to /dev/null.
    """
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
    # point stdin/stdout/stderr at /dev/null so the daemon holds no tty fds
    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
    '''
    Used to filter unrelated output around module JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

    Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
    trailing lines after matching close character (working from the bottom of output).
    '''
    warnings = []

    lines = data.splitlines()

    # Find the first line that opens a JSON document and remember which
    # character will close it.
    start = None
    endchar = None
    for idx, raw_line in enumerate(lines):
        stripped = raw_line.strip()
        if stripped.startswith(u'{'):
            start, endchar = idx, u'}'
            break
        if stripped.startswith(u'['):
            start, endchar = idx, u']'
            break
    if endchar is None:
        raise ValueError('No start of json char found')

    lines = lines[start:]

    # Scan upward from the bottom for the matching close character.
    reverse_end_offset = None
    for idx in range(len(lines) - 1, -1, -1):
        if lines[idx].strip().endswith(endchar):
            reverse_end_offset = len(lines) - 1 - idx
            break
    if reverse_end_offset is None:
        raise ValueError('No end of json char found')

    if reverse_end_offset > 0:
        # Trailing junk is uncommon and can point to things the user might
        # want to change. So print a warning if we find any
        trailing_junk = lines[len(lines) - reverse_end_offset:]
        warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
        lines = lines[:(len(lines) - reverse_end_offset)]

    return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
    """Return the shebang argv of *module_path* as a list of bytes, or None.

    Reads the first 1KB of the file in binary mode; when it starts with
    '#!' the interpreter command is split on spaces and returned (bytes are
    accepted by subprocess argv on POSIX). Returns None for binary modules
    with no shebang line.
    """
    # Use a context manager instead of try/finally to guarantee the fd closes.
    with open(module_path, 'rb') as module_fd:
        head = module_fd.read(1024)
    # Bug fix: the file is read as bytes, so the sentinel and newline must be
    # bytes literals. Comparing bytes against the str '#!' is always unequal
    # on Python 3, which made this function return None for every module and
    # silently skip interpreter detection (and head.index('\n') would have
    # raised TypeError had it been reached).
    if head[0:2] != b'#!':
        return None
    return head[2:head.index(b'\n')].strip().split(b' ')
def _run_module(wrapped_cmd, jid, job_path):
    """Execute the wrapped module and publish its result as a job file.

    Writes an initial ``{"started": 1, ...}`` status to *job_path*, runs
    *wrapped_cmd* in a subprocess, filters the module's stdout down to its
    JSON payload, and atomically replaces the job file with the final result
    (or with an error record if launching or parsing failed).
    """
    tmp_job_path = job_path + ".tmp"
    jobfile = open(tmp_job_path, "w")
    jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
    jobfile.close()
    # rename is atomic, so readers never observe a half-written status file
    os.rename(tmp_job_path, job_path)
    jobfile = open(tmp_job_path, "w")
    result = {}
    # signal grandchild process started and isolated from being terminated
    # by the connection being closed sending a signal to the job group
    ipc_notifier.send(True)
    ipc_notifier.close()
    outdata = ''
    filtered_outdata = ''
    stderr = ''
    try:
        cmd = shlex.split(wrapped_cmd)
        # call the module interpreter directly (for non-binary modules)
        # this permits use of a script for an interpreter on non-Linux platforms
        interpreter = _get_interpreter(cmd[0])
        if interpreter:
            cmd = interpreter + cmd
        script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        (outdata, stderr) = script.communicate()
        if PY3:
            # surrogateescape round-trips arbitrary bytes through str
            outdata = outdata.decode('utf-8', 'surrogateescape')
            stderr = stderr.decode('utf-8', 'surrogateescape')
        # strip MOTD/noise around the module's JSON output
        (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
        result = json.loads(filtered_outdata)
        if json_warnings:
            # merge JSON junk warnings with any existing module warnings
            module_warnings = result.get('warnings', [])
            if not isinstance(module_warnings, list):
                module_warnings = [module_warnings]
            module_warnings.extend(json_warnings)
            result['warnings'] = module_warnings
        if stderr:
            result['stderr'] = stderr
        jobfile.write(json.dumps(result))
    except (OSError, IOError):
        # failed to launch the module at all
        e = sys.exc_info()[1]
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "msg": to_text(e),
            "outdata": outdata, # temporary notice only
            "stderr": stderr
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    except (ValueError, Exception):
        # NOTE(review): (ValueError, Exception) is equivalent to catching
        # Exception alone; ValueError is presumably listed to document the
        # JSON-parse failure case.
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "data": outdata, # temporary notice only
            "stderr": stderr,
            "msg": traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    jobfile.close()
    # atomically publish the final result over the "started" status
    os.rename(tmp_job_path, job_path)
# Entry point. Process tree created below:
#   caller -> (first fork) reporter parent, prints the "started" JSON
#          -> child daemonizes, then forks a watcher and the module runner;
#             the watcher enforces time_limit by killing the runner's group.
if __name__ == '__main__':
    if len(sys.argv) < 5:
        print(json.dumps({
            "failed": True,
            "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
                   "Humans, do not call directly!"
        }))
        sys.exit(1)
    # make the job id unique per invocation by appending our pid
    jid = "%s.%d" % (sys.argv[1], os.getpid())
    time_limit = sys.argv[2]
    wrapped_module = sys.argv[3]
    argsfile = sys.argv[4]
    if '-tmp-' not in os.path.dirname(wrapped_module):
        # module does not live in an ansible-managed temp dir: never delete it
        preserve_tmp = True
    elif len(sys.argv) > 5:
        preserve_tmp = sys.argv[5] == '-preserve_tmp'
    else:
        preserve_tmp = False
    # consider underscore as no argsfile so we can support passing of additional positional parameters
    if argsfile != '_':
        cmd = "%s %s" % (wrapped_module, argsfile)
    else:
        cmd = wrapped_module
    # polling interval (seconds) for the watcher loop below
    step = 5
    async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
    # setup job output directory
    jobdir = os.path.expanduser(async_dir)
    job_path = os.path.join(jobdir, jid)
    if not os.path.exists(jobdir):
        try:
            os.makedirs(jobdir)
        except Exception:
            # NOTE(review): execution continues after this failure report;
            # a sys.exit(1) here looks intended — confirm before relying on it
            print(json.dumps({
                "failed": 1,
                "msg": "could not create: %s" % jobdir
            }))
    # immediately exit this process, leaving an orphaned process
    # running which immediately forks a supervisory timing process
    try:
        pid = os.fork()
        if pid:
            # Notify the overlord that the async process started
            # we need to not return immediately such that the launched command has an attempt
            # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
            # this probably could be done with some IPC later. Modules should always read
            # the argsfile at the very first start of their execution anyway
            # close off notifier handle in grandparent, probably unnecessary as
            # this process doesn't hang around long enough
            ipc_notifier.close()
            # allow waiting up to 2.5 seconds in total should be long enough for worst
            # loaded environment in practice.
            retries = 25
            while retries > 0:
                if ipc_watcher.poll(0.1):
                    break
                else:
                    retries = retries - 1
                    continue
            notice("Return async_wrapper task started.")
            print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
                              "_ansible_suppress_tmpdir_delete": not preserve_tmp}))
            sys.stdout.flush()
            sys.exit(0)
        else:
            # The actual wrapper process
            # close off the receiving end of the pipe from child process
            ipc_watcher.close()
            # Daemonize, so we keep on running
            daemonize_self()
            # we are now daemonized, create a supervisory process
            notice("Starting module and watcher")
            sub_pid = os.fork()
            if sub_pid:
                # close off inherited pipe handles
                ipc_watcher.close()
                ipc_notifier.close()
                # the parent stops the process after the time limit
                remaining = int(time_limit)
                # set the child process group id to kill all children
                os.setpgid(sub_pid, sub_pid)
                notice("Start watching %s (%s)" % (sub_pid, remaining))
                time.sleep(step)
                # poll until the module runner exits or the time limit expires
                while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                    notice("%s still running (%s)" % (sub_pid, remaining))
                    time.sleep(step)
                    remaining = remaining - step
                    if remaining <= 0:
                        notice("Now killing %s" % (sub_pid))
                        os.killpg(sub_pid, signal.SIGKILL)
                        notice("Sent kill to group %s " % sub_pid)
                        time.sleep(1)
                        if not preserve_tmp:
                            shutil.rmtree(os.path.dirname(wrapped_module), True)
                        sys.exit(0)
                notice("Done in kid B.")
                if not preserve_tmp:
                    shutil.rmtree(os.path.dirname(wrapped_module), True)
                sys.exit(0)
            else:
                # the child process runs the actual module
                notice("Start module (%s)" % os.getpid())
                _run_module(cmd, jid, job_path)
                notice("Module complete (%s)" % os.getpid())
                sys.exit(0)
    except SystemExit:
        # On python2.4, SystemExit is a subclass of Exception.
        # This block makes python2.4 behave the same as python2.5+
        raise
    except Exception:
        e = sys.exc_info()[1]
        notice("error: %s" % e)
        print(json.dumps({
            "failed": True,
            "msg": "FATAL ERROR: %s" % e
        }))
        sys.exit(1)
| gpl-3.0 |
wangpanjun/django-rest-framework | rest_framework/utils/mediatypes.py | 98 | 2745 | """
Handling of media types, as found in HTTP Content-Type and Accept headers.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7
"""
from __future__ import unicode_literals
from django.http.multipartparser import parse_header
from django.utils.encoding import python_2_unicode_compatible
from rest_framework import HTTP_HEADER_ENCODING
def media_type_matches(lhs, rhs):
    """
    Returns ``True`` if the media type in the first argument <= the
    media type in the second argument. The media types are strings
    as described by the HTTP spec.

    Valid media type strings include:

    'application/json; indent=4'
    'application/json'
    'text/*'
    '*/*'
    """
    return _MediaType(lhs).match(_MediaType(rhs))
def order_by_precedence(media_type_lst):
    """
    Returns a list of sets of media type strings, ordered by precedence.
    Precedence is determined by how specific a media type is:

    3. 'type/subtype; param=val'
    2. 'type/subtype'
    1. 'type/*'
    0. '*/*'
    """
    # Bucket 0 holds the most specific types (precedence 3), bucket 3 the
    # least specific ('*/*'); empty buckets are dropped from the result.
    buckets = [set(), set(), set(), set()]
    for media_type in media_type_lst:
        buckets[3 - _MediaType(media_type).precedence].add(media_type)
    return [bucket for bucket in buckets if bucket]
@python_2_unicode_compatible
class _MediaType(object):
    """Parsed representation of a media type string ('type/subtype; params').

    Wraps django's ``parse_header`` to split the full type and parameters,
    and provides matching and precedence logic for content negotiation.
    """
    def __init__(self, media_type_str):
        if media_type_str is None:
            media_type_str = ''
        self.orig = media_type_str
        # NOTE(review): parse_header is given encoded bytes; whether params
        # keys/values come back as bytes or str depends on the django
        # version in use — confirm before comparing against str literals.
        self.full_type, self.params = parse_header(media_type_str.encode(HTTP_HEADER_ENCODING))
        self.main_type, sep, self.sub_type = self.full_type.partition('/')

    def match(self, other):
        """Return true if this MediaType satisfies the given MediaType."""
        # Every non-'q' parameter of self must be present with the same
        # value on other; 'q' (quality factor) is excluded from matching.
        for key in self.params.keys():
            if key != 'q' and other.params.get(key, None) != self.params.get(key, None):
                return False
        # '*' acts as a wildcard on either side for both type components.
        if self.sub_type != '*' and other.sub_type != '*' and other.sub_type != self.sub_type:
            return False
        if self.main_type != '*' and other.main_type != '*' and other.main_type != self.main_type:
            return False
        return True

    @property
    def precedence(self):
        """
        Return a precedence level from 0-3 for the media type given how specific it is.
        """
        if self.main_type == '*':
            return 0
        elif self.sub_type == '*':
            return 1
        elif not self.params or list(self.params.keys()) == ['q']:
            # 'q' alone does not make a type more specific
            return 2
        return 3

    def __str__(self):
        # Reconstruct the canonical 'type/subtype; key=val' form.
        ret = "%s/%s" % (self.main_type, self.sub_type)
        for key, val in self.params.items():
            ret += "; %s=%s" % (key, val)
        return ret
| bsd-2-clause |
Eric-Gaudiello/tensorflow_dev | tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/tensorflow/python/framework/framework_lib.py | 5 | 2996 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""Classes and functions for building TensorFlow graphs.
## Core graph data structures
@@Graph
@@Operation
@@Tensor
## Tensor types
@@DType
@@as_dtype
## Utility functions
@@device
@@name_scope
@@control_dependencies
@@convert_to_tensor
@@convert_to_tensor_or_indexed_slices
@@get_default_graph
@@import_graph_def
## Graph collections
@@add_to_collection
@@get_collection
@@GraphKeys
## Defining new operations
@@RegisterGradient
@@NoGradient
@@RegisterShape
@@TensorShape
@@Dimension
@@op_scope
@@get_seed
"""
# Classes used when building a Graph.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import SparseTensor
from tensorflow.python.framework.ops import SparseTensorValue
from tensorflow.python.framework.ops import IndexedSlices
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.importer import import_graph_def
# Needed when you defined a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.framework.dtypes import *
| gpl-3.0 |
111t8e/h2o-2 | py/testdir_hosts/start_cloud.py | 9 | 2357 | # this lets me be lazy..starts the cloud up like I want from my json, and gives me a browser
# copies the jars for me, etc. Just hangs at the end for 10 minutes while I play with the browser
import unittest
import time,sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs as h2j
import h2o_common
import h2o_browse as h2b
class Basic(unittest.TestCase):
    # Convenience harness: brings up an H2O cloud from the user's
    # pytest_config json, then parks in test_1 so the browser can be used
    # interactively (see the comment at the top of the file).

    def tearDown(self):
        # fail if H2O wrote errors into its sandbox logs during the run
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Uses your username specific json: pytest_config-<username>.json
        # do what my json says, but with my hdfs. hdfs_name_node from the json
        h2o.init(use_hdfs=True)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_1(self):
        # open a browser pointed at the cloud, then idle forever below
        h2b.browseTheCloud()
        csvFilename = "airlines_all.csv"
        csvPathname='airlines/airlines_all.csv'
        hex_key = csvFilename + ".hex"
        start = time.time()
        timeoutSecs=1200
        # airlines_hex = h2i.import_parse(bucket='/home/0xdiag/datasets', path=csvPathname, schema='local', hex_key=hex_key,
        # timeoutSecs=timeoutSecs, retryDelaySecs=4, pollTimeoutSecs=60, doSummary=False)
        # print "fv.parse done in ",(time.time()-start)
        # kwargs = {
        # 'ignored_cols':'DepTime,ArrTime,TailNum,ActualElapsedTime,AirTime,ArrDelay,DepDelay,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay,IsArrDelayed',
        # 'standardize': 1,
        # 'classification': 1,
        # 'response': 'IsDepDelayed',
        # 'family': 'binomial',
        # 'n_folds': 0,
        # 'max_iter': 50,
        # 'beta_epsilon': 1e-4,
        # 'lambda':1e-5
        # }
        # results = []
        # for i in range(5):
        # start = time.time()
        # glm = h2o_cmd.runGLM(parseResult=airlines_hex, timeoutSecs=timeoutSecs, **kwargs)
        # auc = glm['glm_model']['submodels'][0]['validation']['auc']
        # results.append('glm2(%d) done in %d,auc=%f' %(i,(time.time()-start),auc))
        # for s in results:
        # print s
        # deliberate: keep the process (and cloud) alive for interactive use
        while 1:
            time.sleep(500000)
            print '.'
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
thnee/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_notification.py | 77 | 5180 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson (@smashwilson)
extends_documentation_fragment: rackspace.openstack
'''
# Bug fix: the example previously invoked ``rax_mon_entity`` with a ``type``
# option, neither of which matches this module; it now calls
# ``rax_mon_notification`` with the ``notification_type`` option documented
# above.
EXAMPLES = '''
- name: Monitoring notification example
  gather_facts: False
  hosts: local
  connection: local
  tasks:
  - name: Email me when something goes wrong.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: omg
      notification_type: email
      details:
        address: me@mailhost.com
    register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
    """Ensure a Cloud Monitoring notification with *label* exists or is absent.

    :param module: AnsibleModule instance used to report results/failures.
    :param state: 'present' to create/update, 'absent' to delete.
    :param label: friendly name identifying the notification (1-255 chars).
    :param notification_type: 'webhook', 'email' or 'pagerduty'.
    :param details: type-specific initialization dict.
    Exits through module.exit_json / module.fail_json.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False
    notification = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    # Find any existing notifications that carry this label.
    existing = []
    for n in cm.list_notifications():
        if n.label == label:
            existing.append(n)

    if existing:
        notification = existing[0]

    if state == 'present':
        should_update = False
        should_delete = False
        should_create = False

        if len(existing) > 1:
            module.fail_json(msg='%s existing notifications are labelled %s.' %
                             (len(existing), label))

        if notification:
            # A type change requires delete + recreate; a details-only change
            # can be applied in place.
            should_delete = (notification_type != notification.type)
            should_update = (details != notification.details)

            if should_update and not should_delete:
                # Bug fix: previously this passed notification.details (the
                # values already on the server), making the update a no-op;
                # apply the requested details instead.
                notification.update(details=details)
                changed = True

            if should_delete:
                notification.delete()
                # Bug fix: recreate the notification with the new type;
                # previously it was deleted and never recreated.
                should_create = True
        else:
            should_create = True

        if should_create:
            notification = cm.create_notification(notification_type,
                                                  label=label, details=details)
            changed = True
    else:
        # state == 'absent': remove every notification with this label.
        for n in existing:
            n.delete()
            changed = True

    if notification:
        notification_dict = {
            "id": notification.id,
            "type": notification.type,
            "label": notification.label,
            "details": notification.details
        }
        module.exit_json(changed=changed, notification=notification_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Module entry point: build the argument spec and apply the state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        label=dict(required=True),
        notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
        details=dict(required=True, type='dict'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params

    setup_rax_module(module, pyrax)

    notification(module,
                 params.get('state'),
                 params.get('label'),
                 params.get('notification_type'),
                 params.get('details'))

if __name__ == '__main__':
    main()
| gpl-3.0 |
utamaro/youtube-dl | youtube_dl/extractor/spiegel.py | 79 | 5600 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from .spiegeltv import SpiegeltvIE
class SpiegelIE(InfoExtractor):
    """Extractor for video pages on spiegel.de.

    Fetches the page (following a possible 302 redirect to spiegel.tv) and
    reads the available formats from a per-video XML manifest whose base URL
    is embedded in the page's javascript.
    """
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
    _TESTS = [{
        'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
        'md5': '2c2754212136f35fb4b19767d242f66e',
        'info_dict': {
            'id': '1259285',
            'ext': 'mp4',
            'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
            'description': 'md5:8029d8310232196eb235d27575a8b9f4',
            'duration': 49,
        },
    }, {
        'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
        'md5': 'f2cdf638d7aa47654e251e1aee360af1',
        'info_dict': {
            'id': '1309159',
            'ext': 'mp4',
            'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
            'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
            'duration': 983,
        },
    }, {
        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
        'info_dict': {
            'id': '1519126',
            'ext': 'mp4',
            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
        }
    }, {
        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, handle = self._download_webpage_handle(url, video_id)

        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
        if SpiegeltvIE.suitable(handle.geturl()):
            return self.url_result(handle.geturl(), 'Spiegeltv')

        # collapse whitespace runs in the scraped title
        title = re.sub(r'\s+', ' ', self._html_search_regex(
            r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
            webpage, 'title'))
        description = self._html_search_meta('description', webpage, 'description')

        base_url = self._search_regex(
            r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL')

        xml_url = base_url + video_id + '.xml'
        idoc = self._download_xml(xml_url, video_id)

        formats = []
        for n in list(idoc):
            # each <typeN> child of the manifest describes one format;
            # 'type6' is deliberately skipped — presumably an unusable
            # variant, TODO confirm against the manifest schema
            if n.tag.startswith('type') and n.tag != 'type6':
                format_id = n.tag.rpartition('type')[2]
                video_url = base_url + n.find('./filename').text
                formats.append({
                    'format_id': format_id,
                    'url': video_url,
                    'width': int(n.find('./width').text),
                    'height': int(n.find('./height').text),
                    'abr': int(n.find('./audiobitrate').text),
                    'vbr': int(n.find('./videobitrate').text),
                    'vcodec': n.find('./codec').text,
                    'acodec': 'MP4A',
                })
        duration = float(idoc[0].findall('./duration')[0].text)

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': formats,
        }
class SpiegelArticleIE(InfoExtractor):
    """Extractor for spiegel.de article pages embedding one or more videos.

    Handles two layouts: a single video opened via ``spOpenVideo`` near the
    top of the article, and multiple embedded videos referenced from
    ``vid_holderN`` containers.
    """
    # Fix: use a raw string for the regex. The previous plain literal relied
    # on Python preserving unknown escapes like '\.' verbatim, which emits a
    # DeprecationWarning/SyntaxWarning on modern interpreters; the pattern
    # value itself is unchanged.
    _VALID_URL = r'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
    IE_NAME = 'Spiegel:Article'
    IE_DESC = 'Articles on spiegel.de'
    _TESTS = [{
        'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
        'info_dict': {
            'id': '1516455',
            'ext': 'mp4',
            'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
            'description': 're:^Patrick Kämnitz gehört.{100,}',
        },
    }, {
        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
        'info_dict': {

        },
        'playlist_count': 6,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Single video on top of the page
        video_link = self._search_regex(
            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
            'video page URL', default=None)
        if video_link:
            video_url = compat_urlparse.urljoin(
                self.http_scheme() + '//spiegel.de/', video_link)
            return self.url_result(video_url)

        # Multiple embedded videos
        embeds = re.findall(
            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
            webpage)
        entries = [
            self.url_result(compat_urlparse.urljoin(
                self.http_scheme() + '//spiegel.de/', embed_path))
            for embed_path in embeds
        ]
        return self.playlist_result(entries)
| unlicense |
NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/requests/requests/packages/urllib3/connectionpool.py | 196 | 31221 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
# Queue is imported for side effects on MS Windows
import Queue as _unused_module_Queue # noqa: unused
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        # host is mandatory; a falsy host is a caller error
        if not host:
            raise LocationValueError("No host specified.")

        self.host = host
        self.port = port

    def __str__(self):
        return '%s(host=%r, port=%r)' % (type(self).__name__,
                                         self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Return False to re-raise any potential exceptions
        return False

    # Bug fix: ``close`` was declared without ``self``, so the
    # ``self.close()`` call in ``__exit__`` raised
    # ``TypeError: close() takes 0 positional arguments`` whenever a pool
    # was used as a context manager.
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 **conn_kw):
        # Parameters are documented in the class docstring above.
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # accept a plain int/float timeout for backwards compatibility
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault('socket_options', [])
    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.
        """
        # count is used both for diagnostics and the log line below
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))

        # ConnectionCls is HTTPConnection here; subclasses override it
        conn = self.ConnectionCls(host=self.host, port=self.port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        return conn
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.

        :raises ClosedPoolError: if the pool has been closed (self.pool is None).
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
            if getattr(conn, 'auto_open', 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None
        # queue slots may hold None placeholders; fall through to a new conn
        return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.
        :param conn:
            a connection from one of our connection pools
        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1
        # Normalize to a Timeout object and start the connect clock so
        # read_timeout below can account for time already spent connecting.
        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout
        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise
        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        conn.request(method, url, **httplib_request_kw)
        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout
        # App Engine doesn't have a sock attr
        if getattr(conn, 'sock', None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)
        # Receive the response from the server
        try:
            try:  # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older
                httplib_response = conn.getresponse()
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            # Translate low-level timeouts into ReadTimeoutError; anything
            # else propagates unchanged via the re-raise below.
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise
        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        # Malformed headers are logged but deliberately non-fatal: the
        # response is still returned to the caller.
        try:
            assert_header_parsing(httplib_response.msg)
        except HeaderParsingError as hpe:  # Platform-specific: Python 3
            log.warning(
                'Failed to parse headers (url=%s): %s',
                self._absolute_url(url), hpe, exc_info=True)
        return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.
        .. note::
            More commonly, it's appropriate to use a convenience method provided
            by :class:`.RequestMethods`, such as :meth:`request`.
        .. note::
            `release_conn` will only behave as expected if
            `preload_content=False` because we want to make
            `preload_content=False` the default behaviour someday soon without
            breaking backwards compatibility.
        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)
        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).
        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.
        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.
            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.
            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.
        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.
        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.
        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.
        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.
        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.
        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)
        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)
        conn = None
        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)
        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None
        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)
            conn.timeout = timeout_obj.connect_timeout
            # A brand-new proxied connection (no socket yet) still needs its
            # tunnel established before the actual request.
            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)
            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)
            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn
            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)
            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)
        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")
        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            # conn.close() returns None, so this both closes and discards it.
            conn = conn and conn.close()
            release_conn = True
            raise SSLError(e)
        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            conn = conn and conn.close()
            release_conn = True
            raise
        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will be
            # be replaced during the next _get_conn() call.
            conn = conn and conn.close()
            release_conn = True
            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)
            # increment() re-raises when retries are exhausted/disabled;
            # otherwise we fall through, remember the error, and retry below.
            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()
            # Keep track of the error for the retry warning.
            err = e
        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)
        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            # 303 See Other: the follow-up request must be a GET.
            if response.status == 303:
                method = 'GET'
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response
            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)
        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)
        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.
    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
    available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """
    scheme = 'https'
    ConnectionCls = HTTPSConnection
    def __init__(self, host, port=None,
                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
                 block=False, headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None, **conn_kw):
        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, retries, _proxy, _proxy_headers,
                                    **conn_kw)
        # Supplying CA certs implies the caller wants verification, so
        # default cert_reqs accordingly when it was not given explicitly.
        if ca_certs and cert_reqs is None:
            cert_reqs = 'CERT_REQUIRED'
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        # Only VerifiedHTTPSConnection knows about certificates; a plain
        # HTTPSConnection (no ssl module) is passed through untouched.
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          ca_cert_dir=self.ca_cert_dir,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        return conn
    def _prepare_proxy(self, conn):
        """
        Establish tunnel connection early, because otherwise httplib
        would improperly set Host: header to proxy's IP:port.
        """
        # Python 2.7+
        try:
            set_tunnel = conn.set_tunnel
        except AttributeError:  # Platform-specific: Python 2.6
            set_tunnel = conn._set_tunnel
        # Python 2.6.4 and earlier don't accept a headers argument.
        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
            set_tunnel(self.host, self.port)
        else:
            set_tunnel(self.host, self.port, self.proxy_headers)
        conn.connect()
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")
        # When proxying, the TCP connection goes to the proxy; the tunnel to
        # the real host is established later in _prepare_proxy().
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port
        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        return self._prepare_conn(conn)
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)
        # Force connect early to allow us to validate the connection.
        if not getattr(conn, 'sock', None):  # AppEngine might not have `.sock`
            conn.connect()
        # Warn (do not fail) when the certificate was not verified.
        if not conn.is_verified:
            warnings.warn((
                'Unverified HTTPS request is being made. '
                'Adding certificate verification is strongly advised. See: '
                'https://urllib3.readthedocs.org/en/latest/security.html'),
                InsecureRequestWarning)
def connection_from_url(url, **kw):
    r"""
    Given a url, return an :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.
    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.
    Example::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| mit |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/requests-2.2.1-py2.7.egg/requests/packages/urllib3/__init__.py | 650 | 1701 | # urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Python 2.6 has no logging.NullHandler; provide a minimal stand-in
    # that silently drops every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
# Attach the no-op handler so importing applications that never configure
# logging don't see "No handler found" warnings from this library.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    :param level: logging level for this package's logger (default DEBUG).
    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    # Lazy %-style args: the message is only formatted if DEBUG is enabled
    # (the original eagerly formatted it with the % operator).
    logger.debug('Added an stderr logging handler to logger: %s', __name__)
    return handler
# ... Clean up.
# NullHandler was only needed above to configure the default handler;
# remove it so it is not exposed as part of this module's namespace.
del NullHandler
| gpl-2.0 |
sharhar/USB-Thing | UpdaterFiles/Lib/python-3.5.1.amd64/Lib/site-packages/pip/utils/appdirs.py | 311 | 9173 | """
This code was taken from https://github.com/ActiveState/appdirs and modified
to suite our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.
    "appname" is the name of application.
    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache
    On Windows the only suggestion in the MSDN docs is that local settings go
    in the `CSIDL_LOCAL_APPDATA` directory, so "Cache" is appended to the
    non-roaming app data dir.
    """
    if WINDOWS:
        # %LOCALAPPDATA%\<AppName>\Cache
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        return os.path.join(base, appname, "Cache")

    if sys.platform == "darwin":
        # ~/Library/Caches/<AppName>
        return os.path.join(os.path.expanduser("~/Library/Caches"), appname)

    # XDG default: $XDG_CACHE_HOME/<AppName>, falling back to ~/.cache.
    base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
    return os.path.join(base, appname)
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.
    "appname" is the name of application.
    If None, just the system directory is returned.
    "roaming" (boolean, default False) can be set True to use the Windows
    roaming appdata directory. That means that for users on a Windows
    network setup for roaming profiles, this user data will be
    sync'd on login. See
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
    for a discussion of issues.
    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in
                                $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\\Documents and Settings\\<username>\\Application Data\\<AppName>
        Win 7  (not roaming):   C:\\Users\\<username>\\AppData\\Local\\<AppName>
        Win 7  (roaming):       C:\\Users\\<username>\\AppData\\Roaming\\<AppName>
    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if WINDOWS:
        # Fixed the `roaming and "A" or "B"` anti-pattern: a conditional
        # expression is equivalent here and cannot misfire on falsy values.
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    elif sys.platform == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Application Support/'),
            appname,
        )
    else:
        path = os.path.join(
            os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")),
            appname,
        )
    return path
def user_log_dir(appname):
    r"""
    Return full path to the user-specific log dir for this application.
    "appname" is the name of application.
    If None, just the system directory is returned.
    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Windows:    <user data dir>\Logs
    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory, so "Logs" is appended to the
    user data dir; on Unix "log" is appended to the user cache dir.
    """
    if WINDOWS:
        return os.path.join(user_data_dir(appname), "Logs")

    if sys.platform == "darwin":
        return os.path.join(os.path.expanduser('~/Library/Logs'), appname)

    return os.path.join(user_cache_dir(appname), "log")
def user_config_dir(appname, roaming=True):
    """Return full path to the user-specific config dir for this application.
    "appname" is the name of application.
    If None, just the system directory is returned.
    "roaming" (boolean, default True) can be set False to not use the
    Windows roaming appdata directory. That means that for users on a
    Windows network setup for roaming profiles, this user data will be
    sync'd on login. See
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
    for a discussion of issues.
    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>
        Win *:      same as user_data_dir
    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    # (Fixed "deafult" typo in the docstring above.)
    if WINDOWS:
        path = user_data_dir(appname, roaming=roaming)
    elif sys.platform == "darwin":
        path = user_data_dir(appname)
    else:
        # XDG: $XDG_CONFIG_HOME/<AppName>, falling back to ~/.config.
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(path, appname)
    return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
    r"""Return a list of potential user-shared config dirs for this application.
    "appname" is the name of application.
    Typical site config directories are:
        Mac OS X:   /Library/Application Support/<AppName>/
        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
                    $XDG_CONFIG_DIRS
        Windows:    C:\ProgramData\<AppName>\ (hidden, but writeable on Win 7)
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        return [os.path.join(base, appname)]

    if sys.platform == 'darwin':
        return [os.path.join('/Library/Application Support', appname)]

    # XDG: one entry per $XDG_CONFIG_DIRS component, then /etc as a
    # catch-all that is always consulted.
    xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    if xdg_config_dirs:
        pathlist = [
            os.sep.join([os.path.expanduser(x), appname])
            for x in xdg_config_dirs.split(os.pathsep)
        ]
    else:
        pathlist = []

    # always look in /etc directly as well
    pathlist.append('/etc')
    return pathlist
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """
    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg

    folder_names = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }
    value_name = folder_names[csidl_name]

    registry_key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _type = _winreg.QueryValueEx(registry_key, value_name)
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder path via SHGetFolderPathW using ctypes."""
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if the result has highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
if WINDOWS:
    # Prefer the ctypes implementation (queries the shell directly); fall
    # back to the registry technique when ctypes is unavailable.
    try:
        import ctypes
        _get_win_folder = _get_win_folder_with_ctypes
    except ImportError:
        _get_win_folder = _get_win_folder_from_registry
| apache-2.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/test/test_coercion.py | 121 | 11399 | import copy
import unittest
from test.test_support import run_unittest, TestFailed, check_warnings
# Fake a number that implements numeric methods through __coerce__
class CoerceNumber:
    """Fake number that routes all numeric operations through __coerce__."""

    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return '<CoerceNumber %s>' % repr(self.arg)

    def __coerce__(self, other):
        # Unwrap a fellow CoerceNumber; hand back anything else as-is.
        if isinstance(other, CoerceNumber):
            return self.arg, other.arg
        return (self.arg, other)
# New-style class version of CoerceNumber
class CoerceTo(object):
    """New-style-class counterpart of CoerceNumber."""

    def __init__(self, arg):
        self.arg = arg

    def __coerce__(self, other):
        other_value = other.arg if isinstance(other, CoerceTo) else other
        return self.arg, other_value
# Fake a number that implements numeric ops through methods.
class MethodNumber:
    """Fake number implementing each numeric operation as an explicit
    dunder method (forward and reflected variants)."""

    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return '<MethodNumber %s>' % repr(self.arg)

    # -- addition / subtraction --------------------------------------
    def __add__(self, other):
        return self.arg + other

    def __radd__(self, other):
        return other + self.arg

    def __sub__(self, other):
        return self.arg - other

    def __rsub__(self, other):
        return other - self.arg

    # -- multiplication / division -----------------------------------
    def __mul__(self, other):
        return self.arg * other

    def __rmul__(self, other):
        return other * self.arg

    def __div__(self, other):
        return self.arg / other

    def __rdiv__(self, other):
        return other / self.arg

    def __truediv__(self, other):
        return self.arg / other

    def __rtruediv__(self, other):
        return other / self.arg

    def __floordiv__(self, other):
        return self.arg // other

    def __rfloordiv__(self, other):
        return other // self.arg

    # -- power / modulo / comparison ---------------------------------
    def __pow__(self, other):
        return self.arg ** other

    def __rpow__(self, other):
        return other ** self.arg

    def __mod__(self, other):
        return self.arg % other

    def __rmod__(self, other):
        return other % self.arg

    def __cmp__(self, other):
        return cmp(self.arg, other)
# The nine candidate operands; the (i, j) keys of infix_results below index
# into this list for the left and right operand respectively.
candidates = [2, 2L, 4.0, 2+0j, [1], (2,), None,
              MethodNumber(2), CoerceNumber(2)]
infix_binops = [ '+', '-', '*', '**', '%', '//', '/' ]
# Shorthand for "this pairing raises TypeError".
TE = TypeError
# b = both normal and augmented give same result list
# s = single result lists for normal and augmented
# e = equals other results
# result lists: ['+', '-', '*', '**', '%', '//', ('classic /', 'new /')]
#                                                 ^^^^^^^^^^^^^^^^^^^^^^
#                                            2-tuple if results differ
#                                             else only one value
infix_results = {
    # 2
    (0,0): ('b', [4, 0, 4, 4, 0, 1, (1, 1.0)]),
    (0,1): ('e', (0,0)),
    (0,2): ('b', [6.0, -2.0, 8.0, 16.0, 2.0, 0.0, 0.5]),
    (0,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (0,4): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
    (0,5): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
    (0,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (0,7): ('e', (0,0)),
    (0,8): ('e', (0,0)),
    # 2L
    (1,0): ('e', (0,0)),
    (1,1): ('e', (0,1)),
    (1,2): ('e', (0,2)),
    (1,3): ('e', (0,3)),
    (1,4): ('e', (0,4)),
    (1,5): ('e', (0,5)),
    (1,6): ('e', (0,6)),
    (1,7): ('e', (0,7)),
    (1,8): ('e', (0,8)),
    # 4.0
    (2,0): ('b', [6.0, 2.0, 8.0, 16.0, 0.0, 2.0, 2.0]),
    (2,1): ('e', (2,0)),
    (2,2): ('b', [8.0, 0.0, 16.0, 256.0, 0.0, 1.0, 1.0]),
    (2,3): ('b', [6+0j, 2+0j, 8+0j, 16+0j, 0+0j, 2+0j, 2+0j]),
    (2,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (2,5): ('e', (2,4)),
    (2,6): ('e', (2,4)),
    (2,7): ('e', (2,0)),
    (2,8): ('e', (2,0)),
    # (2+0j)
    (3,0): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (3,1): ('e', (3,0)),
    (3,2): ('b', [6+0j, -2+0j, 8+0j, 16+0j, 2+0j, 0+0j, 0.5+0j]),
    (3,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
    (3,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (3,5): ('e', (3,4)),
    (3,6): ('e', (3,4)),
    (3,7): ('e', (3,0)),
    (3,8): ('e', (3,0)),
    # [1]
    (4,0): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
    (4,1): ('e', (4,0)),
    (4,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,3): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,4): ('b', [[1, 1], TE, TE, TE, TE, TE, TE]),
    (4,5): ('s', [TE, TE, TE, TE, TE, TE, TE], [[1, 2], TE, TE, TE, TE, TE, TE]),
    (4,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (4,7): ('e', (4,0)),
    (4,8): ('e', (4,0)),
    # (2,)
    (5,0): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
    (5,1): ('e', (5,0)),
    (5,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (5,3): ('e', (5,2)),
    (5,4): ('e', (5,2)),
    (5,5): ('b', [(2, 2), TE, TE, TE, TE, TE, TE]),
    (5,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (5,7): ('e', (5,0)),
    (5,8): ('e', (5,0)),
    # None
    (6,0): ('b', [TE, TE, TE, TE, TE, TE, TE]),
    (6,1): ('e', (6,0)),
    (6,2): ('e', (6,0)),
    (6,3): ('e', (6,0)),
    (6,4): ('e', (6,0)),
    (6,5): ('e', (6,0)),
    (6,6): ('e', (6,0)),
    (6,7): ('e', (6,0)),
    (6,8): ('e', (6,0)),
    # MethodNumber(2)
    (7,0): ('e', (0,0)),
    (7,1): ('e', (0,1)),
    (7,2): ('e', (0,2)),
    (7,3): ('e', (0,3)),
    (7,4): ('e', (0,4)),
    (7,5): ('e', (0,5)),
    (7,6): ('e', (0,6)),
    (7,7): ('e', (0,7)),
    (7,8): ('e', (0,8)),
    # CoerceNumber(2)
    (8,0): ('e', (0,0)),
    (8,1): ('e', (0,1)),
    (8,2): ('e', (0,2)),
    (8,3): ('e', (0,3)),
    (8,4): ('e', (0,4)),
    (8,5): ('e', (0,5)),
    (8,6): ('e', (0,6)),
    (8,7): ('e', (0,7)),
    (8,8): ('e', (0,8)),
}
def process_infix_results():
    """Normalise the module-level ``infix_results`` table in place.

    'e' (echo) entries are replaced by the entry they point at; 's'
    (split) and 'b' (both) entries become a ``(binop_results,
    inplace_results)`` pair.  Entries whose true-division slot holds a
    tuple pick the variant matching the interpreter's division mode.
    """
    for key in sorted(infix_results):
        entry = infix_results[key]
        tag = entry[0]
        if tag == 'e':
            # alias: copy the already-resolved results of another pairing
            infix_results[key] = infix_results[entry[1]]
            continue
        if tag == 's':
            pair = (entry[1], entry[2])
        elif tag == 'b':
            pair = (entry[1], entry[1])
        for idx in range(1):
            if isinstance(pair[idx][6], tuple):
                if 1/2 == 0:
                    # testing with classic (floor) division
                    pair[idx][6] = pair[idx][6][0]
                else:
                    # testing with -Qnew
                    pair[idx][6] = pair[idx][6][1]
        infix_results[key] = pair
# Resolve the 'e' entries and split each pairing into its two result
# lists; classic-division DeprecationWarnings are expected while the
# table is processed under -3.
with check_warnings(("classic (int|long) division", DeprecationWarning),
                    quiet=True):
    process_infix_results()
# now infix_results has two lists of results for every pairing.

prefix_binops = [ 'divmod' ]
# Expected divmod() results, indexed as prefix_results[ia][ib] for the
# candidate pairing (candidates[ia], candidates[ib]); TE marks
# combinations that must raise TypeError.
prefix_results = [
    [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)],
    [(1L,0L), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1L,0L)],
    [(2.0,0.0), (2.0,0.0), (1.0,0.0), ((2+0j),0j), TE, TE, TE, TE, (2.0,0.0)],
    [((1+0j),0j), ((1+0j),0j), (0j,(2+0j)), ((1+0j),0j), TE, TE, TE, TE, ((1+0j),0j)],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [TE, TE, TE, TE, TE, TE, TE, TE, TE],
    [(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)]
]
def format_float(value):
    """Render *value* to one decimal place, flushing near-zero to '0.0'.

    Magnitudes below 0.01 are reported as '0.0' so platform-specific
    floating-point noise cannot affect result comparisons.
    """
    return '0.0' if abs(value) < 0.01 else '%.1f' % value
# avoid testing platform fp quirks
def format_result(value):
    """Render *value* for comparison, normalising floats via format_float.

    Complex values are shown as '(re + imj)' with both parts normalised;
    plain floats are normalised directly; everything else uses str().
    """
    if isinstance(value, complex):
        real_part = format_float(value.real)
        imag_part = format_float(value.imag)
        return '(%s + %sj)' % (real_part, imag_part)
    if isinstance(value, float):
        return format_float(value)
    return str(value)
class CoercionTest(unittest.TestCase):
    """Exercise Python 2 coercion over every candidate type pairing."""
    def test_infix_binops(self):
        # For each (a, b) pairing, check every infix operator (and its
        # augmented-assignment form) against the infix_results table.
        for ia, a in enumerate(candidates):
            for ib, b in enumerate(candidates):
                results = infix_results[(ia, ib)]
                for op, res, ires in zip(infix_binops, results[0], results[1]):
                    if res is TE:
                        self.assertRaises(TypeError, eval,
                                          'a %s b' % op, {'a': a, 'b': b})
                    else:
                        self.assertEqual(format_result(res),
                                         format_result(eval('a %s b' % op)),
                                         '%s %s %s == %s failed' % (a, op, b, res))
                    try:
                        z = copy.copy(a)
                    except copy.Error:
                        z = a # assume it has no inplace ops
                    if ires is TE:
                        try:
                            exec 'z %s= b' % op
                        except TypeError:
                            pass
                        else:
                            self.fail("TypeError not raised")
                    else:
                        exec('z %s= b' % op)
                        self.assertEqual(ires, z)
    def test_prefix_binops(self):
        # Same idea for prefix binary functions (currently only divmod),
        # using the prefix_results table.
        for ia, a in enumerate(candidates):
            for ib, b in enumerate(candidates):
                for op in prefix_binops:
                    res = prefix_results[ia][ib]
                    if res is TE:
                        self.assertRaises(TypeError, eval,
                                          '%s(a, b)' % op, {'a': a, 'b': b})
                    else:
                        self.assertEqual(format_result(res),
                                         format_result(eval('%s(a, b)' % op)),
                                         '%s(%s, %s) == %s failed' % (op, a, b, res))
    def test_cmptypes(self):
        # Built-in tp_compare slots expect their arguments to have the
        # same type, but a user-defined __coerce__ doesn't have to obey.
        # SF #980352
        evil_coercer = CoerceTo(42)
        # Make sure these don't crash any more
        self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
        self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
        # ...but that this still works
        class WackyComparer(object):
            def __cmp__(slf, other):
                self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
                return 0
            __hash__ = None # Invalid cmp makes this unhashable
        self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
        # ...and classic classes too, since that code path is a little different
        class ClassicWackyComparer:
            def __cmp__(slf, other):
                self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
                return 0
        self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
    def test_infinite_rec_classic_classes(self):
        # if __coerce__() returns its arguments reversed it causes an infinite
        # recursion for classic classes.
        class Tester:
            def __coerce__(self, other):
                return other, self
        exc = TestFailed("__coerce__() returning its arguments reverse "
                         "should raise RuntimeError")
        try:
            Tester() + 1
        except (RuntimeError, TypeError):
            return
        except:
            raise exc
        else:
            raise exc
def test_main():
    # The coercion tests necessarily trigger the listed deprecation
    # warnings (complex divmod/floor ops, classic division); suppress
    # them so the run stays quiet.
    with check_warnings(("complex divmod.., // and % are deprecated",
                         DeprecationWarning),
                        ("classic (int|long) division", DeprecationWarning),
                        quiet=True):
        run_unittest(CoercionTest)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
adazey/Muzez | libs/nltk/tgrep.py | 1 | 39803 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: TGrep search
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Will Roberts <wildwilhelm@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
'''
============================================
TGrep search implementation for NLTK trees
============================================
This module supports TGrep2 syntax for matching parts of NLTK Trees.
Note that many tgrep operators require the tree passed to be a
``ParentedTree``.
External links:
- `Tgrep tutorial <http://www.stanford.edu/dept/linguistics/corpora/cas-tut-tgrep.html>`_
- `Tgrep2 manual <http://tedlab.mit.edu/~dr/Tgrep2/tgrep2.pdf>`_
- `Tgrep2 source <http://tedlab.mit.edu/~dr/Tgrep2/>`_
Usage
=====
>>> from nltk.tree import ParentedTree
>>> from nltk.tgrep import tgrep_nodes, tgrep_positions
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> list(tgrep_nodes('NN', [tree]))
[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]
>>> list(tgrep_positions('NN', [tree]))
[[(0, 2), (2, 1)]]
>>> list(tgrep_nodes('DT', [tree]))
[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]]
>>> list(tgrep_nodes('DT $ JJ', [tree]))
[[ParentedTree('DT', ['the'])]]
This implementation adds syntax to select nodes based on their NLTK
tree position. This syntax is ``N`` plus a Python tuple representing
the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are
valid node selectors. Example:
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> tree[0,0]
ParentedTree('DT', ['the'])
>>> tree[0,0].treeposition()
(0, 0)
>>> list(tgrep_nodes('N(0,0)', [tree]))
[[ParentedTree('DT', ['the'])]]
Caveats:
========
- Link modifiers: "?" and "=" are not implemented.
- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are
not implemented.
- The "=" and "~" links are not implemented.
Known Issues:
=============
- There are some issues with link relations involving leaf nodes
(which are represented as bare strings in NLTK trees). For
instance, consider the tree::
(S (A x))
The search string ``* !>> S`` should select all nodes which are not
dominated in some way by an ``S`` node (i.e., all nodes which are
not descendants of an ``S``). Clearly, in this tree, the only node
which fulfills this criterion is the top node (since it is not
dominated by anything). However, the code here will find both the
top node and the leaf node ``x``. This is because we cannot recover
the parent of the leaf, since it is stored as a bare string.
A possible workaround, when performing this kind of search, would be
to filter out all leaf nodes.
Implementation notes
====================
This implementation is (somewhat awkwardly) based on lambda functions
which are predicates on a node. A predicate is a function which is
either True or False; using a predicate function, we can identify sets
of nodes with particular properties. A predicate function, could, for
instance, return True only if a particular node has a label matching a
particular regular expression, and has a daughter node which has no
sisters. Because tgrep2 search strings can do things statefully (such
as substituting in macros, and binding nodes with node labels), the
actual predicate function is declared with three arguments::
pred = lambda n, m, l: return True # some logic here
``n``
is a node in a tree; this argument must always be given
``m``
contains a dictionary, mapping macro names onto predicate functions
``l``
is a dictionary to map node labels onto nodes in the tree
``m`` and ``l`` are declared to default to ``None``, and so need not be
specified in a call to a predicate. Predicates which call other
predicates must always pass the value of these arguments on. The
top-level predicate (constructed by ``_tgrep_exprs_action``) binds the
macro definitions to ``m`` and initialises ``l`` to an empty dictionary.
'''
from __future__ import absolute_import, print_function, unicode_literals
from nltk.compat import binary_type, text_type
import functools
import nltk.tree
try:
import pyparsing
except ImportError:
print('Warning: nltk.tgrep will not work without the `pyparsing` package')
print('installed.')
import re
class TgrepException(Exception):
    '''Exception raised for tgrep parsing and evaluation errors.'''
    pass
def ancestors(node):
    '''
    Returns the list of all nodes dominating the given tree node,
    nearest ancestor first.
    This method will not work with leaf nodes, since there is no way
    to recover the parent.
    '''
    chain = []
    try:
        walker = node.parent()
    except AttributeError:
        # bare-string leaves carry no parent pointer
        return chain
    while walker:
        chain.append(walker)
        walker = walker.parent()
    return chain
def unique_ancestors(node):
    '''
    Returns the list of all nodes dominating the given node, where
    there is only a single path of descent.
    '''
    chain = []
    try:
        walker = node.parent()
    except AttributeError:
        # leaves cannot name their parent
        return chain
    # stop as soon as an ancestor branches (has more than one child)
    while walker and len(walker) == 1:
        chain.append(walker)
        walker = walker.parent()
    return chain
def _descendants(node):
'''
Returns the list of all nodes which are descended from the given
tree node in some way.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:]]
def _leftmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
left branches from this node.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:] if all(y == 0 for y in x)]
def _rightmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
right branches from this node.
'''
try:
rightmost_leaf = max(node.treepositions())
except AttributeError:
return []
return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
def _istree(obj):
    '''True iff `obj` is an ``nltk.tree.Tree`` instance.'''
    return isinstance(obj, nltk.tree.Tree)
def _unique_descendants(node):
    '''
    Returns the list of all nodes descended from the given node, where
    there is only a single path of descent.
    '''
    chain = []
    walker = node
    # follow only-children downward until the path branches or ends
    while walker and _istree(walker) and len(walker) == 1:
        walker = walker[0]
        chain.append(walker)
    return chain
def _before(node):
'''
Returns the set of all nodes that are before the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] < pos[:len(x)]]
def _immediately_before(node):
'''
Returns the set of all nodes that are immediately before the given
node.
Tree node A immediately precedes node B if the last terminal
symbol (word) produced by A immediately precedes the first
terminal symbol produced by B.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
# go "upwards" from pos until there is a place we can go to the left
idx = len(pos) - 1
while 0 <= idx and pos[idx] == 0:
idx -= 1
if idx < 0:
return []
pos = list(pos[:idx + 1])
pos[-1] -= 1
before = tree[pos]
return [before] + _rightmost_descendants(before)
def _after(node):
'''
Returns the set of all nodes that are after the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] > pos[:len(x)]]
def _immediately_after(node):
'''
Returns the set of all nodes that are immediately after the given
node.
Tree node A immediately follows node B if the first terminal
symbol (word) produced by A immediately follows the last
terminal symbol produced by B.
'''
try:
pos = node.treeposition()
tree = node.root()
current = node.parent()
except AttributeError:
return []
# go "upwards" from pos until there is a place we can go to the
# right
idx = len(pos) - 1
while 0 <= idx and pos[idx] == len(current) - 1:
idx -= 1
current = current.parent()
if idx < 0:
return []
pos = list(pos[:idx + 1])
pos[-1] += 1
after = tree[pos]
return [after] + _leftmost_descendants(after)
def _tgrep_node_literal_value(node):
    '''
    Gets the string value of a given parse tree node, for comparison
    using the tgrep node literal predicates.
    '''
    if _istree(node):
        return node.label()
    return text_type(node)
def _tgrep_macro_use_action(_s, _l, tokens):
'''
Builds a lambda function which looks up the macro name used.
'''
assert len(tokens) == 1
assert tokens[0][0] == '@'
macro_name = tokens[0][1:]
def macro_use(n, m=None, l=None):
if m is None or macro_name not in m:
raise TgrepException('macro {0} not defined'.format(macro_name))
return m[macro_name](n, m, l)
return macro_use
def _tgrep_node_action(_s, _l, tokens):
    '''
    Builds a lambda function representing a predicate on a tree node
    depending on the name of its node.

    Handles plain node names, the wildcards ``*``/``__``, quoted
    literals, ``/regex/`` patterns, case-insensitive ``i@`` variants,
    and ``|``-separated disjunctions of any of these.
    '''
    # print 'node tokens: ', tokens
    if tokens[0] == "'":
        # strip initial apostrophe (tgrep2 print command)
        tokens = tokens[1:]
    if len(tokens) > 1:
        # disjunctive definition of a node name
        assert list(set(tokens[1::2])) == ['|']
        # recursively call self to interpret each node name definition
        tokens = [_tgrep_node_action(None, None, [node])
                  for node in tokens[::2]]
        # capture tokens and return the disjunction
        return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens)
    else:
        if hasattr(tokens[0], '__call__'):
            # this is a previously interpreted parenthetical node
            # definition (lambda function)
            return tokens[0]
        elif tokens[0] == '*' or tokens[0] == '__':
            # wildcard: matches any node
            return lambda n, m=None, l=None: True
        elif tokens[0].startswith('"'):
            # double-quoted literal; unescape \" and \\ before comparing
            assert tokens[0].endswith('"')
            node_lit = tokens[0][1:-1].replace('\\"', '"').replace('\\\\', '\\')
            return (lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s)(node_lit)
        elif tokens[0].startswith('/'):
            # /regex/: match anywhere in the node's string value
            assert tokens[0].endswith('/')
            node_lit = tokens[0][1:-1]
            return (lambda r: lambda n, m=None, l=None:
                    r.search(_tgrep_node_literal_value(n)))(re.compile(node_lit))
        elif tokens[0].startswith('i@'):
            # case-insensitive variant: lowercase both pattern and node
            node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()])
            return (lambda f: lambda n, m=None, l=None:
                    f(_tgrep_node_literal_value(n).lower()))(node_func)
        else:
            # bare node name: exact string equality
            return (lambda s: lambda n, m=None, l=None:
                    _tgrep_node_literal_value(n) == s)(tokens[0])
def _tgrep_parens_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from a parenthetical notation.
'''
# print 'parenthetical tokens: ', tokens
assert len(tokens) == 3
assert tokens[0] == '('
assert tokens[2] == ')'
return tokens[1]
def _tgrep_nltk_tree_pos_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which returns true if the node is located at a specific tree
position.
'''
# recover the tuple from the parsed sting
node_tree_position = tuple(int(x) for x in tokens if x.isdigit())
# capture the node's tree position
return (lambda i: lambda n, m=None, l=None: (hasattr(n, 'treeposition') and
n.treeposition() == i))(node_tree_position)
def _tgrep_relation_action(_s, _l, tokens):
    '''
    Builds a lambda function representing a predicate on a tree node
    depending on its relation to other nodes in the tree.

    ``tokens`` is either ``['[', predicate, ']']`` (a bracketed relation
    group) or an ``[operator, predicate]`` pair, optionally preceded by
    ``'!'`` for negation.
    '''
    # print 'relation tokens: ', tokens
    # process negation first if needed
    negated = False
    if tokens[0] == '!':
        negated = True
        tokens = tokens[1:]
    if tokens[0] == '[':
        # process square-bracketed relation expressions
        assert len(tokens) == 3
        assert tokens[2] == ']'
        retval = tokens[1]
    else:
        # process operator-node relation expressions
        assert len(tokens) == 2
        operator, predicate = tokens
        # A < B       A is the parent of (immediately dominates) B.
        if operator == '<':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l) for x in n))
        # A > B       A is the child of B.
        elif operator == '>':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                predicate(n.parent(), m, l))
        # A <, B      Synonymous with A <1 B.
        elif operator == '<,' or operator == '<1':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                bool(list(n)) and
                                                predicate(n[0], m, l))
        # A >, B      Synonymous with A >1 B.
        elif operator == '>,' or operator == '>1':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                (n is n.parent()[0]) and
                                                predicate(n.parent(), m, l))
        # A <N B      B is the Nth child of A (the first child is <1).
        elif operator[0] == '<' and operator[1:].isdigit():
            idx = int(operator[1:])
            # capture the index parameter
            retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
                                                           bool(list(n)) and
                                                           0 <= i < len(n) and
                                                           predicate(n[i], m, l)))(idx - 1)
        # A >N B      A is the Nth child of B (the first child is >1).
        elif operator[0] == '>' and operator[1:].isdigit():
            idx = int(operator[1:])
            # capture the index parameter
            retval = (lambda i: lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                           bool(n.parent()) and
                                                           0 <= i < len(n.parent()) and
                                                           (n is n.parent()[i]) and
                                                           predicate(n.parent(), m, l)))(idx - 1)
        # A <' B      B is the last child of A (also synonymous with A <-1 B).
        # A <- B      B is the last child of A (synonymous with A <-1 B).
        elif operator == '<\'' or operator == '<-' or operator == '<-1':
            retval = lambda n, m=None, l=None: (_istree(n) and bool(list(n))
                                                and predicate(n[-1], m, l))
        # A >' B      A is the last child of B (also synonymous with A >-1 B).
        # A >- B      A is the last child of B (synonymous with A >-1 B).
        elif operator == '>\'' or operator == '>-' or operator == '>-1':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                (n is n.parent()[-1]) and
                                                predicate(n.parent(), m, l))
        # A <-N B     B is the N th-to-last child of A (the last child is <-1).
        elif operator[:2] == '<-' and operator[2:].isdigit():
            idx = -int(operator[2:])
            # capture the index parameter
            retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
                                                           bool(list(n)) and
                                                           0 <= (i + len(n)) < len(n) and
                                                           predicate(n[i + len(n)], m, l)))(idx)
        # A >-N B     A is the N th-to-last child of B (the last child is >-1).
        elif operator[:2] == '>-' and operator[2:].isdigit():
            idx = -int(operator[2:])
            # capture the index parameter
            retval = (lambda i: lambda n, m=None, l=None:
                      (hasattr(n, 'parent') and
                       bool(n.parent()) and
                       0 <= (i + len(n.parent())) < len(n.parent()) and
                       (n is n.parent()[i + len(n.parent())]) and
                       predicate(n.parent(), m, l)))(idx)
        # A <: B      B is the only child of A
        elif operator == '<:':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                len(n) == 1 and
                                                predicate(n[0], m, l))
        # A >: B      A is the only child of B.
        elif operator == '>:':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                len(n.parent()) == 1 and
                                                predicate(n.parent(), m, l))
        # A << B      A dominates B (A is an ancestor of B).
        elif operator == '<<':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l) for x in _descendants(n)))
        # A >> B      A is dominated by B (A is a descendant of B).
        elif operator == '>>':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in ancestors(n))
        # A <<, B     B is a left-most descendant of A.
        elif operator == '<<,' or operator == '<<1':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _leftmost_descendants(n)))
        # A >>, B     A is a left-most descendant of B.
        elif operator == '>>,':
            retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
                                                    n in _leftmost_descendants(x))
                                                   for x in ancestors(n))
        # A <<' B     B is a right-most descendant of A.
        elif operator == '<<\'':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _rightmost_descendants(n)))
        # A >>' B     A is a right-most descendant of B.
        elif operator == '>>\'':
            retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
                                                    n in _rightmost_descendants(x))
                                                   for x in ancestors(n))
        # A <<: B     There is a single path of descent from A and B is on it.
        elif operator == '<<:':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _unique_descendants(n)))
        # A >>: B     There is a single path of descent from B and A is on it.
        elif operator == '>>:':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in unique_ancestors(n))
        # A . B       A immediately precedes B.
        elif operator == '.':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l)
                                                   for x in _immediately_after(n))
        # A , B       A immediately follows B.
        elif operator == ',':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l)
                                                   for x in _immediately_before(n))
        # A .. B      A precedes B.
        elif operator == '..':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _after(n))
        # A ,, B      A follows B.
        elif operator == ',,':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _before(n))
        # A $ B       A is a sister of B (and A != B).
        elif operator == '$' or operator == '%':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l)
                                                    for x in n.parent() if x is not n))
        # A $. B      A is a sister of and immediately precedes B.
        elif operator == '$.' or operator == '%.':
            retval = lambda n, m=None, l=None: (hasattr(n, 'right_sibling') and
                                                bool(n.right_sibling()) and
                                                predicate(n.right_sibling(), m, l))
        # A $, B      A is a sister of and immediately follows B.
        elif operator == '$,' or operator == '%,':
            retval = lambda n, m=None, l=None: (hasattr(n, 'left_sibling') and
                                                bool(n.left_sibling()) and
                                                predicate(n.left_sibling(), m, l))
        # A $.. B     A is a sister of and precedes B.
        elif operator == '$..' or operator == '%..':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                hasattr(n, 'parent_index') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l) for x in
                                                    n.parent()[n.parent_index() + 1:]))
        # A $,, B     A is a sister of and follows B.
        elif operator == '$,,' or operator == '%,,':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                hasattr(n, 'parent_index') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l) for x in
                                                    n.parent()[:n.parent_index()]))
        else:
            raise TgrepException(
                'cannot interpret tgrep operator "{0}"'.format(operator))
    # now return the built function
    if negated:
        return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval)
    else:
        return retval
def _tgrep_conjunction_action(_s, _l, tokens, join_char = '&'):
'''
Builds a lambda function representing a predicate on a tree node
from the conjunction of several other such lambda functions.
This is prototypically called for expressions like
(`tgrep_rel_conjunction`)::
< NP & < AP < VP
where tokens is a list of predicates representing the relations
(`< NP`, `< AP`, and `< VP`), possibly with the character `&`
included (as in the example here).
This is also called for expressions like (`tgrep_node_expr2`)::
NP < NN
S=s < /NP/=n : s < /VP/=v : n .. v
tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional)
list of segmented patterns (`tgrep_expr_labeled`, processed by
`_tgrep_segmented_pattern_action`).
'''
# filter out the ampersand
tokens = [x for x in tokens if x != join_char]
# print 'relation conjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
else:
return (lambda ts: lambda n, m=None, l=None: all(predicate(n, m, l)
for predicate in ts))(tokens)
def _tgrep_segmented_pattern_action(_s, _l, tokens):
'''
Builds a lambda function representing a segmented pattern.
Called for expressions like (`tgrep_expr_labeled`)::
=s .. =v < =n
This is a segmented pattern, a tgrep2 expression which begins with
a node label.
The problem is that for segemented_pattern_action (': =v < =s'),
the first element (in this case, =v) is specifically selected by
virtue of matching a particular node in the tree; to retrieve
the node, we need the label, not a lambda function. For node
labels inside a tgrep_node_expr, we need a lambda function which
returns true if the node visited is the same as =v.
We solve this by creating two copies of a node_label_use in the
grammar; the label use inside a tgrep_expr_labeled has a separate
parse action to the pred use inside a node_expr. See
`_tgrep_node_label_use_action` and
`_tgrep_node_label_pred_use_action`.
'''
# tokens[0] is a string containing the node label
node_label = tokens[0]
# tokens[1:] is an (optional) list of predicates which must all
# hold of the bound node
reln_preds = tokens[1:]
def pattern_segment_pred(n, m=None, l=None):
'''This predicate function ignores its node argument.'''
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# match the relation predicates against the node
return all(pred(node, m, l) for pred in reln_preds)
return pattern_segment_pred
def _tgrep_node_label_use_action(_s, _l, tokens):
'''
Returns the node label used to begin a tgrep_expr_labeled. See
`_tgrep_segmented_pattern_action`.
Called for expressions like (`tgrep_node_label_use`)::
=s
when they appear as the first element of a `tgrep_expr_labeled`
expression (see `_tgrep_segmented_pattern_action`).
It returns the node label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
return tokens[0][1:]
def _tgrep_node_label_pred_use_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which describes the use of a previously bound node label.
Called for expressions like (`tgrep_node_label_use_pred`)::
=s
when they appear inside a tgrep_node_expr (for example, inside a
relation). The predicate returns true if and only if its node
argument is identical the the node looked up in the node label
dictionary using the node's label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
node_label = tokens[0][1:]
def node_label_use_pred(n, m=None, l=None):
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# truth means the given node is this node
return n is node
return node_label_use_pred
def _tgrep_bind_node_label_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which can optionally bind a matching node into the tgrep2 string's
label_dict.
Called for expressions like (`tgrep_node_expr2`)::
/NP/
@NP=n
'''
# tokens[0] is a tgrep_node_expr
if len(tokens) == 1:
return tokens[0]
else:
# if present, tokens[1] is the character '=', and tokens[2] is
# a tgrep_node_label, a string value containing the node label
assert len(tokens) == 3
assert tokens[1] == '='
node_pred = tokens[0]
node_label = tokens[2]
def node_label_bind_pred(n, m=None, l=None):
if node_pred(n, m, l):
# bind `n` into the dictionary `l`
if l is None:
raise TgrepException(
'cannot bind node_label {0}: label_dict is None'.format(
node_label))
l[node_label] = n
return True
else:
return False
return node_label_bind_pred
def _tgrep_rel_disjunction_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from the disjunction of several other such lambda functions.
'''
# filter out the pipe
tokens = [x for x in tokens if x != '|']
# print 'relation disjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
elif len(tokens) == 2:
return (lambda a, b: lambda n, m=None, l=None:
a(n, m, l) or b(n, m, l))(tokens[0], tokens[1])
def _macro_defn_action(_s, _l, tokens):
'''
Builds a dictionary structure which defines the given macro.
'''
assert len(tokens) == 3
assert tokens[0] == '@'
return {tokens[1]: tokens[2]}
def _tgrep_exprs_action(_s, _l, tokens):
'''
This is the top-lebel node in a tgrep2 search string; the
predicate function it returns binds together all the state of a
tgrep2 search string.
Builds a lambda function representing a predicate on a tree node
from the disjunction of several tgrep expressions. Also handles
macro definitions and macro name binding, and node label
definitions and node label binding.
'''
if len(tokens) == 1:
return lambda n, m=None, l=None: tokens[0](n, None, {})
# filter out all the semicolons
tokens = [x for x in tokens if x != ';']
# collect all macro definitions
macro_dict = {}
macro_defs = [tok for tok in tokens if isinstance(tok, dict)]
for macro_def in macro_defs:
macro_dict.update(macro_def)
# collect all tgrep expressions
tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)]
# create a new scope for the node label dictionary
def top_level_pred(n, m=macro_dict, l=None):
label_dict = {}
# bind macro definitions and OR together all tgrep_exprs
return any(predicate(n, m, label_dict) for predicate in tgrep_exprs)
return top_level_pred
def _build_tgrep_parser(set_parse_actions = True):
    '''
    Builds a pyparsing-based parser object for tokenizing and
    interpreting tgrep search strings.

    With ``set_parse_actions`` False the parser only tokenizes (used by
    `tgrep_tokenize`); with it True each production is wired to the
    ``_tgrep_*_action`` function that builds the predicate lambdas.
    '''
    # --- terminals ---
    tgrep_op = (pyparsing.Optional('!') +
                pyparsing.Regex('[$%,.<>][%,.<>0-9-\':]*'))
    tgrep_qstring = pyparsing.QuotedString(quoteChar='"', escChar='\\',
                                           unquoteResults=False)
    tgrep_node_regex = pyparsing.QuotedString(quoteChar='/', escChar='\\',
                                              unquoteResults=False)
    tgrep_qstring_icase = pyparsing.Regex(
        'i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"')
    tgrep_node_regex_icase = pyparsing.Regex(
        'i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/')
    tgrep_node_literal = pyparsing.Regex('[^][ \r\t\n;:.,&|<>()$!@%\'^=]+')
    # --- recursive productions ---
    tgrep_expr = pyparsing.Forward()
    tgrep_relations = pyparsing.Forward()
    tgrep_parens = pyparsing.Literal('(') + tgrep_expr + ')'
    tgrep_nltk_tree_pos = (
        pyparsing.Literal('N(') +
        pyparsing.Optional(pyparsing.Word(pyparsing.nums) + ',' +
                           pyparsing.Optional(pyparsing.delimitedList(
                               pyparsing.Word(pyparsing.nums), delim=',') +
                               pyparsing.Optional(','))) + ')')
    tgrep_node_label = pyparsing.Regex('[A-Za-z0-9]+')
    tgrep_node_label_use = pyparsing.Combine('=' + tgrep_node_label)
    # see _tgrep_segmented_pattern_action
    tgrep_node_label_use_pred = tgrep_node_label_use.copy()
    macro_name = pyparsing.Regex('[^];:.,&|<>()[$!@%\'^=\r\t\n ]+')
    macro_name.setWhitespaceChars('')
    macro_use = pyparsing.Combine('@' + macro_name)
    tgrep_node_expr = (tgrep_node_label_use_pred |
                       macro_use |
                       tgrep_nltk_tree_pos |
                       tgrep_qstring_icase |
                       tgrep_node_regex_icase |
                       tgrep_qstring |
                       tgrep_node_regex |
                       '*' |
                       tgrep_node_literal)
    # node expression with an optional '=label' binding suffix
    tgrep_node_expr2 = ((tgrep_node_expr +
                         pyparsing.Literal('=').setWhitespaceChars('') +
                         tgrep_node_label.copy().setWhitespaceChars('')) |
                        tgrep_node_expr)
    tgrep_node = (tgrep_parens |
                  (pyparsing.Optional("'") +
                   tgrep_node_expr2 +
                   pyparsing.ZeroOrMore("|" + tgrep_node_expr)))
    tgrep_brackets = pyparsing.Optional('!') + '[' + tgrep_relations + ']'
    tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node)
    tgrep_rel_conjunction = pyparsing.Forward()
    tgrep_rel_conjunction << (tgrep_relation +
                              pyparsing.ZeroOrMore(pyparsing.Optional('&') +
                                                   tgrep_rel_conjunction))
    tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore(
        "|" + tgrep_relations)
    tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations)
    tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations)
    tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(':' + tgrep_expr_labeled)
    macro_defn = (pyparsing.Literal('@') +
                  pyparsing.White().suppress() +
                  macro_name +
                  tgrep_expr2)
    tgrep_exprs = (pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(';' + macro_defn) + ';') +
                   tgrep_expr2 +
                   pyparsing.ZeroOrMore(';' + (macro_defn | tgrep_expr2)) +
                   pyparsing.ZeroOrMore(';').suppress())
    if set_parse_actions:
        tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action)
        tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action)
        macro_use.setParseAction(_tgrep_macro_use_action)
        tgrep_node.setParseAction(_tgrep_node_action)
        tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action)
        tgrep_parens.setParseAction(_tgrep_parens_action)
        tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action)
        tgrep_relation.setParseAction(_tgrep_relation_action)
        tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action)
        tgrep_relations.setParseAction(_tgrep_rel_disjunction_action)
        macro_defn.setParseAction(_macro_defn_action)
        # the whole expression is also the conjunction of two
        # predicates: the first node predicate, and the remaining
        # relation predicates
        tgrep_expr.setParseAction(_tgrep_conjunction_action)
        tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action)
        tgrep_expr2.setParseAction(functools.partial(_tgrep_conjunction_action,
                                                     join_char = ':'))
        tgrep_exprs.setParseAction(_tgrep_exprs_action)
    # '#' starts a comment running to end-of-line
    return tgrep_exprs.ignore('#' + pyparsing.restOfLine)
def tgrep_tokenize(tgrep_string):
    '''
    Tokenizes a TGrep search string into separate tokens.
    '''
    # Normalize bytes input to text before handing it to the parser.
    if isinstance(tgrep_string, binary_type):
        tgrep_string = tgrep_string.decode()
    # Build the tokenizing parser (no parse actions attached).
    tokenizer = _build_tgrep_parser(False)
    return list(tokenizer.parseString(tgrep_string))
def tgrep_compile(tgrep_string):
    '''
    Parses (and tokenizes, if necessary) a TGrep search string into a
    lambda function.
    '''
    # Normalize bytes input to text before parsing.
    if isinstance(tgrep_string, binary_type):
        tgrep_string = tgrep_string.decode()
    # Build the compiling parser (parse actions attached) and require the
    # whole string to be consumed.
    compiler = _build_tgrep_parser(True)
    results = compiler.parseString(tgrep_string, parseAll=True)
    return list(results)[0]
def treepositions_no_leaves(tree):
    '''
    Returns all the tree positions in the given tree which are not
    leaf nodes.
    '''
    positions = tree.treepositions()
    # A position is internal iff it is a proper prefix of some other
    # position; leaves are never prefixes of anything.
    internal = {pos[:cut] for pos in positions for cut in range(len(pos))}
    return [pos for pos in positions if pos in internal]
def tgrep_positions(pattern, trees, search_leaves=True):
    """
    Return the tree positions in the trees which match the given pattern.

    :param pattern: a tgrep search pattern
    :type pattern: str or output of tgrep_compile()
    :param trees: a sequence of NLTK trees (usually ParentedTrees)
    :type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
    :type search_leaves: bool
    :rtype: iter(tree positions)
    """
    # Compile string patterns once so the predicate is reused per tree.
    if isinstance(pattern, (binary_type, text_type)):
        pattern = tgrep_compile(pattern)
    for tree in trees:
        try:
            candidates = (tree.treepositions() if search_leaves
                          else treepositions_no_leaves(tree))
            matches = []
            for candidate in candidates:
                if pattern(tree[candidate]):
                    matches.append(candidate)
            yield matches
        except AttributeError:
            # Items without tree methods (e.g. bare strings) match nothing.
            yield []
def tgrep_nodes(pattern, trees, search_leaves=True):
    """
    Return the tree nodes in the trees which match the given pattern.

    :param pattern: a tgrep search pattern
    :type pattern: str or output of tgrep_compile()
    :param trees: a sequence of NLTK trees (usually ParentedTrees)
    :type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
    :type search_leaves: bool
    :rtype: iter(tree nodes)
    """
    # Compile string patterns once so the predicate is reused per tree.
    if isinstance(pattern, (binary_type, text_type)):
        pattern = tgrep_compile(pattern)
    for tree in trees:
        try:
            candidates = (tree.treepositions() if search_leaves
                          else treepositions_no_leaves(tree))
            matches = []
            for candidate in candidates:
                node = tree[candidate]
                if pattern(node):
                    matches.append(node)
            yield matches
        except AttributeError:
            # Items without tree methods (e.g. bare strings) match nothing.
            yield []
| gpl-3.0 |
Glorf/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/abort_wsh.py | 465 | 1776 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    # Accept every connection: no extra opening-handshake validation is done.
    pass
def web_socket_transfer_data(request):
    # Deliberately abort instead of transferring any data; presumably used
    # by tests to exercise the server's AbortedByUserException handling.
    raise handshake.AbortedByUserException(
        "Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
| mpl-2.0 |
scollis/high_resolution_hydrology | cluster/profile_mpi0/ipython_kernel_config.py | 4 | 15364 | # Configuration file for ipython-kernel.
# ``get_config()`` is injected into the namespace by IPython's config
# loader; ``c`` is the root Config object that the (mostly commented-out)
# option assignments below would modify.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._execute_sleep = 0.0005
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 2.7.10 |Anaconda 1.9.1 (64-bit)| (default, May 28 2015, 17:02:03) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://anaconda.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'scollis'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| bsd-2-clause |
prozorro-sale/openprocurement.auctions.dgf | openprocurement/auctions/dgf/tests/base.py | 1 | 18881 | # -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta
from copy import deepcopy
from datetime import datetime
from uuid import uuid4
from base64 import b64encode
from urllib import urlencode
from openprocurement.auctions.core.utils import get_now
from openprocurement.api.constants import SANDBOX_MODE
from openprocurement.api.utils import apply_data_patch
from openprocurement.auctions.core.tests.base import (
BaseWebTest as CoreBaseWebTest,
BaseAuctionWebTest as CoreBaseAuctionWebTest
)
DEFAULT_ACCELERATION = 1440
now = datetime.now()
test_organization = {
"name": u"Державне управління справами",
"identifier": {
"scheme": u"UA-EDR",
"id": u"00037256",
"uri": u"http://www.dus.gov.ua/"
},
"address": {
"countryName": u"Україна",
"postalCode": u"01220",
"region": u"м. Київ",
"locality": u"м. Київ",
"streetAddress": u"вул. Банкова, 11, корпус 1"
},
"contactPoint": {
"name": u"Державне управління справами",
"telephone": u"0440000000"
}
}
test_procuringEntity = test_organization.copy()
test_auction_data = {
"title": u"футляри до державних нагород",
"dgfID": u"219560",
"tenderAttempts": 1,
"procuringEntity": test_procuringEntity,
"value": {
"amount": 100,
"currency": u"UAH"
},
"minimalStep": {
"amount": 35,
"currency": u"UAH"
},
"items": [
{
"description": u"Земля для військовослужбовців",
"classification": {
"scheme": u"CPV",
"id": u"66113000-5",
"description": u"Земельні ділянки"
},
"unit": {
"name": u"item",
"code": u"44617100-9"
},
"quantity": 5.001,
"contractPeriod": {
"startDate": (now + timedelta(days=2)).isoformat(),
"endDate": (now + timedelta(days=5)).isoformat()
},
"address": {
"countryName": u"Україна",
"postalCode": "79000",
"region": u"м. Київ",
"locality": u"м. Київ",
"streetAddress": u"вул. Банкова 1"
}
}
],
"auctionPeriod": {
"startDate": (now.date() + timedelta(days=14)).isoformat()
},
"procurementMethodType": "dgfOtherAssets",
}
if SANDBOX_MODE:
test_auction_data['procurementMethodDetails'] = 'quick, accelerator={}'.format(DEFAULT_ACCELERATION)
test_auction_maximum_data = deepcopy(test_auction_data)
test_auction_maximum_data.update({
"title_en" : u"Cases with state awards",
"title_ru" : u"футляры к государственным наградам",
"description" : u"футляри до державних нагород",
"description_en" : u"Cases with state awards",
"description_ru" : u"футляры к государственным наградам"
})
test_auction_maximum_data["items"][0].update({
"description_en" : u"Cases with state awards",
"description_ru" : u"футляры к государственным наградам"
})
test_features_auction_data = test_auction_data.copy()
test_features_item = test_features_auction_data['items'][0].copy()
test_features_item['id'] = "1"
test_features_auction_data['items'] = [test_features_item]
test_features_auction_data["features"] = [
{
"code": "OCDS-123454-AIR-INTAKE",
"featureOf": "item",
"relatedItem": "1",
"title": u"Потужність всмоктування",
"title_en": "Air Intake",
"description": u"Ефективна потужність всмоктування пилососа, в ватах (аероватах)",
"enum": [
{
"value": 0.1,
"title": u"До 1000 Вт"
},
{
"value": 0.15,
"title": u"Більше 1000 Вт"
}
]
},
{
"code": "OCDS-123454-YEARS",
"featureOf": "tenderer",
"title": u"Років на ринку",
"title_en": "Years trading",
"description": u"Кількість років, які організація учасник працює на ринку",
"enum": [
{
"value": 0.05,
"title": u"До 3 років"
},
{
"value": 0.1,
"title": u"Більше 3 років, менше 5 років"
},
{
"value": 0.15,
"title": u"Більше 5 років"
}
]
}
]
base_test_bids = [
{
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
},
{
"tenderers": [
test_organization
],
"value": {
"amount": 479,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
}
]
# Bids for the qualification flow: deep copies of the base bids, each
# flagged as "qualified" (required by the dgf procedure).
test_bids = []
for base_bid in base_test_bids:
    qualified_bid = deepcopy(base_bid)
    qualified_bid['qualified'] = True
    test_bids.append(qualified_bid)
test_lots = [
{
'title': 'lot title',
'description': 'lot description',
'value': test_auction_data['value'],
'minimalStep': test_auction_data['minimalStep'],
}
]
test_features = [
{
"code": "code_item",
"featureOf": "item",
"relatedItem": "1",
"title": u"item feature",
"enum": [
{
"value": 0.01,
"title": u"good"
},
{
"value": 0.02,
"title": u"best"
}
]
},
{
"code": "code_tenderer",
"featureOf": "tenderer",
"title": u"tenderer feature",
"enum": [
{
"value": 0.01,
"title": u"good"
},
{
"value": 0.02,
"title": u"best"
}
]
}
]
test_financial_auction_data = deepcopy(test_auction_data)
test_financial_auction_data["procurementMethodType"] = "dgfFinancialAssets"
test_financial_organization = deepcopy(test_organization)
test_financial_organization['additionalIdentifiers'] = [{
"scheme": u"UA-FIN",
"id": u"А01 457213"
}]
# Bids for the financial-assets flow: qualified bids additionally flagged
# "eligible" and re-tendered by the financially-licensed organization.
test_financial_bids = []
for qualified_bid in test_bids:
    financial_bid = deepcopy(qualified_bid)
    financial_bid['eligible'] = True
    financial_bid['tenderers'] = [test_financial_organization]
    test_financial_bids.append(financial_bid)
class BaseWebTest(CoreBaseWebTest):
    """Base Web Test to test openprocurement.auctions.dgf.

    It sets up the database before each test and deletes it after.
    """
    # Resolve fixture/config paths relative to this test package.
    relative_to = os.path.dirname(__file__)
class BaseAuctionWebTest(CoreBaseAuctionWebTest):
    """Base test class for dgf auction tests.

    Provides helpers to force an auction into a given lifecycle status by
    rewriting its period dates directly in the database, and convenience
    wrappers for the award/auction HTTP endpoints.
    """
    # Resolve fixture/config paths relative to this test package.
    relative_to = os.path.dirname(__file__)
    initial_data = test_auction_data
    initial_organization = test_organization
    def go_to_rectificationPeriod_end(self):
        """Rewind the auction so its rectificationPeriod has just ended
        while tenderPeriod/enquiryPeriod are still open."""
        now = get_now()
        self.set_status('active.tendering', {
            "rectificationPeriod": {
                "startDate": (now - timedelta(days=14)).isoformat(),
                # Sandbox mode uses accelerated (minute-scale) periods.
                "endDate": (now - (timedelta(minutes=6) if SANDBOX_MODE else timedelta(days=6))).isoformat()
            },
            "tenderPeriod": {
                "startDate": (now - timedelta(days=14)).isoformat(),
                "endDate": (now + (timedelta(minutes=1) if SANDBOX_MODE else timedelta(days=1))).isoformat()
            },
            "enquiryPeriod": {
                "startDate": (now - timedelta(days=14)).isoformat(),
                "endDate": (now + (timedelta(minutes=1) if SANDBOX_MODE else timedelta(days=1))).isoformat()
            },
            "auctionPeriod": {
                "startDate": (now + timedelta(days=1)).isoformat()
            }
        })
    def set_status(self, status, extra=None):
        """Force the auction into *status* by writing consistent period
        dates straight into the database, then re-read it over HTTP.

        :param status: target auction status string
        :param extra: optional dict of additional fields to patch in
        :return: the chronograph GET response for the auction

        NOTE(review): this method uses the module-level ``now`` captured at
        import time (``datetime.now()``), not ``get_now()`` -- confirm that
        this staleness is intended.
        """
        data = {'status': status}
        if status == 'active.tendering':
            data.update({
                "enquiryPeriod": {
                    "startDate": (now).isoformat(),
                    "endDate": (now + timedelta(days=7)).isoformat()
                },
                "rectificationPeriod": {
                    "startDate": (now).isoformat(),
                    "endDate": (now + timedelta(days=1)).isoformat()
                },
                "tenderPeriod": {
                    "startDate": (now).isoformat(),
                    "endDate": (now + timedelta(days=7)).isoformat()
                }
            })
        elif status == 'active.auction':
            data.update({
                "enquiryPeriod": {
                    "startDate": (now - timedelta(days=7)).isoformat(),
                    "endDate": (now).isoformat()
                },
                "rectificationPeriod": {
                    "startDate": (now - timedelta(days=7)).isoformat(),
                    "endDate": (now - timedelta(days=6)).isoformat()
                },
                "tenderPeriod": {
                    "startDate": (now - timedelta(days=7)).isoformat(),
                    "endDate": (now).isoformat()
                },
                "auctionPeriod": {
                    "startDate": (now).isoformat()
                }
            })
            # Multi-lot auctions need per-lot auctionPeriod dates too.
            if self.initial_lots:
                data.update({
                    'lots': [
                        {
                            "auctionPeriod": {
                                "startDate": (now).isoformat()
                            }
                        }
                        for i in self.initial_lots
                    ]
                })
        elif status == 'active.qualification':
            data.update({
                "enquiryPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=1)).isoformat()
                },
                "rectificationPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=6)).isoformat()
                },
                "tenderPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=1)).isoformat()
                },
                "auctionPeriod": {
                    "startDate": (now - timedelta(days=1)).isoformat(),
                    "endDate": (now).isoformat()
                },
                "awardPeriod": {
                    "startDate": (now).isoformat()
                }
            })
            if self.initial_lots:
                data.update({
                    'lots': [
                        {
                            "auctionPeriod": {
                                "startDate": (now - timedelta(days=1)).isoformat(),
                                "endDate": (now).isoformat()
                            }
                        }
                        for i in self.initial_lots
                    ]
                })
        elif status == 'active.awarded':
            data.update({
                "enquiryPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=1)).isoformat()
                },
                "rectificationPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=6)).isoformat()
                },
                "tenderPeriod": {
                    "startDate": (now - timedelta(days=8)).isoformat(),
                    "endDate": (now - timedelta(days=1)).isoformat()
                },
                "auctionPeriod": {
                    "startDate": (now - timedelta(days=1)).isoformat(),
                    "endDate": (now).isoformat()
                },
                "awardPeriod": {
                    "startDate": (now).isoformat(),
                    "endDate": (now).isoformat()
                }
            })
            if self.initial_lots:
                data.update({
                    'lots': [
                        {
                            "auctionPeriod": {
                                "startDate": (now - timedelta(days=1)).isoformat(),
                                "endDate": (now).isoformat()
                            }
                        }
                        for i in self.initial_lots
                    ]
                })
        elif status == 'complete':
            data.update({
                "enquiryPeriod": {
                    "startDate": (now - timedelta(days=18)).isoformat(),
                    "endDate": (now - timedelta(days=11)).isoformat()
                },
                "rectificationPeriod": {
                    "startDate": (now - timedelta(days=18)).isoformat(),
                    "endDate": (now - timedelta(days=17)).isoformat()
                },
                "tenderPeriod": {
                    "startDate": (now - timedelta(days=18)).isoformat(),
                    "endDate": (now - timedelta(days=11)).isoformat()
                },
                "auctionPeriod": {
                    "startDate": (now - timedelta(days=11)).isoformat(),
                    "endDate": (now - timedelta(days=10)).isoformat()
                },
                "awardPeriod": {
                    "startDate": (now - timedelta(days=10)).isoformat(),
                    "endDate": (now - timedelta(days=10)).isoformat()
                }
            })
            if self.initial_lots:
                data.update({
                    'lots': [
                        {
                            "auctionPeriod": {
                                "startDate": (now - timedelta(days=11)).isoformat(),
                                "endDate": (now - timedelta(days=10)).isoformat()
                            }
                        }
                        for i in self.initial_lots
                    ]
                })
        if extra:
            data.update(extra)
        # Patch the document directly in the database, bypassing the API.
        auction = self.db.get(self.auction_id)
        auction.update(apply_data_patch(auction, data))
        self.db.save(auction)
        # Re-read as the chronograph user, then restore the original auth.
        authorization = self.app.authorization
        self.app.authorization = ('Basic', ('chronograph', ''))
        #response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
        response = self.app.get('/auctions/{}'.format(self.auction_id))
        self.app.authorization = authorization
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        return response
    def upload_auction_protocol(self, award):
        """Attach an ``auctionProtocol`` document to *award* and assert
        the upload succeeded with the expected metadata."""
        award_id = award['id']
        response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(self.auction_id, award_id, self.auction_token),
                                      {'data': {
                                          'title': 'auction_protocol.pdf',
                                          'url': self.generate_docservice_url(),
                                          'hash': 'md5:' + '0' * 32,
                                          'format': 'application/msword',
                                          "description": "auction protocol",
                                          "documentType": 'auctionProtocol',
                                      }})
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        doc_id = response.json["data"]['id']
        self.assertIn(doc_id, response.headers['Location'])
        self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
        self.assertEqual('auctionProtocol', response.json["data"]["documentType"])
        self.assertEqual('auction_owner', response.json["data"]["author"])
    def post_auction_results(self):
        """Post auction results as the 'auction' user, verify the auction
        moved to active.qualification and remember the two award ids."""
        authorization = self.app.authorization
        self.app.authorization = ('Basic', ('auction', ''))
        now = get_now()
        auction_result = {
            'bids': [
                {
                    "id": b['id'],
                    # Stagger bid dates one second apart (latest first).
                    "date": (now - timedelta(seconds=i)).isoformat(),
                    "value": b['value']
                }
                for i, b in enumerate(self.initial_bids)
            ]
        }
        response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
        self.assertEqual('active.qualification', auction["status"])
        self.first_award = auction['awards'][0]
        self.second_award = auction['awards'][1]
        self.first_award_id = self.first_award['id']
        self.second_award_id = self.second_award['id']
        self.app.authorization = authorization
    def generate_docservice_url(self):
        """Build a signed document-service download URL for a fake document
        (random uuid, all-zero md5), signed with the registry's key."""
        uuid = uuid4().hex
        key = self.app.app.registry.docservice_key
        keyid = key.hex_vk()[:8]
        signature = b64encode(key.signature("{}\0{}".format(uuid, '0' * 32)))
        query = {'Signature': signature, 'KeyID': keyid}
        return "http://localhost/get/{}?{}".format(uuid, urlencode(query))
    def patch_award(self, award_id, status, bid_token=None):
        """PATCH an award to *status* (optionally with a bidder's access
        token) and assert a 200 response; returns the response."""
        if bid_token:
            response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, award_id, bid_token), {"data": {"status": status}})
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.content_type, 'application/json')
            return response
        response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, award_id), {"data": {"status": status}})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        return response
    def forbidden_patch_award(self, award_id, before_status, status):
        """Assert that patching the award from *before_status* to *status*
        is rejected with 403 and the expected error description."""
        response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, award_id), {"data": {"status": status}}, status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['errors'][0]["description"], "Can't switch award ({}) status to ({}) status".format(before_status, status))
class BaseFinancialAuctionWebTest(BaseAuctionWebTest):
    """Web-test base class pre-configured with financial-auction fixtures."""
    relative_to = os.path.dirname(__file__)
    # Financial-auction variants of the shared test fixtures.
    initial_data = test_financial_auction_data
    initial_organization = test_financial_organization
| apache-2.0 |
jolyonb/edx-platform | common/test/acceptance/pages/studio/edit_tabs.py | 1 | 5240 | """
Pages page for a course.
"""
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.course_page import CoursePage
from bok_choy.promise import EmptyPromise
from selenium.webdriver import ActionChains
class PagesPage(CoursePage):
    """
    Pages page for a course.
    """
    url_path = "tabs"

    def is_browser_on_page(self):
        """
        Returns True once the static-pages view body is present.
        """
        return self.q(css='body.view-static-pages').present

    def is_static_page_present(self):
        """
        Checks for static tab's presence

        Returns:
            bool: True if present
        """
        return self.q(css='.wrapper.wrapper-component-action-header').present

    def add_static_page(self):
        """
        Adds a static page and waits until it appears in the tab list.
        """
        total_tabs = len(self.q(css='.course-nav-list>li'))
        click_css(self, '.add-pages .new-tab', require_notification=False)
        self.wait_for(
            lambda: len(self.q(css='.course-nav-list>li')) == total_tabs + 1,
            description="Static tab is added"
        )
        self.wait_for_element_visibility(
            u'.tab-list :nth-child({}) .xblock-student_view'.format(total_tabs),
            'Static tab is visible'
        )

    def delete_static_tab(self):
        """
        Deletes a static page, confirming the browser prompt.
        """
        click_css(self, '.btn-default.delete-button.action-button', require_notification=False)
        confirm_prompt(self)

    def click_edit_static_page(self):
        """
        Clicks on edit button to open up the xblock modal
        """
        self.q(css='.edit-button').first.click()
        EmptyPromise(
            lambda: self.q(css='.xblock-studio_view').present,
            'Wait for the Studio editor to be present'
        ).fulfill()

    def drag_and_drop_first_static_page_to_last(self):
        """
        Drags and drops the first static page to the last position.
        """
        # Identical interaction to drag_and_drop() with the default selector;
        # delegate instead of duplicating the ActionChains sequence.
        self.drag_and_drop()

    def drag_and_drop(self, default_tab=False):
        """
        Drags and drops the first static page (or the first built-in tab when
        *default_tab* is True) to the last position.
        """
        css_selector = '.component .drag-handle'
        if default_tab:
            css_selector = '.drag-handle.action'
        source_element = self.q(css=css_selector).results[0]
        target_element = self.q(css='.new-component-item').results[0]
        action = ActionChains(self.browser)
        action.drag_and_drop(source_element, target_element).perform()
        self.wait_for_ajax()

    @property
    def static_tab_titles(self):
        """
        Return titles of all static tabs

        Returns:
            list: list of all the titles
        """
        self.wait_for_element_visibility(
            '.wrapper-component-action-header .component-actions',
            "Tab's edit button is visible"
        )
        return self.q(css='div.xmodule_StaticTabModule').text

    @property
    def built_in_page_titles(self):
        """
        Gets the default tab titles

        Returns:
            list: list of all the titles
        """
        return self.q(css='.course-nav-list.ui-sortable h3').text

    def open_settings_tab(self):
        """
        Clicks settings tab
        """
        self.q(css='.editor-modes .settings-button').first.click()
        self.wait_for_ajax()

    def is_tab_visible(self, tab_name):
        """
        Checks for the tab's visibility

        Args:
            tab_name(string): Name of the tab for which visibility is to be checked

        Returns:
            bool: True if the tab is visible to students, False otherwise.
        """
        css_selector = u'[data-tab-id="{}"] .toggle-checkbox'.format(tab_name)
        # The checkbox marks a *hidden* tab, so visibility is its negation.
        return not self.q(css=css_selector).selected

    def toggle_tab(self, tab_name):
        """
        Toggles the visibility on tab

        Args:
            tab_name(string): Name of the tab to be toggled
        """
        css_selector = u'[data-tab-id="{}"] .action-visible'.format(tab_name)
        return self.q(css=css_selector).first.click()

    def set_field_val(self, field_display_name, field_value):
        """
        Set the value of a field in editor

        Arguments:
            field_display_name(str): Display name of the field for which the value is to be changed
            field_value(str): New value for the field
        """
        selector = u'.xblock-studio_view li.field label:contains("{}") + input'.format(field_display_name)
        # jQuery's :contains() is not valid CSS, so set the value via script.
        script = '$(arguments[0]).val(arguments[1]).change();'
        self.browser.execute_script(script, selector, field_value)

    def save(self):
        """
        Clicks save button.
        """
        click_css(self, '.action-save')

    def refresh_and_wait_for_load(self):
        """
        Refresh the page and wait for all resources to load.
        """
        self.browser.refresh()
        self.wait_for_page()
| agpl-3.0 |
alxgu/ansible | lib/ansible/modules/system/aix_lvol.py | 44 | 10578 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
version_added: "2.4"
options:
vg:
description:
- The volume group this logical volume is part of.
type: str
required: true
lv:
description:
- The name of the logical volume.
type: str
required: true
lv_type:
description:
- The type of the logical volume.
type: str
default: jfs2
size:
description:
- The size of the logical volume with one of the [MGT] units.
type: str
copies:
description:
- The number of copies of the logical volume.
- Maximum copies are 3.
type: int
default: 1
policy:
description:
- Sets the interphysical volume allocation policy.
- C(maximum) allocates logical partitions across the maximum number of physical volumes.
- C(minimum) allocates logical partitions across the minimum number of physical volumes.
type: str
choices: [ maximum, minimum ]
default: maximum
state:
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
type: str
choices: [ absent, present ]
default: present
opts:
description:
- Free-form options to be passed to the mklv command.
type: str
pvs:
description:
- A list of physical volumes e.g. C(hdisk1,hdisk2).
type: list
'''
EXAMPLES = r'''
- name: Create a logical volume of 512M
aix_lvol:
vg: testvg
lv: testlv
size: 512M
- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
aix_lvol:
vg: testvg
lv: test2lv
size: 512M
pvs: [ hdisk1, hdisk2 ]
- name: Create a logical volume of 512M mirrored
aix_lvol:
vg: testvg
lv: test3lv
size: 512M
copies: 2
- name: Create a logical volume of 1G with a minimum placement policy
aix_lvol:
vg: rootvg
lv: test4lv
size: 1G
policy: minimum
- name: Create a logical volume with special options like mirror pool
aix_lvol:
vg: testvg
lv: testlv
size: 512M
opts: -p copy1=poolA -p copy2=poolB
- name: Extend the logical volume to 1200M
aix_lvol:
vg: testvg
lv: test4lv
size: 1200M
- name: Remove the logical volume
aix_lvol:
vg: testvg
lv: testlv
state: absent
'''
RETURN = r'''
msg:
type: str
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
'''
import re
from ansible.module_utils.basic import AnsibleModule
def convert_size(module, size):
    """Convert a size string with an M/G/T suffix into megabytes.

    :param module: AnsibleModule, used to report parse failures via fail_json.
    :param size: size string such as ``512M``, ``2G`` or ``1T`` (suffix is
        case-insensitive).
    :returns: the size in megabytes as an int.
    """
    # Guard against an empty string, which previously raised IndexError
    # before fail_json could produce a friendly error.
    if not size:
        module.fail_json(msg="No valid size unit specified.")
    # Megabytes are the base unit; each step up multiplies by 1024.
    units = {'M': 1, 'G': 1024, 'T': 1024 * 1024}
    unit = size[-1].upper()
    if unit not in units:
        module.fail_json(msg="No valid size unit specified.")
    try:
        value = int(size[:-1])
    except ValueError:
        # Previously a non-integer magnitude escaped as a raw traceback.
        module.fail_json(msg="Failed to parse size value from '%s'." % size)
    return value * units[unit]
def round_ppsize(x, base=16):
    """Round *x* up to the nearest multiple of *base*.

    AIX allocates logical volumes in whole physical-partition multiples,
    so requested sizes are always rounded upward.
    """
    quotient, remainder = divmod(x, base)
    if remainder:
        quotient += 1
    return quotient * base
def parse_lv(data):
    """Parse ``lslv`` output into a description of the logical volume.

    :param data: the raw stdout of ``lslv <lv>``.
    :returns: dict with ``name``, ``vg``, ``size`` (in MB) and ``policy``
        keys, or None when the output does not contain a complete
        description.  (Previously a partial listing raised NameError on
        the unbound locals.)
    """
    name = None
    vg = None
    lps = None
    pp_size = None
    policy = None
    for line in data.splitlines():
        match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
        if match is not None:
            name = match.group(1)
            vg = match.group(2)
            continue
        match = re.search(r"LPs:\s+(\d+).*PPs", line)
        if match is not None:
            lps = int(match.group(1))
            continue
        match = re.search(r"PP SIZE:\s+(\d+)", line)
        if match is not None:
            pp_size = int(match.group(1))
            continue
        match = re.search(r"INTER-POLICY:\s+(\w+)", line)
        if match is not None:
            policy = match.group(1)
            continue
    if name is None or lps is None or pp_size is None or policy is None:
        return None
    # LV size is the number of logical partitions times the partition size.
    return {'name': name, 'vg': vg, 'size': lps * pp_size, 'policy': policy}
def parse_vg(data):
    """Parse ``lsvg`` output into a description of the volume group.

    :param data: the raw stdout of ``lsvg <vg>``.
    :returns: dict with ``name``, ``size`` (total MB), ``free`` (free MB)
        and ``pp_size`` (physical-partition size in MB) keys, or None when
        the output does not contain a complete description.  (Previously a
        partial listing raised NameError on the unbound locals.)
    """
    name = None
    size = None
    free = None
    pp_size = None
    for line in data.splitlines():
        match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
        if match is not None:
            name = match.group(1)
            continue
        # The value in parentheses is the total size in megabytes.
        match = re.search(r"TOTAL PP.*\((\d+)", line)
        if match is not None:
            size = int(match.group(1))
            continue
        match = re.search(r"PP SIZE:\s+(\d+)", line)
        if match is not None:
            pp_size = int(match.group(1))
            continue
        # The value in parentheses is the free space in megabytes.
        match = re.search(r"FREE PP.*\((\d+)", line)
        if match is not None:
            free = int(match.group(1))
            continue
    if name is None or size is None or free is None or pp_size is None:
        return None
    return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
def main():
    """Module entry point: create, remove, extend or re-policy an AIX LV.

    Decision flow: look up the volume group with ``lsvg`` and the logical
    volume with ``lslv``; if the LV does not exist, create it (state=present)
    or exit unchanged (state=absent); if it exists, remove it (state=absent)
    or adjust allocation policy and extend it as needed.  Shrinking is
    rejected.  Exits via module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str', required=True),
            lv_type=dict(type='str', default='jfs2'),
            size=dict(type='str'),
            opts=dict(type='str', default=''),
            copies=dict(type='int', default=1),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
            pvs=dict(type='list', default=list())
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']

    # mklv accepts the physical volumes as trailing space-separated args.
    pv_list = ' '.join(pvs)

    # Map the module-level policy name onto the mklv/chlv -e flag value.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command(
        "%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)

    changed = False

    this_lv = parse_lv(lv_info)

    # A new LV cannot be created without an explicit size.
    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)

            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))

            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                # AIX cannot safely shrink a logical volume in place.
                module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))


if __name__ == '__main__':
    main()
| gpl-3.0 |
zippy2/libvirt | scripts/check-file-access.py | 5 | 4267 | #!/usr/bin/env python3
#
# Copyright (C) 2016-2019 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
#
# This script is supposed to check test_file_access.txt file and
# warn about file accesses outside our working tree.
#
#
import os
import re
import sys
import tempfile
# Run the wrapped test command with file-access tracing enabled, then compare
# every recorded access against the rules in permitted_file_access.txt.
abs_builddir = os.environ.get('abs_builddir', '')
abs_srcdir = os.environ.get('abs_srcdir', '')

access_fd, access_file = tempfile.mkstemp(dir=abs_builddir,
                                          prefix='file-access-',
                                          suffix='.txt')
permitted_file = os.path.join(abs_srcdir, 'permitted_file_access.txt')

# The traced test process writes its access log to this file.
os.environ['VIR_TEST_FILE_ACCESS_OUTPUT'] = access_file

test = ' '.join(sys.argv[1:])
ret = os.system(test)

if ret != 0 or os.read(access_fd, 10) == b'':
    # Test failed, or it recorded no accesses: nothing to check.
    os.close(access_fd)
    os.remove(access_file)
    sys.exit(ret)

# The emptiness probe above advanced the shared file offset by up to 10
# bytes; rewind so the parser below sees the log from the beginning
# (previously the first bytes of the log were silently skipped).
os.lseek(access_fd, 0, os.SEEK_SET)

known_actions = ["open", "fopen", "access", "stat", "lstat", "connect"]

files = []
permitted = []

# Each log line is "$path: $action: $progname[: $testname]".
with os.fdopen(access_fd, "r") as fh:
    for line in fh:
        line = line.rstrip("\n")

        m = re.search(r'''^(\S*):\s*(\S*):\s*(\S*)(\s*:\s*(.*))?$''', line)
        if m is not None:
            rec = {
                "path": m.group(1),
                "action": m.group(2),
                "progname": m.group(3),
                "testname": m.group(5),
            }
            files.append(rec)
        else:
            raise Exception("Malformed line %s" % line)

# Permitted-rule lines come in two shapes, disambiguated by whether the
# second field is a known action name.
with open(permitted_file, "r") as fh:
    for line in fh:
        line = line.rstrip("\n")

        if re.search(r'''^\s*#.*$''', line):
            continue  # comment
        if line == "":
            continue

        m = re.search(r'''^(\S*):\s*(\S*)(:\s*(\S*)(\s*:\s*(.*))?)?$''', line)
        if m is not None and m.group(2) in known_actions:
            # $path: $action: $progname: $testname
            rec = {
                "path": m.group(1),
                "action": m.group(3),
                "progname": m.group(4),
                "testname": m.group(6),
            }
            permitted.append(rec)
        else:
            m = re.search(r'''^(\S*)(:\s*(\S*)(\s*:\s*(.*))?)?$''', line)
            if m is not None:
                # $path: $progname: $testname
                rec = {
                    "path": m.group(1),
                    "action": None,
                    "progname": m.group(3),
                    "testname": m.group(5),
                }
                permitted.append(rec)
            else:
                raise Exception("Malformed line %s" % line)

# Now we should check if %traces is included in $permitted. For
# now checking just keys is sufficient
err = False
for file in files:
    match = False

    for rule in permitted:
        # A rule field of None acts as a wildcard for that field.
        if not re.match("^" + rule["path"] + "$", file["path"]):
            continue

        if (rule["action"] is not None and
                not re.match("^" + rule["action"] + "$", file["action"])):
            continue

        if (rule["progname"] is not None and
                not re.match("^" + rule["progname"] + "$", file["progname"])):
            continue

        if (rule["testname"] is not None and
                file["testname"] is not None and
                not re.match("^" + rule["testname"] + "$", file["testname"])):
            continue

        match = True

    if not match:
        err = True
        print("%s: %s: %s" %
              (file["path"], file["action"], file["progname"]),
              end="")
        if file["testname"] is not None:
            print(": %s" % file["testname"], end="")
        print("")

os.remove(access_file)

if err:
    sys.exit(1)
sys.exit(0)
| lgpl-2.1 |
gengue/django-role-permissions | rolepermissions/tests/test_verifications.py | 2 | 3242 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from model_mommy import mommy
from rolepermissions.roles import RolesManager, AbstractUserRole
from rolepermissions.verifications import has_role, has_permission, has_object_permission
from rolepermissions.permissions import register_object_checker
class VerRole1(AbstractUserRole):
    """Test role whose two permissions are both granted by default."""
    available_permissions = {
        'permission1': True,
        'permission2': True,
    }
class VerRole2(AbstractUserRole):
    """Test role with one granted and one denied-by-default permission."""
    available_permissions = {
        'permission3': True,
        'permission4': False,
    }
class VerRole3(AbstractUserRole):
    """Test role with an explicit role_name override and no default grants."""
    # Overrides the auto-derived name ('ver_role3') used for lookups.
    role_name = 'ver_new_name'
    available_permissions = {
        'permission5': False,
        'permission6': False,
    }
class HasRoleTests(TestCase):
    """Tests for the has_role() verification helper."""

    def setUp(self):
        # Every test starts with a fresh user that holds only VerRole1.
        self.user = mommy.make(get_user_model())

        VerRole1.assign_role_to_user(self.user)

    def test_user_has_VerRole1(self):
        user = self.user
        self.assertTrue(has_role(user, VerRole1))

    def test_user_does_not_have_VerRole2(self):
        user = self.user
        self.assertFalse(has_role(user, VerRole2))

    def test_user_has_VerRole1_or_VerRole2(self):
        # A list argument means "any of these roles".
        user = self.user
        self.assertTrue(has_role(user, [VerRole1, VerRole2]))

    def test_has_role_by_name(self):
        user = self.user
        self.assertTrue(has_role(user, 'ver_role1'))

    def test_user_has_VerRole1_or_VerRole3_by_name(self):
        # VerRole3 is looked up by its overridden role_name.
        user = self.user
        VerRole3.assign_role_to_user(user)
        self.assertTrue(has_role(user, ['ver_role1', 'ver_new_name']))

    def test_not_existent_role(self):
        user = self.user
        self.assertFalse(has_role(user, 'not_a_role'))

    def test_none_user_param(self):
        # A missing user never has a role.
        self.assertFalse(has_role(None, 'ver_role1'))
class HasPermissionTests(TestCase):
    """Tests for the has_permission() verification helper."""

    def setUp(self):
        # Every test starts with a fresh user that holds only VerRole1.
        self.user = mommy.make(get_user_model())

        VerRole1.assign_role_to_user(self.user)

    def test_has_VerRole1_permission(self):
        user = self.user
        self.assertTrue(has_permission(user, 'permission1'))

    def test_does_not_have_VerRole1_permission(self):
        # 'permission3' belongs to VerRole2, which the user does not hold.
        user = self.user
        VerRole1.assign_role_to_user(user)
        self.assertFalse(has_permission(user, 'permission3'))

    def test_not_existent_permission(self):
        user = self.user
        self.assertFalse(has_permission(user, 'not_a_permission'))

    def test_user_with_no_role(self):
        user = mommy.make(get_user_model())
        self.assertFalse(has_permission(user, 'permission1'))

    def test_none_user_param(self):
        # A missing user never has a permission.
        self.assertFalse(has_permission(None, 'ver_role1'))
class HasObjectPermissionTests(TestCase):
    """Tests for the has_object_permission() verification helper."""

    def setUp(self):
        self.user = mommy.make(get_user_model())

        VerRole1.assign_role_to_user(self.user)

    @register_object_checker()
    def obj_checker(role, user, obj):
        # Grant access exactly when the object itself is truthy.  Using
        # bool() ensures the checker always returns a real boolean rather
        # than leaking the raw (falsy) object back to the caller.
        return bool(obj)

    def test_has_object_permission(self):
        user = self.user
        self.assertTrue(has_object_permission('obj_checker', user, True))

    def test_does_not_have_object_permission(self):
        user = self.user
        self.assertFalse(has_object_permission('obj_checker', user, False))
| mit |
yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/pyspark/sql.py | 2 | 15241 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.rdd import RDD
from py4j.protocol import Py4JError
__all__ = ["SQLContext", "HiveContext", "LocalHiveContext", "TestHiveContext", "SchemaRDD", "Row"]
class SQLContext:
    """Main entry point for SparkSQL functionality.

    A SQLContext can be used create L{SchemaRDD}s, register L{SchemaRDD}s as
    tables, execute SQL over tables, cache tables, and read parquet files.
    """

    def __init__(self, sparkContext, sqlContext = None):
        """Create a new SQLContext.

        @param sparkContext: The SparkContext to wrap.
        @param sqlContext: An optional JVM Scala SQLContext to wrap; when
            omitted, one is created lazily on first use.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.inferSchema(srdd) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...

        >>> bad_rdd = sc.parallelize([1,2,3])
        >>> sqlCtx.inferSchema(bad_rdd) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...

        >>> allTypes = sc.parallelize([{"int" : 1, "string" : "string", "double" : 1.0, "long": 1L,
        ... "boolean" : True}])
        >>> srdd = sqlCtx.inferSchema(allTypes).map(lambda x: (x.int, x.string, x.double, x.long,
        ... x.boolean))
        >>> srdd.collect()[0]
        (1, u'string', 1.0, 1, True)
        """
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        # JVM-side helper that converts an RDD of pickled dicts to Java maps.
        self._pythonToJavaMap = self._jvm.PythonRDD.pythonToJavaMap

        if sqlContext:
            self._scala_SQLContext = sqlContext

    @property
    def _ssql_ctx(self):
        """Accessor for the JVM SparkSQL context.

        Subclasses can override this property to provide their own
        JVM Contexts.
        """
        # Created lazily so a user-supplied context (from __init__) wins.
        if not hasattr(self, '_scala_SQLContext'):
            self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
        return self._scala_SQLContext

    def inferSchema(self, rdd):
        """Infer and apply a schema to an RDD of L{dict}s.

        We peek at the first row of the RDD to determine the fields names
        and types, and then use that to extract all the dictionaries.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.collect() == [{"field1" : 1, "field2" : "row1"}, {"field1" : 2, "field2": "row2"},
        ...                    {"field1" : 3, "field2": "row3"}]
        True
        """
        # Reject inputs that cannot carry a schema: an RDD that already has
        # one, or an RDD whose elements are not dictionaries.
        if (rdd.__class__ is SchemaRDD):
            raise ValueError("Cannot apply schema to %s" % SchemaRDD.__name__)
        elif not isinstance(rdd.first(), dict):
            raise ValueError("Only RDDs with dictionaries can be converted to %s: %s" %
                             (SchemaRDD.__name__, rdd.first()))

        jrdd = self._pythonToJavaMap(rdd._jrdd)
        srdd = self._ssql_ctx.inferSchema(jrdd.rdd())
        return SchemaRDD(srdd, self)

    def registerRDDAsTable(self, rdd, tableName):
        """Registers the given RDD as a temporary table in the catalog.

        Temporary tables exist only during the lifetime of this instance of
        SQLContext.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        """
        if (rdd.__class__ is SchemaRDD):
            jschema_rdd = rdd._jschema_rdd
            self._ssql_ctx.registerRDDAsTable(jschema_rdd, tableName)
        else:
            raise ValueError("Can only register SchemaRDD as table")

    def parquetFile(self, path):
        """Loads a Parquet file, returning the result as a L{SchemaRDD}.

        >>> import tempfile, shutil
        >>> parquetFile = tempfile.mkdtemp()
        >>> shutil.rmtree(parquetFile)
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.saveAsParquetFile(parquetFile)
        >>> srdd2 = sqlCtx.parquetFile(parquetFile)
        >>> srdd.collect() == srdd2.collect()
        True
        """
        jschema_rdd = self._ssql_ctx.parquetFile(path)
        return SchemaRDD(jschema_rdd, self)

    def sql(self, sqlQuery):
        """Return a L{SchemaRDD} representing the result of the given query.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> srdd2.collect() == [{"f1" : 1, "f2" : "row1"}, {"f1" : 2, "f2": "row2"},
        ...                     {"f1" : 3, "f2": "row3"}]
        True
        """
        return SchemaRDD(self._ssql_ctx.sql(sqlQuery), self)

    def table(self, tableName):
        """Returns the specified table as a L{SchemaRDD}.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.table("table1")
        >>> srdd.collect() == srdd2.collect()
        True
        """
        return SchemaRDD(self._ssql_ctx.table(tableName), self)

    def cacheTable(self, tableName):
        """Caches the specified table in-memory."""
        self._ssql_ctx.cacheTable(tableName)

    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache."""
        self._ssql_ctx.uncacheTable(tableName)
class HiveContext(SQLContext):
    """A variant of Spark SQL that integrates with data stored in Hive.

    Configuration for Hive is read from hive-site.xml on the classpath.
    It supports running both SQL and HiveQL commands.
    """

    @property
    def _ssql_ctx(self):
        # Lazily create (and cache) the JVM-side HiveContext.  A Py4JError
        # here usually means Spark was built without Hive support.
        try:
            if not hasattr(self, '_scala_HiveContext'):
                self._scala_HiveContext = self._get_hive_ctx()
            return self._scala_HiveContext
        except Py4JError as e:
            raise Exception("You must build Spark with Hive. Export 'SPARK_HIVE=true' and run " \
                            "sbt/sbt assembly" , e)

    def _get_hive_ctx(self):
        # Subclasses override this to supply a different JVM Hive context.
        return self._jvm.HiveContext(self._jsc.sc())

    def hiveql(self, hqlQuery):
        """
        Runs a query expressed in HiveQL, returning the result as a L{SchemaRDD}.
        """
        return SchemaRDD(self._ssql_ctx.hiveql(hqlQuery), self)

    def hql(self, hqlQuery):
        """
        Runs a query expressed in HiveQL, returning the result as a L{SchemaRDD}.

        Alias for L{hiveql}.
        """
        return self.hiveql(hqlQuery)
class LocalHiveContext(HiveContext):
    """Starts up an instance of hive where metadata is stored locally.

    An in-process metadata database is created with data stored in ./metadata.
    Warehouse data is stored in ./warehouse.

    >>> import os
    >>> hiveCtx = LocalHiveContext(sc)
    >>> try:
    ...     supress = hiveCtx.hql("DROP TABLE src")
    ... except Exception:
    ...     pass
    >>> kv1 = os.path.join(os.environ["SPARK_HOME"], 'examples/src/main/resources/kv1.txt')
    >>> supress = hiveCtx.hql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
    >>> supress = hiveCtx.hql("LOAD DATA LOCAL INPATH '%s' INTO TABLE src" % kv1)
    >>> results = hiveCtx.hql("FROM src SELECT value").map(lambda r: int(r.value.split('_')[1]))
    >>> num = results.count()
    >>> reduce_sum = results.reduce(lambda x, y: x + y)
    >>> num
    500
    >>> reduce_sum
    130091
    """

    def _get_hive_ctx(self):
        # JVM LocalHiveContext keeps its metastore/warehouse under the cwd.
        return self._jvm.LocalHiveContext(self._jsc.sc())
class TestHiveContext(HiveContext):
    """HiveContext backed by the JVM-side TestHiveContext (for test suites)."""

    def _get_hive_ctx(self):
        return self._jvm.TestHiveContext(self._jsc.sc())
# TODO: Investigate if it is more efficient to use a namedtuple. One problem is that named tuples
# are custom classes that must be generated per Schema.
class Row(dict):
    """A row in L{SchemaRDD}.

    An extended L{dict} that takes a L{dict} in its constructor, and
    exposes those items as fields.

    >>> r = Row({"hello" : "world", "foo" : "bar"})
    >>> r.hello
    'world'
    >>> r.foo
    'bar'
    """

    def __init__(self, d):
        # Fold any pre-existing instance attributes into the mapping, then
        # alias the instance __dict__ to that same mapping so every key is
        # reachable both as r["key"] and as the attribute r.key.
        d.update(self.__dict__)
        self.__dict__ = d
        # Populate the dict storage from the same mapping.
        super(Row, self).__init__(d)
class SchemaRDD(RDD):
    """An RDD of L{Row} objects that has an associated schema.

    The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
    utilize the relational query api exposed by SparkSQL.

    For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
    L{SchemaRDD} is not operated on directly, as it's underlying
    implementation is a RDD composed of Java objects. Instead it is
    converted to a PythonRDD in the JVM, on which Python operations can
    be done.
    """

    def __init__(self, jschema_rdd, sql_ctx):
        # Note: RDD.__init__ is deliberately not called; this class wraps a
        # JVM SchemaRDD and sets up the attributes RDD methods rely on.
        self.sql_ctx = sql_ctx
        self._sc = sql_ctx._sc
        self._jschema_rdd = jschema_rdd

        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = self.sql_ctx._sc
        self._jrdd_deserializer = self.ctx.serializer

    @property
    def _jrdd(self):
        """Lazy evaluation of PythonRDD object.

        Only done when a user calls methods defined by the
        L{pyspark.rdd.RDD} super class (map, filter, etc.).
        """
        if not hasattr(self, '_lazy_jrdd'):
            self._lazy_jrdd = self._toPython()._jrdd
        return self._lazy_jrdd

    @property
    def _id(self):
        # Identifier of the lazily-created PythonRDD view of this SchemaRDD.
        return self._jrdd.id()

    def saveAsParquetFile(self, path):
        """Save the contents as a Parquet file, preserving the schema.

        Files that are written out using this method can be read back in as
        a SchemaRDD using the L{SQLContext.parquetFile} method.

        >>> import tempfile, shutil
        >>> parquetFile = tempfile.mkdtemp()
        >>> shutil.rmtree(parquetFile)
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.saveAsParquetFile(parquetFile)
        >>> srdd2 = sqlCtx.parquetFile(parquetFile)
        >>> srdd2.collect() == srdd.collect()
        True
        """
        self._jschema_rdd.saveAsParquetFile(path)

    def registerAsTable(self, name):
        """Registers this RDD as a temporary table using the given name.

        The lifetime of this temporary table is tied to the L{SQLContext}
        that was used to create this SchemaRDD.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.registerAsTable("test")
        >>> srdd2 = sqlCtx.sql("select * from test")
        >>> srdd.collect() == srdd2.collect()
        True
        """
        self._jschema_rdd.registerAsTable(name)

    def insertInto(self, tableName, overwrite = False):
        """Inserts the contents of this SchemaRDD into the specified table.

        Optionally overwriting any existing data.
        """
        self._jschema_rdd.insertInto(tableName, overwrite)

    def saveAsTable(self, tableName):
        """Creates a new table with the contents of this SchemaRDD."""
        self._jschema_rdd.saveAsTable(tableName)

    def count(self):
        """Return the number of elements in this RDD.

        Unlike the base RDD implementation of count, this implementation
        leverages the query optimizer to compute the count on the SchemaRDD,
        which supports features such as filter pushdown.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.count()
        3L
        >>> srdd.count() == srdd.map(lambda x: x).count()
        True
        """
        return self._jschema_rdd.count()

    def _toPython(self):
        # We have to import the Row class explicitly, so that the reference Pickler has is
        # pyspark.sql.Row instead of __main__.Row
        from pyspark.sql import Row
        jrdd = self._jschema_rdd.javaToPython()
        # TODO: This is inefficient, we should construct the Python Row object
        # in Java land in the javaToPython function. May require a custom
        # pickle serializer in Pyrolite
        return RDD(jrdd, self._sc, self._sc.serializer).map(lambda d: Row(d))

    # We override the default cache/persist/checkpoint behavior as we want to cache the underlying
    # SchemaRDD object in the JVM, not the PythonRDD checkpointed by the super class
    def cache(self):
        self.is_cached = True
        self._jschema_rdd.cache()
        return self

    def persist(self, storageLevel):
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jschema_rdd.persist(javaStorageLevel)
        return self

    def unpersist(self):
        self.is_cached = False
        self._jschema_rdd.unpersist()
        return self

    def checkpoint(self):
        self.is_checkpointed = True
        self._jschema_rdd.checkpoint()

    def isCheckpointed(self):
        return self._jschema_rdd.isCheckpointed()

    def getCheckpointFile(self):
        # The JVM side returns a Scala Option; unwrap it to a path or None.
        checkpointFile = self._jschema_rdd.getCheckpointFile()
        if checkpointFile.isDefined():
            return checkpointFile.get()
        else:
            return None

    def coalesce(self, numPartitions, shuffle=False):
        rdd = self._jschema_rdd.coalesce(numPartitions, shuffle)
        return SchemaRDD(rdd, self.sql_ctx)

    def distinct(self):
        rdd = self._jschema_rdd.distinct()
        return SchemaRDD(rdd, self.sql_ctx)

    def intersection(self, other):
        # Schema-aware set operations only make sense between SchemaRDDs.
        if (other.__class__ is SchemaRDD):
            rdd = self._jschema_rdd.intersection(other._jschema_rdd)
            return SchemaRDD(rdd, self.sql_ctx)
        else:
            raise ValueError("Can only intersect with another SchemaRDD")

    def repartition(self, numPartitions):
        rdd = self._jschema_rdd.repartition(numPartitions)
        return SchemaRDD(rdd, self.sql_ctx)

    def subtract(self, other, numPartitions=None):
        # Schema-aware set operations only make sense between SchemaRDDs.
        if (other.__class__ is SchemaRDD):
            if numPartitions is None:
                rdd = self._jschema_rdd.subtract(other._jschema_rdd)
            else:
                rdd = self._jschema_rdd.subtract(other._jschema_rdd, numPartitions)
            return SchemaRDD(rdd, self.sql_ctx)
        else:
            raise ValueError("Can only subtract another SchemaRDD")
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # A small batch size makes even these tiny examples span several
    # batches, exercising the batching code paths.
    sc = SparkContext('local[4]', 'PythonTest', batchSize=2)
    globs['sc'] = sc
    globs['sqlCtx'] = SQLContext(sc)
    globs['rdd'] = sc.parallelize(
        [{"field1": 1, "field2": "row1"},
         {"field1": 2, "field2": "row2"},
         {"field1": 3, "field2": "row3"}])
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
# Run the doctest suite when this file is executed as a script.
if __name__ == "__main__":
    _test()
| apache-2.0 |
xwolf12/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
    """Two ramp images must produce graphs with matching gradient values."""
    x, y = np.mgrid[:4, :4] - 10
    gx = img_to_graph(x)
    gy = img_to_graph(y)
    assert_equal(gx.nnz, gy.nnz)
    # Negative entries lie on the diagonal and hold the original pixel
    # values; positive entries are gradient values, which should be the
    # same for both ramps.
    np.testing.assert_array_equal(gx.data[gx.data > 0],
                                  gy.data[gy.data > 0])
def test_grid_to_graph():
    """Exercise grid_to_graph on edgeless masks, integer masks, and dtypes."""
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    # NOTE: `np.bool`, `np.int` and `np.float` below were deprecated aliases
    # of the builtins and were removed in NumPy 1.24; the builtins are
    # strictly equivalent.
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size ** 2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert_true(connected_components(A)[0] == 2)
    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert_true(connected_components(A)[0] == 1)
    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert_true(A.dtype == bool)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert_true(A.dtype == int)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=float)
    assert_true(A.dtype == float)
def test_connect_regions():
    """Graph components must match the labelled regions of the mask."""
    img = sp.misc.lena()
    for threshold in (50, 150):
        mask = img > threshold
        graph = img_to_graph(img, mask)
        assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
    """Same component count check for grid_to_graph, with and without dtype."""
    img = sp.misc.lena()
    # Default dtype.
    mask = img > 50
    graph = grid_to_graph(*img.shape, mask=mask)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
    # Explicit dtype=None.
    mask = img > 150
    graph = grid_to_graph(*img.shape, mask=mask, dtype=None)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
    """Return the lena image downsampled 4x by summing 2x2 pixel blocks twice."""
    lena = sp.misc.lena().astype(np.float32)
    # Each pass sums 2x2 blocks, halving both dimensions.
    lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
            + lena[1::2, 1::2])
    lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
            + lena[1::2, 1::2])
    # `np.float` was a deprecated alias of the builtin `float` and was
    # removed in NumPy 1.24; the builtin is strictly equivalent here.
    lena = lena.astype(float)
    # 16 pixels were summed into each output pixel; restore intensity range.
    lena /= 16.0
    return lena
def _orange_lena(lena=None):
    """Return a 3-channel, orange-tinted version of the (downsampled) image."""
    if lena is None:
        lena = _downsampled_lena()
    colored = np.zeros(lena.shape + (3,))
    # Invert and scale each channel differently to obtain the tint.
    colored[:, :, 0] = 256 - lena
    colored[:, :, 1] = 256 - lena / 2
    colored[:, :, 2] = 256 - lena / 4
    return colored
def _make_images(lena=None):
    """Return a collection of three copies of the image, offset by 0, 1, 2."""
    if lena is None:
        lena = _downsampled_lena()
    images = np.zeros((3,) + lena.shape)
    for offset in range(3):
        images[offset] = lena + offset
    return images
# Shared fixtures reused by the tests below.
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
    """Without max_patches, one patch per valid top-left corner is returned."""
    img = downsampled_lena
    i_h, i_w = img.shape
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(img, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
    """Color patches keep their trailing channel axis."""
    img = orange_lena
    i_h, i_w = img.shape[:2]
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(img, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
    """Non-square image with non-square patches."""
    img = downsampled_lena[:, 32:97]
    i_h, i_w = img.shape
    p_h, p_w = 16, 12
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(img, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
    """max_patches caps the patch count; bad values raise ValueError."""
    img = downsampled_lena
    i_h, i_w = img.shape
    p_h, p_w = 16, 16
    # An integer caps the count directly.
    patches = extract_patches_2d(img, (p_h, p_w), max_patches=100)
    assert_equal(patches.shape, (100, p_h, p_w))
    # A float is interpreted as a fraction of all possible patches.
    expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
    patches = extract_patches_2d(img, (p_h, p_w), max_patches=0.5)
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
    # Fractions above 1 and negative values are rejected.
    assert_raises(ValueError, extract_patches_2d, img, (p_h, p_w),
                  max_patches=2.0)
    assert_raises(ValueError, extract_patches_2d, img, (p_h, p_w),
                  max_patches=-1.0)
def test_reconstruct_patches_perfect():
    """Reassembling the complete set of overlapping patches is lossless."""
    img = downsampled_lena
    patch_shape = (16, 16)
    patches = extract_patches_2d(img, patch_shape)
    reconstructed = reconstruct_from_patches_2d(patches, img.shape)
    np.testing.assert_array_equal(img, reconstructed)
def test_reconstruct_patches_perfect_color():
    """Lossless reconstruction also holds for 3-channel images."""
    img = orange_lena
    patch_shape = (16, 16)
    patches = extract_patches_2d(img, patch_shape)
    reconstructed = reconstruct_from_patches_2d(patches, img.shape)
    np.testing.assert_array_equal(img, reconstructed)
def test_patch_extractor_fit():
    """fit() is a no-op and must return the extractor itself."""
    extractor = PatchExtractor(patch_size=(8, 8), max_patches=100,
                               random_state=0)
    assert_true(extractor == extractor.fit(lena_collection))
def test_patch_extractor_max_patches():
    """max_patches works per input image, as a count or as a fraction."""
    imgs = lena_collection
    i_h, i_w = imgs.shape[1:3]
    p_h, p_w = 8, 8
    # Integer max_patches: that many patches per image.
    max_patches = 100
    expected_n_patches = len(imgs) * max_patches
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(imgs)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))
    # Float max_patches: a fraction of all possible patches per image.
    max_patches = 0.5
    expected_n_patches = len(imgs) * int((i_h - p_h + 1) * (i_w - p_w + 1)
                                         * max_patches)
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(imgs)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
    """With no patch_size, the extractor picks a default (12x12 here)."""
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(lena_collection)
    assert_equal(patches.shape, (len(lena_collection) * 100, 12, 12))
def test_patch_extractor_all_patches():
    """Without max_patches every possible patch of every image is returned."""
    imgs = lena_collection
    i_h, i_w = imgs.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(imgs) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(imgs)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
    """The channel axis is preserved when extracting from color images."""
    imgs = _make_images(orange_lena)
    i_h, i_w = imgs.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(imgs) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(imgs)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
    # Parallel tables of test cases; index i across the lists forms one
    # case: image shape, patch shape, extraction step, the expected shape
    # of the resulting view of patches, and the top-left corner of the
    # last patch that should be extracted.
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]
    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]
    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
    # Concatenate the 1D, 2D and 3D cases and run them all.
    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D
    for (image_shape, patch_size, patch_step, expected_view,
         last_patch) in zip(image_shapes, patch_sizes, patch_steps,
                            expected_views, last_patches):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = extract_patches(image, patch_shape=patch_size,
                                  extraction_step=patch_step)
        # The leading ndim axes of the returned view index the patches.
        ndim = len(image_shape)
        assert_true(patches.shape[:ndim] == expected_view)
        # The final patch must equal the corresponding image region.
        last_patch_slices = [slice(i, i + j, None) for i, j in
                             zip(last_patch, patch_size)]
        assert_true((patches[[slice(-1, None, None)] * ndim] ==
                     image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
    """A scalar patch_shape means the same patch size along each dimension."""
    img = downsampled_lena
    i_h, i_w = img.shape
    p = 8
    expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
    patches = extract_patches(img, patch_shape=p)
    assert_true(patches.shape == (expected_n_patches[0],
                                  expected_n_patches[1], p, p))
def test_width_patch():
    """Patches wider or taller than the image must be rejected."""
    img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert_raises(ValueError, extract_patches_2d, img, (4, 1))
    assert_raises(ValueError, extract_patches_2d, img, (1, 4))
| bsd-3-clause |
smira/spamfighter | spamfighter/utils/config.py | 1 | 10811 | # -*- coding: utf-8 -*-
#
# SpamFighter, Copyright 2008, 2009 NetStream LLC (http://netstream.ru/, we@netstream.ru)
#
# This file is part of SpamFighter.
#
# SpamFighter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SpamFighter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpamFighter. If not, see <http://www.gnu.org/licenses/>.
#
"""
Питоновский враппер конфигурационных XML файлов
Превращает xml файл с конфигом в питоновскую структуру настроек
Парсинг xml
===========
- тег со значением транслируется в строковую переменную::
<variable>value</variable>
>>>variable
"value"
- тег c аттрибутом type по возможности будет оттранслирован в переменную данного типа::
<variable type="int">5</variable>
>>>variable
5
- вложенные теги во вложенные объекты класса {L Cfg} где дочерние элементы представлены атрибутами::
<parent>
<child>value</child>
<child2>value</child>
</parent>
>>>parent.child
"value"
>>>parent.child2
"value"
- Одинаковые теги с указанным id превращаются в питоновский хэш::
<parent>
<items id = 'first'>value1</items>
<items id = 'second'>value2</items>
</parent>
>>>parent.items['first']
"value1"
Структура xml конфига
=====================
::
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE config>
<config>
<global>
<!-- основная секция. Все что находится в этой секции будет доступно как аттрибуты модуля config -->
<servers>
<!-- список "боевых" серверов. -->
<server id="1">
<hostname>webmorda.netstream.ru</hostname>
</server>
</servers>
<some_my_params>some my value</some_my_params>
</global>
<local>
</local>
<development>
<!-- девелоперские настройки. Эта секция будет подключена в случае если сервер на котором запущено приложение, не в списке "боевых" серверов.
Настройки из этого раздела перезаписывают одноименные настроки из основного раздела -->
<some_my_params>some my value</some_my_params>
</development>
<testing>
<!-- тестировочный раздел. Будет подключен если приложение запущено из под trial. Настройки из этого раздела перетирают одноименные настройки
из основного и девелоперского разделов.
<some_my_params>some my value</some_my_params>
</testing>
</config>
Пример использования
====================
from spamfighter.utils import config # в случае если используется конфигурационный файл ./config.xml - этой строки достаточно
config.load("cfg.py")
print config.some_my_params
"""
_config = {}
from xml.dom.minidom import *
import re
import socket
import sys
import exceptions
import os
space = re.compile("\S")
def parse_file(filename):
    """
    Parse the given file and return a Cfg object.

    This function does NOT interpret the global/development/etc. sections;
    it simply translates the XML into a Python object.

    Usage:
        from spamfighter.utils.config import parse_file
        cfg = parse_file('cfg.xml')
        print cfg.some_my_params

    @type filename: C{str}
    @param filename: name of the configuration file
    @rtype: C{Cfg}
    @return: Cfg instance holding the settings
    """
    try:
        dom = parse(filename)
    except xml.parsers.expat.ExpatError:
        # Wrap expat errors so callers see which file failed to parse.
        raise ParseException(sys.exc_info()[1], filename)
    return _parse_dom(dom)
def _parse_dom(dom):
    # Recursively convert a DOM node into either a Cfg tree or a plain
    # string scalar (Python 2 module: note the old-style raise below).
    global space
    global _filename
    dict = Cfg()
    for elem in dom.childNodes:
        if isinstance(elem, xml.dom.minidom.Text):
            # Text starting with a non-whitespace character makes this
            # element a scalar leaf (`space` matches \S at the start).
            if space.match(elem.data):
                return elem.data.encode('utf8')
        elif isinstance(elem, xml.dom.minidom.Comment):
            pass
        else:
            value = _parse_dom(elem)
            # An explicit type="..." attribute coerces the parsed value.
            if elem.hasAttribute('type'):
                value = _get_class_by_name(elem.getAttribute('type'))(value)
            if elem.tagName == 'include':
                # <include> pulls in another config file; relative paths are
                # resolved against the directory of the including file.
                filename = _parse_dom(elem)
                if type(filename) != type(""):
                    raise ValueError, value
                if not os.path.isabs(filename):
                    filename = os.path.normpath(os.path.join(os.path.dirname(_filename), filename))
                subconf = _load_file(filename)
                _deep_merge(dict, subconf)
            else:
                if elem.hasAttribute('id'):
                    # Repeated tags with id attributes become a hash keyed
                    # by the id value.
                    if not dict.has_key(elem.tagName):
                        dict[elem.tagName] = Cfg()
                    dict[elem.tagName][elem.getAttribute('id')] = value
                else:
                    dict[elem.tagName] = value
    if len(dom.childNodes) == 0:
        # An element with no children maps to the empty string.
        return ''
    else:
        return dict
def _get_class_by_name(name):
    # Resolve a type name (from a type="..." attribute) to a callable,
    # searching builtins first, then this module's globals.
    # NOTE(review): this relies on __builtins__ being a dict, which holds
    # for imported modules but not for __main__ — confirm if reused.
    if __builtins__.has_key(name):
        return __builtins__[name]
    elif globals().has_key(name):
        return globals()[name]
    else:
        raise exceptions.NameError("name %s is not defined" % name)
def _is_numeric_dict(d):
if not isinstance(d, dict) or len(d) == 0:
return False
try:
[int(val) for val in d.keys()]
except ValueError:
return False
return True
def _deep_merge(old, new):
    """Recursively merge config dict *new* into *old* and return *old*.

    Scalars and "numeric" dicts (see _is_numeric_dict) from *new* replace
    the old value outright; ordinary nested dicts are merged key by key.
    """
    for key in new:
        if _is_numeric_dict(new[key]):
            # Indexed collections are replaced as a whole, never merged.
            old[key] = new[key]
        elif isinstance(new[key], dict) and key in old:
            # `key in old` replaces the Python-2-only dict.has_key(), with
            # identical behavior on both Python 2 and 3.
            old[key] = _deep_merge(old[key], new[key])
        else:
            old[key] = new[key]
    return old
def _get_path():
return os.getcwd()
def load(filename=None):
    """
    Load the given config file (default: config.xml in the current working
    directory) and expose its contents as attributes of this module.

    Usage:
        from spamfighter.utils import config
        config.load("cfg.xml")
        print config.some_my_param
    """
    # Compute the default at call time: the old eager default argument
    # (`filename = _get_path() + '/config.xml'`) was evaluated once at
    # import time, freezing whatever the working directory happened to be
    # when this module was first imported.
    if filename is None:
        filename = os.path.join(_get_path(), 'config.xml')
    _export_config(_load_file(filename))
def _load_file(filename):
    """
    Load the given file and return the resulting configuration, merging the
    global/local/development/testing sections in that order.
    """
    global _filename
    _filename = filename
    dom = parse(filename)
    # The <global> section is mandatory and forms the base configuration.
    _global = _parse_dom(dom.getElementsByTagName('global')[0])
    _config = _global
    # Optional <local> overrides are merged on top of <global>.
    try:
        _local = _parse_dom(dom.getElementsByTagName('local')[0])
        _config = _deep_merge(_global, _local)
    except IndexError:
        pass
    # Heuristic: VCS metadata in the cwd means a developer machine.
    _developer = os.path.exists('.svn') or os.path.exists('.git')
    # Trying to guess package root
    import inspect
    _basedir = os.path.join(os.path.dirname(inspect.getsourcefile(_load_file)), '..', '..')
    _developer = _developer or os.path.exists(os.path.join(_basedir, '.git')) or os.path.exists(os.path.join(_basedir, '.svn'))
    try:
        # A host listed in <servers> is a production box: merge its
        # per-server overrides and disable developer mode.
        if _config.has_key('servers') and _config.servers.has_key('server'):
            for server in _config.servers.server.values():
                if server['hostname'] == socket.gethostname():
                    _config = _deep_merge(_config, server)
                    _developer = False
        if _developer:
            _development = _parse_dom(dom.getElementsByTagName('development')[0])
            _config = _deep_merge(_config, _development)
    except IndexError:
        pass
    try:
        # When running under twisted trial, merge <testing> overrides too.
        if sys.modules.has_key('twisted.trial.runner'):
            _testing = _parse_dom(dom.getElementsByTagName('testing')[0])
            _config = _deep_merge(_config, _testing)
            if _testing.has_key('development') and _developer:
                _config = _deep_merge(_config, _testing.development)
    except IndexError:
        pass
    return _config
def _export_config(__config):
    """
    Export the given configuration as attributes of this module, first
    removing the attributes exported by the previous load.
    """
    global _config
    # Drop the module attributes created by the previous configuration.
    for elem in _config:
        del sys.modules[__name__].__dict__[elem]
    _config = __config
    # Publish each top-level key as an attribute of this module.
    for elem in _config:
        sys.modules[__name__].__dict__[elem] = _config[elem]
class Cfg(dict):
    """
    Dict wrapper that also exposes its keys as attributes, so nested
    settings read naturally, e.g. config.db.dsn.
    """
    def __getattr__(self,name):
        # Only invoked for attributes not found normally: fall back to keys.
        if self.has_key(name):
            return self[name]
        else:
            raise AttributeError, name
    def __getitem__(self, name):
        # Keys are always stored as strings; coerce lookups accordingly.
        return dict.__getitem__(self, str(name))
class ParseException(Exception):
    """Raised when a configuration file has an invalid structure."""
    def __init__(self, message, filename):
        self.message = message
        self.filename = filename
    def __str__(self):
        # "%s" stringifies both parts, matching the old explicit str() call.
        return "%s: %s" % (self.filename, self.message)
# Eagerly load ./config.xml on import; a missing file is not an error so
# callers may load() an explicit path later.
try:
    load()
except exceptions.IOError:
    pass
| gpl-3.0 |
filias/django | django/core/cache/backends/db.py | 480 | 8628 | "Database cache backend."
import base64
from datetime import datetime
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.db import DatabaseError, connections, models, router, transaction
from django.utils import six, timezone
from django.utils.encoding import force_bytes
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class Options(object):
    """Duck-types the parts of a Django model ``_meta`` class that the
    cache backend needs, so database routers can treat cache operations
    like ordinary model operations.
    """
    def __init__(self, table):
        # Identity attributes routers typically inspect.
        self.db_table = table
        self.app_label = 'django_cache'
        self.object_name = 'CacheEntry'
        self.model_name = 'cacheentry'
        # Human-readable names.
        self.verbose_name = 'cache entry'
        self.verbose_name_plural = 'cache entries'
        # State flags a real model _meta would expose.
        self.managed = True
        self.abstract = False
        self.proxy = False
        self.swapped = False
class BaseDatabaseCache(BaseCache):
    # Shared base for DB-backed caches: remembers the table name and builds
    # a stand-in model class whose _meta lets routers route cache queries.
    def __init__(self, table, params):
        BaseCache.__init__(self, params)
        self._table = table
        # Fake model class: routers only ever look at its _meta.
        class CacheEntry(object):
            _meta = Options(table)
        self.cache_model_class = CacheEntry
class DatabaseCache(BaseDatabaseCache):
    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes, depending on the
    # value of USE_TZ and whether the database supports time zones. The ORM's
    # conversion and adaptation infrastructure is then used to avoid comparing
    # aware and naive datetimes accidentally.
    def get(self, key, default=None, version=None):
        """Fetch *key*, lazily deleting the row and returning *default*
        when the stored entry has expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        with connection.cursor() as cursor:
            cursor.execute("SELECT cache_key, value, expires FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            row = cursor.fetchone()
        if row is None:
            return default
        expires = row[2]
        # Run the raw expires column through the ORM converters so naive
        # and aware datetimes are never compared directly.
        expression = models.Expression(output_field=models.DateTimeField())
        for converter in (connection.ops.get_db_converters(expression) +
                          expression.get_db_converters(connection)):
            expires = converter(expires, expression, connection, {})
        if expires < timezone.now():
            # Expired: evict the row via the write database and miss.
            db = router.db_for_write(self.cache_model_class)
            connection = connections[db]
            with connection.cursor() as cursor:
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key = %%s" % table, [key])
            return default
        value = connection.ops.process_clob(row[1])
        return pickle.loads(base64.b64decode(force_bytes(value)))
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store *value* under *key*."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store *value* only if *key* is absent or expired; return success."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        # Shared implementation for set() and add(); mode is 'set' or 'add'.
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            if timeout is None:
                # No timeout: store the maximum representable datetime.
                exp = datetime.max
            elif settings.USE_TZ:
                exp = datetime.utcfromtimestamp(timeout)
            else:
                exp = datetime.fromtimestamp(timeout)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                # Over capacity: cull before inserting the new entry.
                self._cull(db, cursor, now)
            pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
            b64encoded = base64.b64encode(pickled)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            if six.PY3:
                b64encoded = b64encoded.decode('latin1')
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute("SELECT cache_key, expires FROM %s "
                                   "WHERE cache_key = %%s" % table, [key])
                    result = cursor.fetchone()
                    if result:
                        current_expires = result[1]
                        expression = models.Expression(output_field=models.DateTimeField())
                        for converter in (connection.ops.get_db_converters(expression) +
                                          expression.get_db_converters(connection)):
                            current_expires = converter(current_expires, expression, connection, {})
                    exp = connection.ops.adapt_datetimefield_value(exp)
                    # 'set' always overwrites; 'add' only replaces an
                    # already-expired row.
                    if result and (mode == 'set' or (mode == 'add' and current_expires < now)):
                        cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                                       "WHERE cache_key = %%s" % table,
                                       [b64encoded, exp, key])
                    else:
                        cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                                       "VALUES (%%s, %%s, %%s)" % table,
                                       [key, b64encoded, exp])
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
    def delete(self, key, version=None):
        """Remove *key* from the cache table (no-op if absent)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        with connection.cursor() as cursor:
            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
    def has_key(self, key, version=None):
        """Return True if *key* exists and has not yet expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        if settings.USE_TZ:
            now = datetime.utcnow()
        else:
            now = datetime.now()
        now = now.replace(microsecond=0)
        with connection.cursor() as cursor:
            cursor.execute("SELECT cache_key FROM %s "
                           "WHERE cache_key = %%s and expires > %%s" % table,
                           [key, connection.ops.adapt_datetimefield_value(now)])
            return cursor.fetchone() is not None
    def _cull(self, db, cursor, now):
        # Shrink the table: drop expired rows, then, if still over capacity,
        # delete a fraction (1/_cull_frequency) of entries by key order.
        if self._cull_frequency == 0:
            self.clear()
        else:
            connection = connections[db]
            table = connection.ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connection.ops.adapt_datetimefield_value(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                cull_num = num // self._cull_frequency
                # The backend supplies SQL picking the culling boundary key.
                cursor.execute(
                    connection.ops.cache_key_culling_sql() % table,
                    [cull_num])
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key < %%s" % table,
                               [cursor.fetchone()[0]])
    def clear(self):
        """Delete every row from the cache table."""
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        with connection.cursor() as cursor:
            cursor.execute('DELETE FROM %s' % table)
| bsd-3-clause |
papouso/odoo | addons/l10n_br/account.py | 340 | 10565 | # -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import openerp
from openerp.osv import fields, osv
TAX_CODE_COLUMNS = {
'domain':fields.char('Domain',
help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'tax_discount': fields.boolean('Discount this Tax in Prince',
help="Mark it for (ICMS, PIS, COFINS and others taxes included)."),
}
TAX_DEFAULTS = {
'base_reduction': 0,
'amount_mva': 0,
}
class account_tax_code_template(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax.code.template'
    _columns = TAX_CODE_COLUMNS
    def generate_tax_code(self, cr, uid, tax_code_root_id, company_id,
                          context=None):
        """This function generates the tax codes from the templates of tax
        code that are children of the given one passed in argument. Then it
        returns a dictionary with the mapping between the templates and the
        real objects.

        :param tax_code_root_id: id of the root of all the tax code templates
                                 to process.
        :param company_id: id of the company the wizard is running for
        :returns: dictionary with the mapping between the templates and the
                  real objects.
        :rtype: dict
        """
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_tax_code = self.pool.get('account.tax.code')
        tax_code_template_ref = {}
        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        #find all the children of the tax_code_root_id
        children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
        for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
            # Parents are processed before children (ordered by id), so the
            # parent's real id is already in the mapping when needed.
            parent_id = tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False
            vals = {
                'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
                'code': tax_code_template.code,
                'info': tax_code_template.info,
                'parent_id': parent_id,
                'company_id': company_id,
                'sign': tax_code_template.sign,
                'domain': tax_code_template.domain,
                'tax_discount': tax_code_template.tax_discount,
            }
            #check if this tax code already exists
            rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),
                ('parent_id','=',parent_id),
                ('code', '=', vals['code']),
                ('company_id', '=', vals['company_id'])], context=context)
            if not rec_list:
                #if not yet, create it
                new_tax_code = obj_tax_code.create(cr, uid, vals)
                #recording the new tax code to do the mapping
                tax_code_template_ref[tax_code_template.id] = new_tax_code
        return tax_code_template_ref
class account_tax_code(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax.code'
    # Same extra columns as the template model (domain, tax_discount).
    _columns = TAX_CODE_COLUMNS
def get_precision_tax():
    """Return a callable producing the (16, scale) digits for tax fields,
    where scale is the configured 'Account' decimal precision plus two."""
    def _tax_digits(cr):
        # Look up the 'Account' precision configured for this database.
        registry = openerp.registry(cr.dbname)
        precision = registry['decimal.precision'].precision_get(cr, 1, 'Account')
        return (16, precision + 2)
    return _tax_digits
class account_tax_template(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax.template'
    _columns = {
        'tax_discount': fields.boolean('Discount this Tax in Prince',
                                        help="Mark it for (ICMS, PIS e etc.)."),
        'base_reduction': fields.float('Redution', required=True,
                                        digits_compute=get_precision_tax(),
                                        help="Um percentual decimal em % entre 0-1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                    digits_compute=get_precision_tax(),
                                    help="Um percentual decimal em % entre 0-1."),
        'type': fields.selection([('percent','Percentage'),
                                   ('fixed','Fixed Amount'),
                                   ('none','None'),
                                   ('code','Python Code'),
                                   ('balance','Balance'),
                                   ('quantity','Quantity')], 'Tax Type', required=True,
                                   help="The computation method for the tax amount."),
    }
    _defaults = TAX_DEFAULTS
    def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
        """
        This method generate taxes from templates.

        :param tax_templates: list of browse record of the tax templates to process
        :param tax_code_template_ref: Taxcode templates reference.
        :param company_id: id of the company the wizard is running for
        :returns:
            {
            'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
            'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
            }
        """
        result = super(account_tax_template, self)._generate_tax(cr, uid,
                                                                 tax_templates,
                                                                 tax_code_template_ref,
                                                                 company_id,
                                                                 context)
        tax_templates = self.browse(cr, uid, result['tax_template_to_tax'].keys(), context)
        obj_acc_tax = self.pool.get('account.tax')
        # Propagate domain/tax_discount from each template's tax code onto
        # the freshly created tax records.
        for tax_template in tax_templates:
            if tax_template.tax_code_id:
                obj_acc_tax.write(cr, uid, result['tax_template_to_tax'][tax_template.id], {'domain': tax_template.tax_code_id.domain,
                                                                                            'tax_discount': tax_template.tax_code_id.tax_discount})
        return result
    def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
        # Mirror the selected tax code's discount flag and domain onto the
        # tax template form whenever the code changes.
        result = {'value': {}}
        if not tax_code_id:
            return result
        obj_tax_code = self.pool.get('account.tax.code.template').browse(cr, uid, tax_code_id)
        if obj_tax_code:
            result['value']['tax_discount'] = obj_tax_code.tax_discount
            result['value']['domain'] = obj_tax_code.domain
        return result
class account_tax(osv.osv):
    """ Add fields used to define some brazilian taxes """
    _inherit = 'account.tax'
    # Mirrors the columns added on account.tax.template above.
    _columns = {
        'tax_discount': fields.boolean('Discount this Tax in Prince',
                                        help="Mark it for (ICMS, PIS e etc.)."),
        'base_reduction': fields.float('Redution', required=True,
                                        digits_compute=get_precision_tax(),
                                        help="Um percentual decimal em % entre 0-1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                    digits_compute=get_precision_tax(),
                                    help="Um percentual decimal em % entre 0-1."),
        'type': fields.selection([('percent','Percentage'),
                                   ('fixed','Fixed Amount'),
                                   ('none','None'),
                                   ('code','Python Code'),
                                   ('balance','Balance'),
                                   ('quantity','Quantity')], 'Tax Type', required=True,
                                   help="The computation method for the tax amount."),
    }
    _defaults = TAX_DEFAULTS
    def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
        # Mirror the selected tax code's discount flag and domain onto the
        # tax form whenever the code changes.
        result = {'value': {}}
        if not tax_code_id:
            return result
        obj_tax_code = self.pool.get('account.tax.code').browse(cr, uid, tax_code_id)
        if obj_tax_code:
            result['value']['tax_discount'] = obj_tax_code.tax_discount
            result['value']['domain'] = obj_tax_code.domain
        return result
| agpl-3.0 |
Pymatteo/QtNMR | build/exe.win32-3.4/scipy/sparse/csgraph/_laplacian.py | 7 | 3818 | """
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False, use_out_degree=False):
    """
    Return the Laplacian matrix of a directed graph.

    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then also return an array related to vertex degrees.
    use_out_degree : bool, optional
        If True, then use out-degree instead of in-degree.
        This distinction matters only if the graph is asymmetric.
        Default: False.

    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray, optional
        The length-N diagonal of the Laplacian matrix.
        For the normalized Laplacian, this is the array of square roots
        of vertex degrees or 1 if the degree is zero.

    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.

    Examples
    --------
    >>> from scipy.sparse import csgraph
    >>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
    >>> G
    array([[ 0, 0, 0, 0, 0],
           [ 0, 1, 2, 3, 4],
           [ 0, 2, 4, 6, 8],
           [ 0, 3, 6, 9, 12],
           [ 0, 4, 8, 12, 16]])
    >>> csgraph.laplacian(G, normed=False)
    array([[ 0, 0, 0, 0, 0],
           [ 0, 9, -2, -3, -4],
           [ 0, -2, 16, -6, -8],
           [ 0, -3, -6, 21, -12],
           [ 0, -4, -8, -12, 24]])
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')

    # The normalized Laplacian needs sqrt/true division, so promote integer
    # graphs to floating point first.  ``np.integer`` covers both signed and
    # unsigned kinds; the old ``np.int``/``np.uint``/``np.float`` aliases were
    # deprecated in NumPy 1.20 and removed in 1.24, and would raise
    # AttributeError here.
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = csgraph.astype(np.float64)

    create_lap = _laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
    degree_axis = 1 if use_out_degree else 0
    lap, d = create_lap(csgraph, normed=normed, axis=degree_axis)
    if return_diag:
        return lap, d
    return lap
def _setdiag_dense(A, d):
A.flat[::len(d)+1] = d
def _laplacian_sparse(graph, normed=False, axis=0):
    """Sparse-matrix Laplacian.

    Returns ``(lap, w)`` where ``lap`` is a COO matrix and ``w`` holds the
    vertex degrees (their square roots in the normalized case, with isolated
    vertices mapped to 1).
    """
    n = graph.shape[0]
    # Work on a COO copy so the in-place .data edits below never touch the
    # caller's matrix.
    if graph.format == 'coo':
        m = graph.copy()
    else:
        m = graph.tocoo()
    # Degree of each vertex along the requested axis, excluding self-loops.
    # NOTE(review): relies on np.matrix-style .getA1() and on COO setdiag();
    # newer SciPy versions may not support setdiag on COO matrices -- confirm
    # against the SciPy version this ships with.
    w = m.sum(axis=axis).getA1() - m.diagonal()
    if normed:
        w = np.sqrt(w)
        isolated_node_mask = (w == 0)
        # Guard against 0/0 for isolated vertices; their off-diagonal entries
        # are already zero and their diagonal is forced to 0 below.
        w[isolated_node_mask] = 1
        # Scale entry (i, j) by 1/(w[i] * w[j]) and negate: -A_ij / sqrt(d_i d_j).
        m.data /= w[m.row]
        m.data /= w[m.col]
        m.data *= -1
        m.setdiag(1 - isolated_node_mask)
    else:
        m.data *= -1
        m.setdiag(w)
    return m, w
def _laplacian_dense(graph, normed=False, axis=0):
    """Dense-array Laplacian.

    Returns ``(lap, w)`` where ``lap`` is an ndarray and ``w`` holds the
    vertex degrees (their square roots in the normalized case, with isolated
    vertices mapped to 1).
    """
    lap = np.array(graph)
    np.fill_diagonal(lap, 0)              # self-loops do not contribute
    deg = lap.sum(axis=axis)
    if normed:
        deg = np.sqrt(deg)
        isolated = (deg == 0)
        deg[isolated] = 1                 # avoid 0/0 for isolated vertices
        lap /= deg
        lap /= deg[:, np.newaxis]
        lap *= -1
        diag = 1 - isolated
    else:
        lap *= -1
        diag = deg
    # Inlined diagonal write (was _setdiag_dense): entries of an n x n
    # row-major array sit every n + 1 flat elements apart.
    lap.flat[::len(diag) + 1] = diag
    return lap, deg
| gpl-3.0 |
nagyistoce/edx-platform | lms/djangoapps/courseware/tests/factories.py | 91 | 4785 | # Factories are self documenting
# pylint: disable=missing-docstring
import json
from functools import partial
import factory
from factory.django import DjangoModelFactory
# Imported to re-export
# pylint: disable=unused-import
from student.tests.factories import UserFactory # Imported to re-export
# pylint: enable=unused-import
from student.tests.factories import UserProfileFactory as StudentUserProfileFactory
from courseware.models import StudentModule, XModuleUserStateSummaryField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.roles import (
CourseInstructorRole,
CourseStaffRole,
CourseBetaTesterRole,
GlobalStaff,
OrgStaffRole,
OrgInstructorRole,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
# TODO fix this (course_id and location are invalid names as constants, and course_id should really be COURSE_KEY)
# pylint: disable=invalid-name
# Shared test course key, plus a shortcut that builds usage keys of block
# type u'problem' inside that course, e.g. location('usage_id').
course_id = SlashSeparatedCourseKey(u'edX', u'test_course', u'test')
location = partial(course_id.make_usage_key, u'problem')
class UserProfileFactory(StudentUserProfileFactory):
    """Student profile factory that pins ``courseware`` to a default course file."""
    courseware = 'course.xml'
# For the following factories, these are disabled because we're ok ignoring the
# unused arguments create and **kwargs in the line:
# course_key(self, create, extracted, **kwargs)
# pylint: disable=unused-argument
class InstructorFactory(UserFactory):
    """Build a User holding the instructor role for the CourseKey passed
    as ``course_key=...`` (the argument is mandatory)."""
    last_name = "Instructor"

    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        course = extracted
        if course is None:
            raise ValueError("Must specify a CourseKey for a course instructor user")
        role = CourseInstructorRole(course)
        role.add_users(self)
class StaffFactory(UserFactory):
    """Build a User holding the staff role for the CourseKey passed
    as ``course_key=...`` (the argument is mandatory)."""
    last_name = "Staff"

    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        course = extracted
        if course is None:
            raise ValueError("Must specify a CourseKey for a course staff user")
        role = CourseStaffRole(course)
        role.add_users(self)
class BetaTesterFactory(UserFactory):
    """Build a User holding the beta-tester role for the CourseKey passed
    as ``course_key=...`` (the argument is mandatory)."""
    last_name = "Beta-Tester"

    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        course = extracted
        if course is None:
            raise ValueError("Must specify a CourseKey for a beta-tester user")
        role = CourseBetaTesterRole(course)
        role.add_users(self)
class OrgStaffFactory(UserFactory):
    """Build a User holding staff rights over the whole org of the CourseKey
    passed as ``course_key=...`` (the argument is mandatory)."""
    last_name = "Org-Staff"

    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        course = extracted
        if course is None:
            raise ValueError("Must specify a CourseKey for an org-staff user")
        role = OrgStaffRole(course.org)
        role.add_users(self)
class OrgInstructorFactory(UserFactory):
    """Build a User holding instructor rights over the whole org of the
    CourseKey passed as ``course_key=...`` (the argument is mandatory)."""
    last_name = "Org-Instructor"

    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        course = extracted
        if course is None:
            raise ValueError("Must specify a CourseKey for an org-instructor user")
        role = OrgInstructorRole(course.org)
        role.add_users(self)
class GlobalStaffFactory(UserFactory):
    """Build a User granted global (instance-wide) staff access."""
    last_name = "GlobalStaff"

    @factory.post_generation
    def set_staff(self, create, extracted, **kwargs):
        staff_role = GlobalStaff()
        staff_role.add_users(self)
# pylint: enable=unused-argument
class StudentModuleFactory(DjangoModelFactory):
    # Factory for courseware.models.StudentModule (per-student module state).
    FACTORY_FOR = StudentModule

    module_type = "problem"
    student = factory.SubFactory(UserFactory)
    course_id = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
    # Fresh row: no stored state or grades yet.
    state = None
    grade = None
    max_grade = None
    # presumably 'na' means "not applicable / not attempted" -- confirm
    # against the StudentModule model's DONE_TYPES.
    done = 'na'
class UserStateSummaryFactory(DjangoModelFactory):
    # Factory for XModuleUserStateSummaryField rows, pre-seeded with an
    # 'existing_field' so tests can exercise reads/overwrites.
    FACTORY_FOR = XModuleUserStateSummaryField
    field_name = 'existing_field'
    value = json.dumps('old_value')  # field values are stored JSON-encoded
    usage_id = location('usage_id')
class StudentPrefsFactory(DjangoModelFactory):
    # Factory for XModuleStudentPrefsField rows (per-student, per-module-type
    # preferences), pre-seeded with an 'existing_field'.
    FACTORY_FOR = XModuleStudentPrefsField
    field_name = 'existing_field'
    value = json.dumps('old_value')  # field values are stored JSON-encoded
    student = factory.SubFactory(UserFactory)
    module_type = 'mock_problem'
class StudentInfoFactory(DjangoModelFactory):
    # Factory for XModuleStudentInfoField rows (per-student info fields),
    # pre-seeded with an 'existing_field'.
    FACTORY_FOR = XModuleStudentInfoField
    field_name = 'existing_field'
    value = json.dumps('old_value')  # field values are stored JSON-encoded
    student = factory.SubFactory(UserFactory)
| agpl-3.0 |
Ladeia/pingo-py | pingo/pcduino/tests/test_pcduino.py | 7 | 1463 | import unittest
import pingo
from pingo.test import level0
from pingo.test import level1
from pingo.detect import check_board
# Detect at import time whether we are actually on pcDuino hardware; the
# test classes below are skipped when we are not.
running_on_pcduino = check_board(pingo.pcduino.PcDuino)
class PcDuinoTest(unittest.TestCase):
    """Shared fixture for the pcDuino hardware test cases below.

    Only defines board setup/teardown and the parameters the mixed-in
    level0/level1 suites read; it adds no tests of its own.
    """

    def setUp(self):
        self.board = pingo.pcduino.PcDuino()
        # Level0 Parameters
        self.digital_output_pin_number = 3
        self.digital_input_pin_number = 0
        self.total_pins = 20
        # Level1 Parameters
        self.analog_input_pin_number = 'A3'
        # presumably a 12-bit ADC full-scale reading (4096) and the minimum
        # acceptable reading ratio -- confirm against level1.AnalogReadBasics.
        self.expected_analog_input = 4096
        self.expected_analog_ratio = 0.98

    def tearDown(self):
        # Release GPIO resources so the next test can re-open the board.
        self.board.cleanup()
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoBasics(PcDuinoTest, level0.BoardBasics):
    """Level-0 board checks; requires real pcDuino hardware."""

    def test_list_pins(self):
        """The board exposes the expected pin count with correctly typed pins."""
        pin = self.board.pins[self.digital_output_pin_number]
        # unittest assertions instead of bare ``assert``: they survive
        # ``python -O`` (which strips assert statements) and produce
        # informative failure messages.
        self.assertIsInstance(pin, pingo.DigitalPin)
        self.assertEqual(len(self.board.pins), self.total_pins)
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoExceptions(PcDuinoTest, level0.BoardExceptions):
    # Runs the shared level-0 exception suite against real hardware;
    # all tests come from the level0.BoardExceptions mixin.
    pass
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoAnalogRead(PcDuinoTest, level1.AnalogReadBasics):
    # Runs the shared level-1 analog-read suite against real hardware;
    # all tests come from the level1.AnalogReadBasics mixin.
    pass
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoAnalogExceptions(PcDuinoTest, level1.AnalogExceptions):
    # Runs the shared level-1 analog exception suite against real hardware;
    # all tests come from the level1.AnalogExceptions mixin.
    pass
if __name__ == '__main__':
    # Allow running this hardware suite directly: python test_pcduino.py
    unittest.main()
| mit |
jayrambhia/SimpleCV2 | SimpleCV/examples/tracking/surftest.py | 12 | 1224 | """
Example of SURFTracker
"""
from SimpleCV import *
def surftest():
    """Track a user-selected bounding box across webcam frames using SURF.

    Python 2 example (note the ``print`` statement); requires a webcam.
    Runs until interrupted with Ctrl-C.
    """
    cam = Camera()
    img = cam.getImage()
    d = Display(img.size())
    # NOTE(review): getBBFromUser() returns None when no complete drag was
    # captured, in which case this unpacking raises TypeError -- confirm
    # that is acceptable for an example script.
    img, bb1 = getBBFromUser(cam,d)
    fs1=[]
    while True:
        try:
            img1 = cam.getImage()
            # Track template ``img``/``bb1`` in the new frame; the previous
            # feature set seeds the tracker.
            fs1 = img1.track("surf",fs1,img,bb1, eps_val=0.8, dist=200, nframes=100)
            fs1.drawBB(color=Color.RED)
            # assumes track() returned a non-empty feature set -- TODO confirm
            fs1[-1].drawTrackerPoints()
            print fs1[-1].getBB()
            img1.show()
        except KeyboardInterrupt:
            break
def getBBFromUser(cam, d):
    """Let the user drag a rectangle on the live camera view.

    Returns ``(image, (x, y, width, height))`` built from the mouse-down and
    mouse-up positions, or ``None`` if no complete drag was captured (e.g.
    the loop was interrupted) -- callers must handle the None case.
    """
    p1 = None
    p2 = None
    img = cam.getImage()
    while d.isNotDone():
        try:
            img = cam.getImage()
            img.save(d)              # show the live frame on the display
            dwn = d.leftButtonDownPosition()
            up = d.leftButtonUpPosition()
            if dwn:
                p1 = dwn
            if up:
                p2 = up
                break                # drag finished
            time.sleep(0.05)
        except KeyboardInterrupt:
            break
    if not p1 or not p2:
        return None
    # Normalize so the box is valid regardless of drag direction.
    xmax = np.max((p1[0],p2[0]))
    xmin = np.min((p1[0],p2[0]))
    ymax = np.max((p1[1],p2[1]))
    ymin = np.min((p1[1],p2[1]))
    return (img,(xmin,ymin,xmax-xmin,ymax-ymin))
# Run the demo immediately when this script is executed.
surftest()
| bsd-3-clause |
ericfc/django | tests/gis_tests/test_measure.py | 325 | 7363 | """
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
import unittest
from django.contrib.gis.measure import A, D, Area, Distance
class DistanceTest(unittest.TestCase):
    "Testing the Distance object"

    def testInit(self):
        "Testing initialization from valid units"
        d = Distance(m=100)
        self.assertEqual(d.m, 100)
        # D is an alias for Distance; several spellings map to the same unit.
        d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
        for d in (d1, d2, d3):
            self.assertEqual(d.m, 100)
        d = D(nm=100)
        self.assertEqual(d.m, 185200)
        # Unit keyword lookup is case-insensitive.
        y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
        for d in (y1, y2, y3):
            self.assertEqual(d.yd, 100)
        mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
        for d in (mm1, mm2):
            self.assertEqual(d.m, 1.0)
            self.assertEqual(d.mm, 1000.0)

    def testInitInvalid(self):
        "Testing initialization from invalid units"
        self.assertRaises(AttributeError, D, banana=100)

    def testAccess(self):
        "Testing access in different units"
        d = D(m=100)
        self.assertEqual(d.km, 0.1)
        self.assertAlmostEqual(d.ft, 328.084, 3)

    def testAccessInvalid(self):
        "Testing access in invalid units"
        d = D(m=100)
        self.assertFalse(hasattr(d, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        d1 = D(m=100)
        d2 = D(m=200)
        d3 = d1 + d2
        self.assertEqual(d3.m, 300)
        d3 += d1
        self.assertEqual(d3.m, 400)
        d4 = d1 - d2
        self.assertEqual(d4.m, -100)
        d4 -= d1
        self.assertEqual(d4.m, -200)
        # Mixing Distance with plain numbers must be rejected.
        with self.assertRaises(TypeError):
            d1 + 1
        with self.assertRaises(TypeError):
            d1 - 1
        with self.assertRaises(TypeError):
            d1 += 1
        with self.assertRaises(TypeError):
            d1 -= 1

    def testMultiplication(self):
        "Test multiplication & division"
        d1 = D(m=100)
        d3 = d1 * 2
        self.assertEqual(d3.m, 200)
        d3 = 2 * d1
        self.assertEqual(d3.m, 200)
        d3 *= 5
        self.assertEqual(d3.m, 1000)
        d4 = d1 / 2
        self.assertEqual(d4.m, 50)
        d4 /= 5
        self.assertEqual(d4.m, 10)
        # Distance / Distance is a dimensionless ratio.
        d5 = d1 / D(m=2)
        self.assertEqual(d5, 50)
        # Distance * Distance yields an Area.
        a5 = d1 * D(m=10)
        self.assertIsInstance(a5, Area)
        self.assertEqual(a5.sq_m, 100 * 10)
        # In-place variants with a Distance operand are not allowed.
        with self.assertRaises(TypeError):
            d1 *= D(m=1)
        with self.assertRaises(TypeError):
            d1 /= D(m=1)

    def testUnitConversions(self):
        "Testing default units during maths"
        d1 = D(m=100)
        d2 = D(km=1)
        # The left operand's default unit wins for arithmetic results.
        d3 = d1 + d2
        self.assertEqual(d3._default_unit, 'm')
        d4 = d2 + d1
        self.assertEqual(d4._default_unit, 'km')
        d5 = d1 * 2
        self.assertEqual(d5._default_unit, 'm')
        d6 = d1 / 2
        self.assertEqual(d6._default_unit, 'm')

    def testComparisons(self):
        "Testing comparisons"
        d1 = D(m=100)
        d2 = D(km=1)
        d3 = D(km=0)
        self.assertGreater(d2, d1)
        self.assertEqual(d1, d1)
        self.assertLess(d1, d2)
        # A zero distance is falsy.
        self.assertFalse(d3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        d1 = D(m=100)
        d2 = D(km=3.5)
        self.assertEqual(str(d1), '100.0 m')
        self.assertEqual(str(d2), '3.5 km')
        self.assertEqual(repr(d1), 'Distance(m=100.0)')
        self.assertEqual(repr(d2), 'Distance(km=3.5)')

    def testUnitAttName(self):
        "Testing the `unit_attname` class method"
        # Maps human-readable unit names to their attribute spellings.
        unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
                      ('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
        for nm, att in unit_tuple:
            self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
    "Testing the Area object"

    def testInit(self):
        "Testing initialization from valid units"
        a = Area(sq_m=100)
        self.assertEqual(a.sq_m, 100)
        # A is an alias for Area.
        a = A(sq_m=100)
        self.assertEqual(a.sq_m, 100)
        a = A(sq_mi=100)
        self.assertEqual(a.sq_m, 258998811.0336)

    # NOTE(review): "InvaliA" in the next two method names looks like a
    # mechanical search/replace typo for "Invalid"; kept as-is so the
    # test ids stay stable.
    def testInitInvaliA(self):
        "Testing initialization from invalid units"
        self.assertRaises(AttributeError, A, banana=100)

    def testAccess(self):
        "Testing access in different units"
        a = A(sq_m=100)
        self.assertEqual(a.sq_km, 0.0001)
        self.assertAlmostEqual(a.sq_ft, 1076.391, 3)

    def testAccessInvaliA(self):
        "Testing access in invalid units"
        a = A(sq_m=100)
        self.assertFalse(hasattr(a, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        a1 = A(sq_m=100)
        a2 = A(sq_m=200)
        a3 = a1 + a2
        self.assertEqual(a3.sq_m, 300)
        a3 += a1
        self.assertEqual(a3.sq_m, 400)
        a4 = a1 - a2
        self.assertEqual(a4.sq_m, -100)
        a4 -= a1
        self.assertEqual(a4.sq_m, -200)
        # Mixing Area with plain numbers must be rejected.
        with self.assertRaises(TypeError):
            a1 + 1
        with self.assertRaises(TypeError):
            a1 - 1
        with self.assertRaises(TypeError):
            a1 += 1
        with self.assertRaises(TypeError):
            a1 -= 1

    def testMultiplication(self):
        "Test multiplication & division"
        a1 = A(sq_m=100)
        a3 = a1 * 2
        self.assertEqual(a3.sq_m, 200)
        a3 = 2 * a1
        self.assertEqual(a3.sq_m, 200)
        a3 *= 5
        self.assertEqual(a3.sq_m, 1000)
        a4 = a1 / 2
        self.assertEqual(a4.sq_m, 50)
        a4 /= 5
        self.assertEqual(a4.sq_m, 10)
        # Unlike Distance, Area * Area and Area / Area are not defined.
        with self.assertRaises(TypeError):
            a1 * A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 *= A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 / A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 /= A(sq_m=1)

    def testUnitConversions(self):
        "Testing default units during maths"
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)
        # The left operand's default unit wins for arithmetic results.
        a3 = a1 + a2
        self.assertEqual(a3._default_unit, 'sq_m')
        a4 = a2 + a1
        self.assertEqual(a4._default_unit, 'sq_km')
        a5 = a1 * 2
        self.assertEqual(a5._default_unit, 'sq_m')
        a6 = a1 / 2
        self.assertEqual(a6._default_unit, 'sq_m')

    def testComparisons(self):
        "Testing comparisons"
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)
        a3 = A(sq_km=0)
        self.assertGreater(a2, a1)
        self.assertEqual(a1, a1)
        self.assertLess(a1, a2)
        # A zero area is falsy.
        self.assertFalse(a3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        a1 = A(sq_m=100)
        a2 = A(sq_km=3.5)
        self.assertEqual(str(a1), '100.0 sq_m')
        self.assertEqual(str(a2), '3.5 sq_km')
        self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
        self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
    """Collect the Distance and Area test cases into a single TestSuite."""
    s = unittest.TestSuite()
    loader = unittest.TestLoader()
    # unittest.makeSuite() was deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase() is the long-standing equivalent.
    s.addTest(loader.loadTestsFromTestCase(DistanceTest))
    s.addTest(loader.loadTestsFromTestCase(AreaTest))
    return s
def run(verbosity=2):
    """Execute the measure-object suite with a plain-text runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())

if __name__ == "__main__":
    run()
| bsd-3-clause |
d1zzy/gogbot | plugins/quotes.py | 1 | 9409 | import logging
import re
import requests
import sqlite3
import time
from urllib import parse as url_parse
from lib import config
from lib import irc
class Handler(irc.HandlerBase):
    """IRC handler to support !quote command."""

    # Regular expressions to match against supported commands.
    _GET_QUOTE_RE = re.compile(r'^!quote(?: +#?(\d+)$|$)', flags=re.IGNORECASE)
    _ADD_QUOTE_RE = re.compile(r'^!quote +add +([^ ].*)$', flags=re.IGNORECASE)
    _RAWADD_QUOTE_RE = re.compile(r'^!quote +rawadd +([^ ].*)$',
                                  flags=re.IGNORECASE)
    _UPDATE_QUOTE_RE = re.compile(r'^!quote +update +#?(\d+) +([^ ].*)$',
                                  flags=re.IGNORECASE)
    _DEL_QUOTE_RE = re.compile(r'^!quote +del +#?(\d+)$', flags=re.IGNORECASE)
    _HELP_RE = re.compile(r'^!quote +help$', flags=re.IGNORECASE)

    def __init__(self, conn, conf):
        """Read connection/quotes settings from `conf` and open the quote DB.

        Raises:
          Exception: if the quotes config section lacks 'db_file'.
        """
        super().__init__(conn)
        self._client_id = conf['CONNECTION']['client_id'].lower()
        self._channel = conf['CONNECTION']['channel'].lower()
        quote_section = config.GetSection(conf, 'quotes')
        if 'db_file' not in quote_section:
            raise Exception('"db_file" not found in QUOTE config section')
        self._db = sqlite3.connect(quote_section['db_file'])
        # The table name is interpolated into SQL strings below.  It comes
        # from the local (trusted) config file and must never be taken from
        # chat input; all user-supplied values go through '?' placeholders.
        self._table = quote_section['db_table']
        self._report_errors = quote_section.getboolean('report_errors')
        self._use_whisper = quote_section.getboolean('use_whisper')

    def _ReportError(self, recipient, fmt, *args, level=logging.WARNING):
        """Log an error and, if configured, surface it to `recipient` in chat."""
        if level is not None:
            logging.log(level, fmt, *args)
        if self._report_errors:
            if self._use_whisper:
                self._conn.SendWhisper(recipient, fmt % args)
            else:
                # Send a nicely formatted chat (public) message with the
                # user as a prefix.
                self._conn.SendMessage(self._channel,
                                       ''.join((recipient, ': ', fmt % args)))

    def HandlePRIVMSG(self, msg):
        """The entry point into this plugin, handle a chat message."""
        parts = irc.SplitPRIVMSG(msg)
        if len(parts) < 2 or not parts[1]:
            logging.warning('Got invalid PRIVMSG: %r', msg)
            return False
        command = parts[1].strip()
        if not command:
            return False
        # Dispatch to the first !quote sub-command whose regexp matches.
        match = self._GET_QUOTE_RE.match(command)
        if match:
            return self._HandleGetQuote(msg, match)
        match = self._ADD_QUOTE_RE.match(command)
        if match:
            return self._HandleAddQuote(msg, match)
        match = self._RAWADD_QUOTE_RE.match(command)
        if match:
            return self._HandleRawAddQuote(msg, match)
        match = self._UPDATE_QUOTE_RE.match(command)
        if match:
            return self._HandleUpdateQuote(msg, match)
        match = self._DEL_QUOTE_RE.match(command)
        if match:
            return self._HandleDelQuote(msg, match)
        match = self._HELP_RE.match(command)
        if match:
            return self._HandleHelp()
        return False

    def _HandleGetQuote(self, msg, match):
        """Handle "!quote" and "!quote <number>" commands."""
        index = match.group(1)
        cur = self._db.cursor()
        if index:
            cur.execute('SELECT CustomId, Text FROM %s WHERE CustomId = ?' %
                        self._table, (index,))
        else:
            # No index given: pick a random quote.
            cur.execute(
                'SELECT CustomId, Text FROM %s ORDER BY random() limit 1' %
                self._table)
        row = cur.fetchone()
        if not row:
            self._ReportError(msg.sender, 'Failed to get quote #%s', index)
            return False
        self._conn.SendMessage(self._channel, '#%s: %s' % (row[0], row[1]))
        return True

    def _AuthorizeElevatedCommand(self, sender):
        """Return true/false if "sender" is a moderator."""
        # Twitch takes some time (on the order of minutes) between the bot
        # joining the channel and getting the user list so it's possible that
        # users we don't know about are issuing elevated commands, in that case
        # we don't have much of a choice and just ignore them.
        user = self._conn.GetUserList().get(sender)
        if not user:
            self._ReportError(sender, "User %r tried elevated quotes command "
                              "but we don't know about them from Twitch yet, "
                              "ignoring it.", sender, level=logging.INFO)
            return False
        if not user.IsModerator():
            self._ReportError(sender, 'Unprivileged user %r tried to issue '
                              'elevated quotes command', sender)
            return False
        return True

    def _GetCurrentGame(self, sender):
        """Get the current game set on a channel using Twitch API.

        TODO(dizzy): if other code starts using Twitch API, make an internal
        helper library for it.
        """
        # Drop "#" from the start of the channel name, Twitch doesn't need it.
        if len(self._channel) < 2:
            logging.warning('Unexpectadly short channel name: %r',
                            self._channel)
            return None
        channel = self._channel[1:]
        url = ('https://api.twitch.tv/kraken/channels/%s' %
               url_parse.quote(channel))
        headers = {'ACCEPT': 'application/vnd.twitchtv.v3+json',
                   'Client-ID': self._client_id,
                   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) '
                                 'Gecko/20100101 Firefox/6.0'}
        req = requests.get(url, headers=headers)
        if req.status_code != 200:
            self._ReportError(
                sender, 'Twitch API game name request failed: %s %s',
                req.status_code, req.reason, level=logging.ERROR)
            return None
        game = req.json()
        if not game or 'game' not in game:
            self._ReportError(sender, 'Got empty game name')
            return None
        return game['game']

    def _AddQuoteToDb(self, quote):
        """Adds the given quote to the database.

        Returns the new quote's CustomId, or None on failure.
        """
        cur = self._db.cursor()
        # COALESCE fixes the empty-table case: MAX(CustomId) is NULL when no
        # rows exist yet, and NULL + 1 stays NULL, so the very first quote
        # would have been inserted with a NULL CustomId.
        cur.execute('INSERT INTO %(table)s (CustomId, Text) '
                    'SELECT COALESCE(MAX(CustomId), 0) + 1, ? FROM %(table)s' %
                    {'table': self._table}, (quote,))
        cur.execute('SELECT CustomId FROM %s WHERE AutoId = ?' % self._table,
                    (cur.lastrowid,))
        row = cur.fetchone()
        if not row:
            logging.error('Failed to get last added quote')
            self._conn.SendMessage(self._channel, 'Failed to add quote')
            self._db.rollback()
            return None
        idx = row[0]
        self._conn.SendMessage(self._channel, 'Added quote #%s' % idx)
        self._db.commit()
        return idx

    def _HandleAddQuote(self, msg, match):
        """Handle "!quote add ..." command (tags quote with game and date)."""
        if not self._AuthorizeElevatedCommand(msg.sender):
            return True
        text = match.group(1).strip()
        # Tag the quote with the channel's current game and the UTC date.
        date_str = time.strftime('%d.%m.%Y', time.gmtime())
        game = self._GetCurrentGame(msg.sender)
        if not game:
            return True
        text += ' [%s] [%s]' % (game, date_str)
        idx = self._AddQuoteToDb(text)
        if idx:
            logging.info('User %r added quote #%s', msg.sender, idx)
        return True

    def _HandleRawAddQuote(self, msg, match):
        """Handle "!quote rawadd ..." command (no game/date tagging)."""
        if not self._AuthorizeElevatedCommand(msg.sender):
            return True
        text = match.group(1).strip()
        idx = self._AddQuoteToDb(text)
        if idx:
            logging.info('User %r added quote #%s', msg.sender, idx)
        return True

    def _HandleUpdateQuote(self, msg, match):
        """Handle "!quote update ..." command."""
        if not self._AuthorizeElevatedCommand(msg.sender):
            return True
        index = match.group(1)
        text = match.group(2).strip()
        cur = self._db.cursor()
        cur.execute('UPDATE %s SET Text = ? WHERE CustomId = ?' %
                    self._table, (text, index,))
        if cur.rowcount != 1:
            self._ReportError(msg.sender, "Failed to update quote #%s", index)
            return True
        self._db.commit()
        self._conn.SendMessage(self._channel, 'Updated quote #%s' % index)
        logging.info('User %s updated quote #%s to: %s',
                     msg.sender, index, text)
        return True

    def _HandleDelQuote(self, msg, match):
        """Handle "!quote del ..." command."""
        if not self._AuthorizeElevatedCommand(msg.sender):
            return True
        index = match.group(1)
        cur = self._db.cursor()
        cur.execute('DELETE FROM %s WHERE CustomId = ?' % self._table, (index,))
        if cur.rowcount != 1:
            self._ReportError(msg.sender, "Failed to remove quote #%s", index)
            return True
        self._db.commit()
        self._conn.SendMessage(self._channel, 'Deleted quote #%s' % index)
        logging.info('User %r removed quote #%s', msg.sender, index)
        return True

    def _HandleHelp(self):
        """Handle "!quote help"."""
        self._conn.SendMessage(
            self._channel, 'Quotes plugin documentation: https://goo.gl/h7028Q')
        return True
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/forms_tests/tests/test_extra.py | 13 | 35893 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import (
CharField, DateField, EmailField, FileField, Form, GenericIPAddressField,
HiddenInput, ImageField, IPAddressField, MultipleChoiceField,
MultiValueField, MultiWidget, PasswordInput, SelectMultiple, SlugField,
SplitDateTimeField, SplitDateTimeWidget, TextInput, URLField,
)
from django.forms.extras import SelectDateWidget
from django.forms.utils import ErrorList
from django.test import TestCase, ignore_warnings, override_settings
from django.utils import six, translation
from django.utils.dates import MONTHS_AP
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from .test_error_messages import AssertFormErrorsMixin
class GetDate(Form):
    """Minimal form: one required DateField rendered with SelectDateWidget."""
    mydate = DateField(widget=SelectDateWidget)
class GetDateShowHiddenInitial(Form):
    """Same as GetDate but also renders a hidden initial-value input."""
    mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
# The forms library comes with some extra, higher-level Field and Widget
def test_selectdate(self):
self.maxDiff = None
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
# Rendering the default state.
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering the None or '' values should yield the same output.
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
# Rendering a string value.
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering a datetime value.
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates should still render the failed date.
self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering with a custom months dict.
w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>""")
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
def test_selectdate_empty_label(self):
    """SelectDateWidget honours a custom empty_label, given either as a
    single string (reused for all three selects) or as a 3-tuple of
    (year, month, day) labels."""
    w = SelectDateWidget(years=('2014',), empty_label='empty_label')
    # Rendering the default state with empty_label set as a string: the same
    # label must appear once in each of the three <select> elements.
    self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
    w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
    # Rendering the default state with empty_label tuple: each select gets
    # its own label, in (year, month, day) order.
    self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>""")
    # A tuple/list empty_label must supply exactly three labels.
    self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
        SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
def test_multiwidget(self):
    """MultiWidget/MultiValueField integration: rendering, cleaning and
    change detection for a widget composed of several sub-widgets."""
    # MultiWidget and MultiValueField #############################################
    # MultiWidgets are widgets composed of other widgets. They are usually
    # combined with MultiValueFields - a field that is composed of other fields.
    # MultiWidgets can themselves be composed of other MultiWidgets.
    # SplitDateTimeWidget is one example of a MultiWidget.

    class ComplexMultiWidget(MultiWidget):
        def __init__(self, attrs=None):
            widgets = (
                TextInput(),
                SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeWidget(),
            )
            super(ComplexMultiWidget, self).__init__(widgets, attrs)

        def decompress(self, value):
            # Split the compressed "text,initials,datetime" string back into
            # the three per-widget values.
            if value:
                data = value.split(',')
                return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
            return [None, None, None]

        def format_output(self, rendered_widgets):
            return '\n'.join(rendered_widgets)

    w = ComplexMultiWidget()
    # Each sub-widget renders with a _N suffix on the name.
    self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")

    class ComplexField(MultiValueField):
        def __init__(self, required=True, widget=None, label=None, initial=None):
            fields = (
                CharField(),
                MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeField()
            )
            super(ComplexField, self).__init__(fields, required, widget, label, initial)

        def compress(self, data_list):
            # Join the per-field cleaned values back into one string.
            if data_list:
                return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
            return None

    f = ComplexField(widget=w)
    self.assertEqual(f.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
    self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text', ['X'], ['2007-04-25', '6:24:00']])
    # If insufficient data is provided, None is substituted
    self.assertFormErrors(['This field is required.'], f.clean, ['some text', ['JP']])
    # test with no initial data
    self.assertTrue(f.has_changed(None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
    # test when the data is the same as initial
    self.assertFalse(f.has_changed('some text,JP,2007-04-25 06:24:00',
        ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
    # test when the first widget's data has changed
    self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
        ['other text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
    # test when the last widget's data has changed. this ensures that it is not
    # short circuiting while testing the widgets.
    self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
        ['some text', ['J', 'P'], ['2009-04-25', '11:44:00']]))

    class ComplexFieldForm(Form):
        field1 = ComplexField(widget=w)

    # Unbound form renders empty sub-widgets with generated ids.
    f = ComplexFieldForm()
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")

    # Bound form re-renders the submitted per-widget values.
    f = ComplexFieldForm({'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00'})
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")

    self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
@ignore_warnings(category=RemovedInDjango19Warning)
def test_ipaddress(self):
    """Deprecated IPAddressField validates IPv4 input in both the required
    and the optional configuration."""
    required_field = IPAddressField()
    self.assertFormErrors(['This field is required.'], required_field.clean, '')
    self.assertFormErrors(['This field is required.'], required_field.clean, None)
    # Leading whitespace is stripped from a valid address.
    self.assertEqual(required_field.clean(' 127.0.0.1'), '127.0.0.1')

    optional_field = IPAddressField(required=False)
    # Empty input normalizes to the empty string when not required.
    self.assertEqual(optional_field.clean(''), '')
    self.assertEqual(optional_field.clean(None), '')
    self.assertEqual(optional_field.clean(' 127.0.0.1'), '127.0.0.1')

    # Malformed addresses are rejected by both variants.
    for bad_value in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 address.'], required_field.clean, bad_value)
        self.assertFormErrors(['Enter a valid IPv4 address.'], optional_field.clean, bad_value)
def test_generic_ipaddress_invalid_arguments(self):
    """The constructor rejects unknown protocols and the incompatible
    combination of an IPv4-only protocol with unpack_ipv4."""
    with self.assertRaises(ValueError):
        GenericIPAddressField(protocol="hamster")
    with self.assertRaises(ValueError):
        GenericIPAddressField(protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
    """Default GenericIPAddressField accepts both IPv4 and IPv6 input."""
    # The edge cases of the IPv6 validation code are not deeply tested
    # here, they are covered in the tests for django.utils.ipv6
    field = GenericIPAddressField()
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    # Surrounding whitespace is stripped from valid values.
    self.assertEqual(field.clean(' 127.0.0.1 '), '127.0.0.1')
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    # Values that look like neither protocol.
    for not_an_ip in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], field.clean, not_an_ip)
    # Values that look IPv6-ish but are structurally invalid.
    for bad_ipv6 in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                     '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, bad_ipv6)
def test_generic_ipaddress_as_ipv4_only(self):
    """With protocol="IPv4", IPv6 input is rejected outright."""
    field = GenericIPAddressField(protocol="IPv4")
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    # A valid IPv4 address is stripped of surrounding whitespace.
    self.assertEqual(field.clean(' 127.0.0.1 '), '127.0.0.1')
    rejected = (
        'foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5',
        # Even well-formed IPv6 addresses fail in IPv4-only mode.
        'fe80::223:6cff:fe8a:2e8a', '2a02::223:6cff:fe8a:2e8a',
    )
    for value in rejected:
        self.assertFormErrors(['Enter a valid IPv4 address.'], field.clean, value)
def test_generic_ipaddress_as_ipv6_only(self):
    """With protocol="IPv6", IPv4 input is rejected outright."""
    field = GenericIPAddressField(protocol="IPv6")
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    # IPv4 addresses and plain garbage both fail the generic IPv6 check.
    for value in ('127.0.0.1', 'foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv6 address.'], field.clean, value)
    # Valid IPv6 values are stripped of surrounding whitespace.
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    # IPv6-like but structurally invalid values get the specific message.
    for value in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                  '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, value)
def test_generic_ipaddress_as_generic_not_required(self):
    """An optional generic field accepts empty input but still validates
    non-empty input against both protocols."""
    field = GenericIPAddressField(required=False)
    # Empty input normalizes to the empty string.
    self.assertEqual(field.clean(''), '')
    self.assertEqual(field.clean(None), '')
    self.assertEqual(field.clean('127.0.0.1'), '127.0.0.1')
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    # Values that look like neither protocol.
    for not_an_ip in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], field.clean, not_an_ip)
    # IPv6-like but structurally invalid values get the specific message.
    for bad_ipv6 in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                     '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, bad_ipv6)
def test_generic_ipaddress_normalization(self):
    """IPv6 values are normalized to their canonical compressed form."""
    # Test the normalizing code
    field = GenericIPAddressField()
    cases = [
        (' ::ffff:0a0a:0a0a ', '::ffff:10.10.10.10'),
        (' ::ffff:10.10.10.10 ', '::ffff:10.10.10.10'),
        (' 2001:000:a:0000:0:fe:fe:beef ', '2001:0:a::fe:fe:beef'),
        (' 2001::a:0000:0:fe:fe:beef ', '2001:0:a::fe:fe:beef'),
    ]
    for raw, normalized in cases:
        self.assertEqual(field.clean(raw), normalized)
    # unpack_ipv4 reduces an IPv4-mapped address to plain IPv4 notation.
    unpacking_field = GenericIPAddressField(unpack_ipv4=True)
    self.assertEqual(unpacking_field.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
def test_slugfield_normalization(self):
    """SlugField strips surrounding whitespace on clean()."""
    self.assertEqual(SlugField().clean(' aa-bb-cc '), 'aa-bb-cc')
def test_urlfield_normalization(self):
    """URLField strips surrounding whitespace on clean()."""
    self.assertEqual(URLField().clean('http://example.com/ '), 'http://example.com/')
def test_smart_text(self):
    """smart_text() coerces objects to unicode text on both Python 2 and 3,
    and passes primitive values through str()."""
    class Test:
        # NOTE(review): on Python 2 __str__ returns UTF-8 bytes, which
        # smart_text is expected to decode — confirm against smart_text docs.
        if six.PY3:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'
        else:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'.encode('utf-8')

    class TestU:
        # On Python 2 the __unicode__ result should win over __str__;
        # on Python 3 only __str__ is consulted for text.
        if six.PY3:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'
            def __bytes__(self):
                return b'Foo'
        else:
            def __str__(self):
                return b'Foo'
            def __unicode__(self):
                return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'

    # Both classes must coerce to the same unicode text (given as escapes).
    self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
    self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
    self.assertEqual(smart_text(1), '1')
    self.assertEqual(smart_text('foo'), 'foo')
def test_accessing_clean(self):
    """Form.clean() may read self.cleaned_data, rewrite values in it and
    return the result, which becomes the form's cleaned_data."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            data = self.cleaned_data
            # Only normalize once field-level validation has passed.
            if not self.errors:
                data['username'] = data['username'].lower()
            return data

    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
    """When clean() mutates self.cleaned_data in place and returns None,
    the in-place changes are still kept."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            self.cleaned_data['username'] = self.cleaned_data['username'].lower()
            # don't return anything

    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
    """clean() may return a brand-new dict, which replaces cleaned_data."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            data = self.cleaned_data
            # Return a different dict. We have not changed self.cleaned_data.
            return {
                'username': data['username'].lower(),
                'password': 'this_is_not_a_secret',
            }

    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_overriding_errorlist(self):
    """A custom error_class controls how per-field errors are rendered."""
    @python_2_unicode_compatible
    class DivErrorList(ErrorList):
        def __str__(self):
            return self.as_divs()

        def as_divs(self):
            # Empty error lists render as nothing at all.
            if not self:
                return ''
            return '<div class="errorlist">%s</div>' % ''.join('<div class="error">%s</div>' % force_text(e) for e in self)

    class CommentForm(Form):
        name = CharField(max_length=50, required=False)
        email = EmailField()
        comment = CharField()

    data = dict(email='invalid')
    f = CommentForm(data, auto_id=False, error_class=DivErrorList)
    # Errors now render as nested <div>s instead of the default <ul> list.
    self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
    """is_multipart() is True exactly when a form has a file-based field."""
    class FormWithoutFile(Form):
        username = CharField()

    class FormWithFile(Form):
        username = CharField()
        file = FileField()

    class FormWithImage(Form):
        image = ImageField()

    self.assertFalse(FormWithoutFile().is_multipart())
    # Both FileField and ImageField force multipart encoding.
    for form_class in (FormWithFile, FormWithImage):
        self.assertTrue(form_class().is_multipart())
def test_selectdatewidget_required(self):
    """The widget's is_required flag mirrors the field's required flag."""
    class GetNotRequiredDate(Form):
        mydate = DateField(widget=SelectDateWidget, required=False)

    class GetRequiredDate(Form):
        mydate = DateField(widget=SelectDateWidget, required=True)

    optional_widget = GetNotRequiredDate().fields['mydate'].widget
    required_widget = GetRequiredDate().fields['mydate'].widget
    self.assertFalse(optional_widget.is_required)
    self.assertTrue(required_widget.is_required)
@override_settings(USE_L10N=True)
class FormsExtraL10NTestCase(TestCase):
    """Localization-sensitive SelectDateWidget behaviour, exercised under
    the Dutch ('nl') locale, which uses the d-m-Y date format."""

    def setUp(self):
        super(FormsExtraL10NTestCase, self).setUp()
        translation.activate('nl')

    def tearDown(self):
        # Restore the default locale so other test cases are unaffected.
        translation.deactivate()
        super(FormsExtraL10NTestCase, self).tearDown()

    def test_l10n(self):
        """value_from_datadict() and render() follow the localized d-m-Y
        format and use localized month names."""
        w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
        self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')
        # Under 'nl' the day select comes first and month names are Dutch.
        self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")

        # Years before 1900 work
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')

    def test_l10n_date_changed(self):
        """
        Ensure that DateField.has_changed() with SelectDateWidget works
        correctly with a localized date format.
        Refs #17165.
        """
        # With Field.show_hidden_initial=False -----------------------
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '2',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())

        # With Field.show_hidden_initial=True ------------------------
        # Here the comparison baseline is the hidden 'initial-mydate' input,
        # not the initial= argument.
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 22)})
        self.assertTrue(b.has_changed())

        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())

    def test_l10n_invalid_date_in(self):
        # Invalid dates shouldn't be allowed
        a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
        self.assertFalse(a.is_valid())
        # 'Geef een geldige datum op.' = 'Enter a valid date.'
        self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})

    def test_form_label_association(self):
        # label tag is correctly associated with first rendered dropdown
        # (the day select, since 'nl' puts the day first).
        a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
        self.assertIn('<label for="id_mydate_day">', a.as_p())
| mit |
sr-murthy/firefox-ui-tests | create_venv.py | 1 | 4062 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
The script can be used to setup a virtual environment for running Firefox UI Tests.
It will automatically install the firefox ui test package, all its dependencies,
and optional packages if specified.
"""
import optparse
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
# Link to the folder, which contains the zip archives of virtualenv
VIRTUALENV_URL = 'https://github.com/pypa/virtualenv/archive/%(VERSION)s.zip'
VIRTUALENV_VERSION = '12.1.1'  # pinned virtualenv release to download

# Directory containing this script; all relative paths resolve against it.
here = os.path.dirname(os.path.abspath(__file__))

# Windows virtualenvs keep their scripts in "Scripts", POSIX ones in "bin".
venv_script_path = 'Scripts' if sys.platform == 'win32' else 'bin'
venv_activate = os.path.join(venv_script_path, 'activate')
venv_activate_this = os.path.join(venv_script_path, 'activate_this.py')
venv_python_bin = os.path.join(venv_script_path, 'python')

# Final instructions printed after a successful setup; the two format slots
# are the optional "source " prefix and the activate-script path.
usage_message = """
***********************************************************************
To run the Firefox UI Tests, activate the virtual environment:
{}{}
See firefox-ui-tests --help for all options
***********************************************************************
"""
def download(url, target):
    """Download *url* into the local file *target* and return *target*.

    The whole response body is buffered in memory before being written, so
    this is only suitable for small files (here: the virtualenv archive).
    """
    response = urllib2.urlopen(url)
    try:
        with open(target, 'wb') as f:
            f.write(response.read())
    finally:
        # Fix: the urlopen handle was previously never closed; release the
        # socket even if reading or writing fails. (urlopen objects are not
        # context managers on Python 2.)
        response.close()
    return target
def create_virtualenv(target, python_bin=None):
    """Download the pinned virtualenv release, unpack it next to this script
    and run it to create a fresh virtual environment at *target*.

    python_bin: optional path to the Python interpreter the new environment
    should be based on (forwarded as virtualenv's -p option).
    """
    script_path = os.path.join(here, 'virtualenv-%s' % VIRTUALENV_VERSION,
                               'virtualenv.py')

    print 'Downloading virtualenv %s' % VIRTUALENV_VERSION
    zip_path = download(VIRTUALENV_URL % {'VERSION': VIRTUALENV_VERSION},
                        os.path.join(here, 'virtualenv.zip'))
    try:
        with zipfile.ZipFile(zip_path, 'r') as f:
            f.extractall(here)

        print 'Creating new virtual environment'
        cmd_args = [sys.executable, script_path, target]
        if python_bin:
            cmd_args.extend(['-p', python_bin])
        subprocess.check_call(cmd_args)
    finally:
        # Best-effort cleanup: remove the downloaded archive and the
        # unpacked virtualenv sources whether or not creation succeeded.
        try:
            os.remove(zip_path)
        except OSError:
            pass
        shutil.rmtree(os.path.dirname(script_path), ignore_errors=True)
def main():
    """Parse command-line options, (re)create the virtual environment and
    install the Firefox UI tests package into it."""
    parser = optparse.OptionParser('Usage: %prog [options] path_to_venv')
    parser.add_option('-p', '--python',
                      type='string',
                      dest='python',
                      metavar='BINARY',
                      help='The Python interpreter to use.')
    parser.add_option('--with-optional-packages',
                      dest='with_optional',
                      default=False,
                      action='store_true',
                      help='Installs optional packages for enhanced usability.')
    (options, args) = parser.parse_args(args=None, values=None)

    if len(args) != 1:
        parser.error('Path to the environment has to be specified')
    target = args[0]
    assert target

    # Remove an already existent virtual environment
    if os.path.exists(target):
        print 'Removing already existent virtual environment at: %s' % target
        shutil.rmtree(target, True)

    create_virtualenv(target, python_bin=options.python)

    # Activate the environment in-process so the pip call below installs
    # into the freshly created environment rather than the system Python.
    tps_env = os.path.join(target, venv_activate_this)
    execfile(tps_env, dict(__file__=tps_env))

    # Install Firefox UI tests, dependencies and optional packages from the
    # current working directory (assumed to be the repository checkout).
    command = ['pip', 'install', os.getcwd()]
    if options.with_optional:
        command.extend(['-r', 'optional_packages.txt'])
    print 'Installing Firefox UI Tests and dependencies...'
    print 'Command: %s' % command
    subprocess.check_call(command)

    # Print the user instructions
    print usage_message.format('' if sys.platform == 'win32' else 'source ',
                               os.path.join(target, venv_activate))


if __name__ == "__main__":
    main()
| mpl-2.0 |
Debian/openjfx | modules/web/src/main/native/Tools/TestResultServer/model/jsonresults_unittest.py | 6 | 28358 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import jsonresults
from jsonresults import JsonResults
except ImportError:
print "ERROR: Add the TestResultServer, google_appengine and yaml/lib directories to your PYTHONPATH"
raise
from django.utils import simplejson
import unittest
# Skeleton of a full aggregated-results document for the "Webkit" builder.
# The [TESTDATA_*] and [VERSION] placeholders are substituted by
# JsonResultsTest._make_test_json() below.
JSON_RESULTS_TEMPLATE = (
    '{"Webkit":{'
    '"allFixableCount":[[TESTDATA_COUNT]],'
    '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
    '"deferredCounts":[[TESTDATA_COUNTS]],'
    '"fixableCount":[[TESTDATA_COUNT]],'
    '"fixableCounts":[[TESTDATA_COUNTS]],'
    '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
    '"tests":{[TESTDATA_TESTS]},'
    '"webkitRevision":[[TESTDATA_WEBKITREVISION]],'
    '"wontfixCounts":[[TESTDATA_COUNTS]]'
    '},'
    '"version":[VERSION]'
    '}')

# Per-build result-category counts; every category letter gets the same
# [TESTDATA] substitution in the synthetic fixtures.
JSON_RESULTS_COUNTS_TEMPLATE = (
    '{'
    '"C":[TESTDATA],'
    '"F":[TESTDATA],'
    '"I":[TESTDATA],'
    '"O":[TESTDATA],'
    '"P":[TESTDATA],'
    '"T":[TESTDATA],'
    '"X":[TESTDATA],'
    '"Z":[TESTDATA]}')

# Fragment templates for a directory node and a single test's entry.
JSON_RESULTS_DIRECTORY_TEMPLATE = '[[TESTDATA_DIRECTORY]]:{[TESTDATA_DATA]}'

JSON_RESULTS_TESTS_TEMPLATE = (
    '[[TESTDATA_TEST_NAME]]:{'
    '"results":[[TESTDATA_TEST_RESULTS]],'
    '"times":[[TESTDATA_TEST_TIMES]]}')

# Minimal document shape returned by JsonResults.get_test_list().
JSON_RESULTS_TEST_LIST_TEMPLATE = (
    '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}')
class JsonResultsTest(unittest.TestCase):
def setUp(self):
    # All tests exercise results for this single fixed builder name, which
    # matches the builder key hard-coded in the JSON templates above.
    self._builder = "Webkit"
def test_strip_prefix_suffix(self):
    """The JSONP wrapper is removed; already-bare JSON passes through."""
    payload = "['contents']"
    wrapped = "ADD_RESULTS(" + payload + ");"
    self.assertEqual(JsonResults._strip_prefix_suffix(wrapped), payload)
    self.assertEqual(JsonResults._strip_prefix_suffix(payload), payload)
def _make_test_json(self, test_data):
    """Render *test_data* ({"builds": [...], "tests": {...}, "version"?})
    into an aggregated-results JSON string, or "" for empty input."""
    if not test_data:
        return ""
    builds = test_data["builds"]
    tests = test_data["tests"]
    if not builds or not tests:
        return ""

    # Per-build synthetic metadata is derived from each build id string.
    counts = ",".join(
        JSON_RESULTS_COUNTS_TEMPLATE.replace("[TESTDATA]", build)
        for build in builds)
    build_numbers = ",".join("1000%s" % build for build in builds)
    webkit_revisions = ",".join("2000%s" % build for build in builds)
    times = ",".join("100000%s000" % build for build in builds)

    result = JSON_RESULTS_TEMPLATE
    result = result.replace("[TESTDATA_COUNTS]", counts)
    result = result.replace("[TESTDATA_COUNT]", ",".join(builds))
    result = result.replace("[TESTDATA_BUILDNUMBERS]", build_numbers)
    result = result.replace("[TESTDATA_WEBKITREVISION]", webkit_revisions)
    result = result.replace("[TESTDATA_TIMES]", times)
    # Default schema version is 4 unless the fixture overrides it.
    result = result.replace("[VERSION]", str(test_data.get("version", "4")))
    result = result.replace("{[TESTDATA_TESTS]}",
                            simplejson.dumps(tests, separators=(',', ':'), sort_keys=True))
    return result
def _test_merge(self, aggregated_data, incremental_data, expected_data, max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
    """Merge the two fixtures and compare against *expected_data*; a falsy
    expectation means the merge must produce no output at all."""
    merged_results = JsonResults.merge(
        self._builder,
        self._make_test_json(aggregated_data),
        self._make_test_json(incremental_data),
        max_builds,
        sort_keys=True)
    if not expected_data:
        self.assertFalse(merged_results)
    else:
        self.assertEqual(merged_results, self._make_test_json(expected_data))
def _test_get_test_list(self, input_data, expected_data):
    """get_test_list() on *input_data* must yield *expected_data* rendered
    through the test-list template."""
    rendered_expectation = JSON_RESULTS_TEST_LIST_TEMPLATE.replace(
        "{[TESTDATA_TESTS]}",
        simplejson.dumps(expected_data, separators=(',', ':')))
    actual = JsonResults.get_test_list(self._builder, self._make_test_json(input_data))
    self.assertEqual(actual, rendered_expectation)
def test_merge_null_incremental_results(self):
    """Merging a null incremental document produces no output."""
    # Empty incremental results json.
    # Nothing to merge.
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200,"F"]],
             "times": [[200,0]]}}},
        # Incremental results
        None,
        # Expect no merge happens.
        None)
def test_merge_empty_incremental_results(self):
    """An incremental document with no builds/tests merges to nothing."""
    # No actual incremental test results (only prefix and suffix) to merge.
    # Nothing to merge.
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200,"F"]],
             "times": [[200,0]]}}},
        # Incremental results
        {"builds": [],
         "tests": {}},
        # Expected no merge happens.
        None)
def test_merge_empty_aggregated_results(self):
    """Without existing aggregates, the merge result is the incremental
    document itself."""
    # No existing aggregated results.
    # Merged results == new incremental results.
    self._test_merge(
        # Aggregated results
        None,
        # Incremental results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200,"F"]],
             "times": [[200,0]]}}},
        # Expected result
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200,"F"]],
             "times": [[200,0]]}}})
def test_merge_duplicate_build_number(self):
    """An incremental build number already present in the aggregate is
    ignored: the aggregate stays unchanged."""
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[100, "F"]],
             "times": [[100, 0]]}}},
        # Incremental results (build "2" duplicates an aggregated build)
        {"builds": ["2"],
         "tests": {"001.html": {
             "results": [[1, "F"]],
             "times": [[1, 0]]}}},
        # Expected results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[100, "F"]],
             "times": [[100, 0]]}}})
def test_merge_incremental_single_test_single_run_same_result(self):
    """A newer build with the same result extends the existing run-length
    encoding instead of adding a new entry."""
    # Incremental results has the latest build and same test results for
    # that run.
    # Insert the incremental results at the first place and sum number
    # of runs for "F" (200 + 1) to get merged results.
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200,"F"]],
             "times": [[200,0]]}}},
        # Incremental results
        {"builds": ["3"],
         "tests": {"001.html": {
             "results": [[1,"F"]],
             "times": [[1,0]]}}},
        # Expected results
        {"builds": ["3", "2", "1"],
         "tests": {"001.html": {
             "results": [[201,"F"]],
             "times": [[201,0]]}}})
def test_merge_single_test_single_run_different_result(self):
# Incremental results have the latest build but different test results
# for that run.
# Insert the incremental results at the first place.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"]],
"times": [[200,0]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1, "I"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[1,"I"],[200,"F"]],
"times": [[1,1],[200,0]]}}})
def test_merge_single_test_single_run_result_changed(self):
# Incremental results have the latest build but results which differ from
# the latest result (but are the same as an older result).
# The new run is prepended as its own group, not merged into the old one.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"],[10,"I"]],
"times": [[200,0],[10,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"I"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[1,"I"],[200,"F"],[10,"I"]],
"times": [[1,1],[200,0],[10,1]]}}})
def test_merge_multiple_tests_single_run(self):
# All tests have incremental updates.
# Run counts (and times) for each test are summed with the new run.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"]],
"times": [[200,0]]},
"002.html": {
"results": [[100,"I"]],
"times": [[100,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"F"]],
"times": [[1,0]]},
"002.html": {
"results": [[1,"I"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[201,"F"]],
"times": [[201,0]]},
"002.html": {
"results": [[101,"I"]],
"times": [[101,1]]}}})
def test_merge_multiple_tests_single_run_one_no_result(self):
# 001.html is missing from the incremental run: it gets an "N" (no data)
# entry for the new build, while 002.html's run counts are summed.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"]],
"times": [[200,0]]},
"002.html": {
"results": [[100,"I"]],
"times": [[100,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"002.html": {
"results": [[1,"I"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[1,"N"],[200,"F"]],
"times": [[201,0]]},
"002.html": {
"results": [[101,"I"]],
"times": [[101,1]]}}})
def test_merge_single_test_multiple_runs(self):
# Two new builds arrive at once; their two runs are prepended as a
# single result group.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"]],
"times": [[200,0]]}}},
# Incremental results
{"builds": ["4", "3"],
"tests": {"001.html": {
"results": [[2, "I"]],
"times": [[2,2]]}}},
# Expected results
{"builds": ["4", "3", "2", "1"],
"tests": {"001.html": {
"results": [[2,"I"],[200,"F"]],
"times": [[2,2],[200,0]]}}})
def test_merge_multiple_tests_multiple_runs(self):
# Two new builds with updates for both tests; the differing new results
# are prepended in front of each test's existing history.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"F"]],
"times": [[200,0]]},
"002.html": {
"results": [[10,"Z"]],
"times": [[10,0]]}}},
# Incremental results
{"builds": ["4", "3"],
"tests": {"001.html": {
"results": [[2, "I"]],
"times": [[2,2]]},
"002.html": {
"results": [[1,"C"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["4", "3", "2", "1"],
"tests": {"001.html": {
"results": [[2,"I"],[200,"F"]],
"times": [[2,2],[200,0]]},
"002.html": {
"results": [[1,"C"],[10,"Z"]],
"times": [[1,1],[10,0]]}}})
def test_merge_incremental_result_older_build(self):
# Test the build in incremental results is older than the most recent
# build in aggregated results.
self._test_merge(
# Aggregated results
{"builds": ["3", "1"],
"tests": {"001.html": {
"results": [[5,"F"]],
"times": [[5,0]]}}},
# Incremental results
{"builds": ["2"],
"tests": {"001.html": {
"results": [[1, "F"]],
"times": [[1,0]]}}},
# Expected merged results: the older build is merged in anyway
# (prepended to the build list, run counts summed).
{"builds": ["2", "3", "1"],
"tests": {"001.html": {
"results": [[6,"F"]],
"times": [[6,0]]}}})
def test_merge_incremental_result_same_build(self):
# Test the build in incremental results is same as the build in
# aggregated results.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[5,"F"]],
"times": [[5,0]]}}},
# Incremental results
{"builds": ["3", "2"],
"tests": {"001.html": {
"results": [[2, "F"]],
"times": [[2,0]]}}},
# Expected merged results: note build "2" now appears twice in the
# build list and the run counts are still summed.
{"builds": ["3", "2", "2", "1"],
"tests": {"001.html": {
"results": [[7,"F"]],
"times": [[7,0]]}}})
def test_merge_remove_new_test(self):
# Tests first seen in the incremental run whose only results are
# pass ("P"), no-data ("N") or skip ("X") are not added to the merged
# output; 001.html with failures is kept.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[199, "F"]],
"times": [[199, 0]]},
}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1, "F"]],
"times": [[1, 0]]},
"002.html": {
"results": [[1, "P"]],
"times": [[1, 0]]},
"003.html": {
"results": [[1, "N"]],
"times": [[1, 0]]},
"004.html": {
"results": [[1, "X"]],
"times": [[1, 0]]},
}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[200, "F"]],
"times": [[200, 0]]},
}},
max_builds=200)
def test_merge_remove_test(self):
# 001.html becomes all-pass and is removed. 003.html's lone old "F"
# falls off once the history is pruned to max_builds (200), leaving
# only X/N/P, so it is removed too. 002.html still fails and is kept.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"P"]],
"times": [[200,0]]},
"002.html": {
"results": [[10,"F"]],
"times": [[10,0]]},
"003.html": {
"results": [[190, 'X'], [9, 'N'], [1,"F"]],
"times": [[200,0]]},
}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"P"]],
"times": [[1,0]]},
"002.html": {
"results": [[1,"P"]],
"times": [[1,0]]},
"003.html": {
"results": [[1,"P"]],
"times": [[1,0]]},
}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"002.html": {
"results": [[1,"P"],[10,"F"]],
"times": [[11,0]]}}},
max_builds=200)
def test_merge_keep_test_with_all_pass_but_slow_time(self):
# Do not remove a test where all runs pass but max running time is
# >= jsonresults.JSON_RESULTS_MIN_TIME seconds.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200,"P"]],
"times": [[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
"002.html": {
"results": [[10,"F"]],
"times": [[10,0]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"P"]],
"times": [[1,1]]},
"002.html": {
"results": [[1,"P"]],
"times": [[1,0]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[201,"P"]],
"times": [[1,1],[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
"002.html": {
"results": [[1,"P"],[10,"F"]],
"times": [[11,0]]}}})
def test_merge_prune_extra_results(self):
# Remove items from test results and times that exceed the max number
# of builds to track (jsonresults.JSON_RESULTS_MAX_BUILDS).
max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[max_builds,"F"],[1,"I"]],
"times": [[max_builds,0],[1,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"T"]],
"times": [[1,1]]}}},
# Expected results: the oldest "I" entry falls off the end.
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[1,"T"],[max_builds,"F"]],
"times": [[1,1],[max_builds,0]]}}})
def test_merge_prune_extra_results_small(self):
# Remove items from test results and times that exceed the max number
# of builds to track, using the smaller threshold
# (jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL), passed explicitly below.
max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[max_builds,"F"],[1,"I"]],
"times": [[max_builds,0],[1,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"T"]],
"times": [[1,1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[1,"T"],[max_builds,"F"]],
"times": [[1,1],[max_builds,0]]}}},
int(max_builds))
def test_merge_prune_extra_results_with_new_result_of_same_type(self):
# Test that merging in a new result of the same type as the last result
# causes old results to fall off.
# The merged "F" count is capped at max_builds; the trailing "N" entry
# is pruned entirely.
max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[max_builds,"F"],[1,"N"]],
"times": [[max_builds,0],[1,1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
"results": [[1,"F"]],
"times": [[1,0]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
"results": [[max_builds,"F"]],
"times": [[max_builds,0]]}}},
int(max_builds))
# FIXME: Some data got corrupted and has results and times at the directory level.
# Once we've purged this from all the data, we should throw an error on this case.
def test_merge_directory_hierarchy_extra_results_and_times(self):
# The stray directory-level "results"/"times" keys are dropped from the
# merged output, and the output is stamped with "version": 4.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"baz": {
"003.html": {
"results": [[25,"F"]],
"times": [[25,0]]}},
"results": [[25,"F"]],
"times": [[25,0]]}},
# Incremental results
{"builds": ["3"],
"tests": {"baz": {
"003.html": {
"results": [[1,"F"]],
"times": [[1,0]]}}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"baz": {
"003.html": {
"results": [[26,"F"]],
"times": [[26,0]]}}},
"version": 4})
def test_merge_build_directory_hierarchy(self):
# Directory hierarchies merge recursively: branches absent from the
# incremental run ("bar/baz") get an "N" entry for the new build, and
# new branches ("baz") are added as-is.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"bar": {"baz": {
"003.html": {
"results": [[25,"F"]],
"times": [[25,0]]}}},
"foo": {
"001.html": {
"results": [[50,"F"]],
"times": [[50,0]]},
"002.html": {
"results": [[100,"I"]],
"times": [[100,0]]}}},
"version": 4},
# Incremental results
{"builds": ["3"],
"tests": {"baz": {
"004.html": {
"results": [[1,"I"]],
"times": [[1,0]]}},
"foo": {
"001.html": {
"results": [[1,"F"]],
"times": [[1,0]]},
"002.html": {
"results": [[1,"I"]],
"times": [[1,0]]}}},
"version": 4},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"bar": {"baz": {
"003.html": {
"results": [[1,"N"],[25,"F"]],
"times": [[26,0]]}}},
"baz": {
"004.html": {
"results": [[1,"I"]],
"times": [[1,0]]}},
"foo": {
"001.html": {
"results": [[51,"F"]],
"times": [[51,0]]},
"002.html": {
"results": [[101,"I"]],
"times": [[101,0]]}}},
"version": 4})
# FIXME(aboxhall): Add some tests for xhtml/svg test results.
def test_get_test_name_list(self):
# Get the test name list only. Don't include non-test-list data or
# test result details.
# FIXME: This also tests a temporary bug in the data where directory-level
# results have a results and times values. Once that bug is fixed,
# remove this test-case and assert we don't ever hit it.
self._test_get_test_list(
# Input results
{"builds": ["3", "2", "1"],
"tests": {"foo": {
"001.html": {
"results": [[200,"P"]],
"times": [[200,0]]},
"results": [[1,"N"]],
"times": [[1,0]]},
"002.html": {
"results": [[10,"F"]],
"times": [[10,0]]}}},
# Expected results
{"foo": {"001.html":{}}, "002.html":{}})
def test_gtest(self):
# gtest-style dotted names ("foo.bar") are flat test names, not
# directories; the merged output adopts the newer version number (4).
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"foo.bar": {
"results": [[50,"F"]],
"times": [[50,0]]},
"foo.bar2": {
"results": [[100,"I"]],
"times": [[100,0]]},
},
"version": 3},
# Incremental results
{"builds": ["3"],
"tests": {"foo.bar2": {
"results": [[1,"I"]],
"times": [[1,0]]},
"foo.bar3": {
"results": [[1,"F"]],
"times": [[1,0]]},
},
"version": 4},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"foo.bar": {
"results": [[1, "N"], [50,"F"]],
"times": [[51,0]]},
"foo.bar2": {
"results": [[101,"I"]],
"times": [[101,0]]},
"foo.bar3": {
"results": [[1,"F"]],
"times": [[1,0]]},
},
"version": 4})
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
xin3liang/platform_external_chromium_org | ppapi/generators/idl_diff.py | 180 | 9073 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are know to be safe, such as adding or removing
# blank lines, etc...
#
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, new lines and change type.
#
class Change(object):
# mode is the diff hunk header line (e.g. "12c14"); was/now are the
# removed and added source lines (without their '<'/'>' prefixes).
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
# Print a human-readable description of this change to stdout (Python 2).
def Dump(self):
if not self.was:
print 'Adding %s' % self.mode
elif not self.now:
print 'Missing %s' % self.mode
else:
print 'Modifying %s' % self.mode
for line in self.was:
print 'src: >>%s<<' % line
for line in self.now:
print 'gen: >>%s<<' % line
print
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
  """Return True for a change that only rewrites one copyright line."""
  is_one_liner = len(change.now) == 1 and len(change.was) == 1
  if not is_one_liner:
    return False
  return ('Copyright (c)' in change.now[0]) and ('Copyright (c)' in change.was[0])
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
  """Return True when the change removes exactly one bare '*' comment line."""
  return (not change.now
          and len(change.was) == 1
          and change.was[0].strip() == '*')
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
  """Return True when every added and removed line is empty."""
  return not any(change.now) and not any(change.was)
#
# IsToCppComment
#
# Return True if this change only converts C++ ('//') comments into
# C ('/* ... */') comments carrying identical text.
#
def IsToCppComment(change):
  """Return True when each removed '//' line reappears as a '/* */' line.

  Requires the same number of lines on both sides and identical comment
  text once the comment markers are stripped.
  """
  if not len(change.now) or len(change.now) != len(change.was):
    return False
  for index in range(len(change.now)):
    was = change.was[index].strip()
    if was[:2] != '//':
      return False
    was = was[2:].strip()
    now = change.now[index].strip()
    if now[:2] != '/*':
      return False
    now = now[2:-2].strip()
    if now != was:
      return False
  return True
  # BUG FIX: the original had a second, unreachable 'return True' after
  # this one (dead code left over from an edit); it has been removed.
def IsMergeComment(change):
  """Return True when a lone '*' comment line was merged into comment markup.

  The removed side must be exactly one bare '*' line; every added line must
  still look like block-comment scaffolding ('*', '/*...' or '...*/').
  """
  if len(change.was) != 1 or change.was[0].strip() != '*':
    return False

  def _is_comment_markup(text):
    stripped = text.strip()
    return (stripped == '*'
            or stripped[:2] == '/*'
            or stripped[-2:] == '*/')

  return all(_is_comment_markup(line) for line in change.now)
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
#   ENUM_XXX   = 1,
#   ENUM_XYY_Y = 2,
# vs
#   ENUM_XXX = 1,
#   ENUM_XYY_Y = 2,
#
def IsSpacing(change):
  """Return True when corresponding lines differ only in whitespace.

  Trailing '//' or '/*' comments on the original side are stripped before
  comparing, so right-hand comments are ignored as well.
  """
  if len(change.now) != len(change.was):
    return False
  for new_line, old_line in zip(change.now, change.was):
    # Cut off a trailing comment on the original line, if present.
    offs = old_line.find('//')
    if offs == -1:
      offs = old_line.find('/*')
    if offs > -1:
      old_line = old_line[:offs-1]
    if new_line.split() != old_line.split():
      return False
  return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
  """Return True when struct declarations were replaced by #include lines.

  Every removed line must start with 'struct'; every non-empty added line
  must contain '#include'.
  """
  if not all(line.strip().startswith('struct') for line in change.was):
    return False
  return all((not line) or ('#include' in line) for line in change.now)
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
  """Return True when the change only deletes '//' comment lines."""
  if change.now:
    return False
  return all(line.strip()[:2] == '//' for line in change.was)
#
# ValidChange
#
# Return True if none of the changes does not patch an above "bogus" change.
#
def ValidChange(change):
  """Return True unless the change matches one of the known-benign patterns."""
  benign_checks = (
      IsToCppComment,
      IsCopyright,
      IsBlankComment,
      IsMergeComment,
      IsBlank,
      IsSpacing,
      IsInclude,
      IsCppComment,
  )
  # Short-circuits in the same order as the original if-chain.
  return not any(check(change) for check in benign_checks)
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid such as swap of lines around an invalid block.
#
def Swapped(last, next):
  """Return True if |last| deletes exactly the lines |next| adds (or vice
  versa), allowing any rotation of the line order.

  Either |last| is a pure deletion matched by |next| as a pure addition, or
  |last| is a pure addition matched by |next| as a pure deletion.
  """
  if not last.now and not next.was and len(last.was) == len(next.now):
    cnt = len(last.was)
    for i in range(cnt):
      match = True
      for j in range(cnt):
        if last.was[j] != next.now[(i + j) % cnt]:
          match = False
          break
      if match: return True
  if not last.was and not next.now and len(last.now) == len(next.was):
    cnt = len(last.now)
    for i in range(cnt):
      match = True
      for j in range(cnt):
        # BUG FIX: the original compared last.now[i] (fixed element) here,
        # breaking rotation matching; use index j as in the branch above.
        if last.now[j] != next.was[(i + j) % cnt]:
          match = False
          break
      if match: return True
  return False
# FilterLinesIn
#
# Drop matching '<'/'>' line pairs from raw diff output: a line deleted in
# one place and re-added verbatim elsewhere (i.e. moved) is not treated as
# a real difference. NOTE(review): the local 'filter' shadows the builtin.
def FilterLinesIn(output):
was = []
now = []
filter = []  # per-index flag: True when that output line should be dropped
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
# Mark the first unmatched '>' line equal to each '<' line (and vice versa).
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print "Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline)
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
# Split on lines, adding an END marker to simply add logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
# NOTE(review): 'mode' and 'last' are assigned but never used below.
mode = ''
last = None
for line in lines:
# print "LINE=%s" % line
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
# '---' separator lines between diff hunks carry no content.
continue
else:
# Any other line is a hunk header: flush the accumulated lines into a
# Change, keeping it only if it is not a known-benign pattern.
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
def FilterChanges(changes):
  """Drop pairs of changes that merely move lines around an invalid block.

  Any two changes recognized by Swapped() cancel each other out and are
  removed from the returned list.
  """
  if len(changes) < 2:
    return changes
  skip = [False] * len(changes)
  for left in range(len(changes)):
    for right in range(left + 1, len(changes)):
      if skip[right]:
        continue
      if Swapped(changes[left], changes[right]):
        skip[left] = True
        skip[right] = True
  return [change for change, skipped in zip(changes, skip) if not skipped]
# Main
#
# Diff every generated header against its checked-in counterpart, report any
# non-benign deltas, and optionally (--ok) record the diff as acceptable.
# Returns 1 when --halt is set and a delta is found; None otherwise.
def Main(args):
filenames = ParseOptions(args)
if not filenames:
# No explicit files: diff every header in the generated directory, and
# warn about source headers with no generated counterpart.
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print 'Missing: %s' % name
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
# NOTE(review): bare except silently treats any read failure as "no
# recorded diff"; 'input' also shadows the builtin.
try:
input = open(diff, 'rt').read()
except:
input = ''
# A diff identical to the recorded acceptable one is not re-parsed.
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print "\n\nDelta between:\n src=%s\n gen=%s\n" % (src, gen)
for change in changes:
change.Dump()
print 'Done with %s\n\n' % src
if GetOption('ok'):
open(diff, 'wt').write(output)
if GetOption('halt'):
return 1
else:
print "\nSAME:\n src=%s\n gen=%s" % (src, gen)
if input: print ' ** Matched expected diff. **'
print '\n'
| bsd-3-clause |
zodiac/incubator-airflow | airflow/operators/redshift_to_s3_operator.py | 9 | 4096 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from airflow.hooks.postgres_hook import PostgresHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class RedshiftToS3Transfer(BaseOperator):
"""
Executes an UNLOAD command to s3 as a CSV with headers
:param schema: reference to a specific schema in redshift database
:type schema: string
:param table: reference to a specific table in redshift database
:type table: string
:param s3_bucket: reference to a specific S3 bucket
:type s3_bucket: string
:param s3_key: reference to a specific S3 key
:type s3_key: string
:param redshift_conn_id: reference to a specific redshift database
:type redshift_conn_id: string
:param s3_conn_id: reference to a specific S3 connection
:type s3_conn_id: string
:param options: reference to a list of UNLOAD options
:type options: list
"""
template_fields = ()
template_ext = ()
ui_color = '#ededed'
@apply_defaults
def __init__(
self,
schema,
table,
s3_bucket,
s3_key,
redshift_conn_id='redshift_default',
s3_conn_id='s3_default',
unload_options=tuple(),
autocommit=False,
parameters=None,
*args, **kwargs):
super(RedshiftToS3Transfer, self).__init__(*args, **kwargs)
self.schema = schema
self.table = table
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.redshift_conn_id = redshift_conn_id
self.s3_conn_id = s3_conn_id
self.unload_options = unload_options
self.autocommit = autocommit
self.parameters = parameters
# Query the table's column names, then UNLOAD a header row (as quoted
# literals) UNION ALL'ed with the text-cast data rows to S3.
def execute(self, context):
self.hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
self.s3 = S3Hook(s3_conn_id=self.s3_conn_id)
a_key, s_key = self.s3.get_credentials()
unload_options = ('\n\t\t\t').join(self.unload_options)
logging.info("Retrieving headers from %s.%s..." % (self.schema, self.table))
columns_query = """SELECT column_name
FROM information_schema.columns
WHERE table_schema = '{0}'
AND table_name = '{1}'
ORDER BY ordinal_position
""".format(self.schema, self.table)
cursor = self.hook.get_conn().cursor()
cursor.execute(columns_query)
rows = cursor.fetchall()
# NOTE(review): on Python 3 map() returns a one-shot iterator, so using
# 'columns' twice below would leave the second use empty — this relies
# on Python 2 semantics where map() returns a list. TODO confirm.
columns = map(lambda row: row[0], rows)
column_names = (', ').join(map(lambda c: "\\'{0}\\'".format(c), columns))
column_castings = (', ').join(map(lambda c: "CAST({0} AS text) AS {0}".format(c),
columns))
# NOTE(review): AWS credentials are interpolated into the SQL text and
# can leak into query logs; also UNION ALL does not guarantee the
# header row is emitted first — confirm this is acceptable.
unload_query = """
UNLOAD ('SELECT {0}
UNION ALL
SELECT {1} FROM {2}.{3}')
TO 's3://{4}/{5}/{3}_'
with
credentials 'aws_access_key_id={6};aws_secret_access_key={7}'
{8};
""".format(column_names, column_castings, self.schema, self.table,
self.s3_bucket, self.s3_key, a_key, s_key, unload_options)
logging.info('Executing UNLOAD command...')
self.hook.run(unload_query, self.autocommit)
logging.info("UNLOAD command complete...")
| apache-2.0 |
jfbelisle/triosante | node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
# Pretty-print each .gyp input file's fully-evaluated data dict into a
# sibling .gypd file (same stem plus the configured suffix). Only the first
# input file seen for a given output path is recorded.
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
# NOTE: .iteritems() is Python 2 only, matching the rest of this file.
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
h3biomed/ansible | lib/ansible/modules/network/aci/aci_fabric_node.py | 27 | 7776 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_fabric_node
short_description: Manage Fabric Node Members (fabric:NodeIdentP)
description:
- Manage Fabric Node Members on Cisco ACI fabrics.
version_added: '2.5'
options:
pod_id:
description:
- The pod id of the new Fabric Node Member.
type: int
serial:
description:
- Serial Number for the new Fabric Node Member.
type: str
aliases: [ serial_number ]
node_id:
description:
- Node ID Number for the new Fabric Node Member.
type: int
switch:
description:
- Switch Name for the new Fabric Node Member.
type: str
aliases: [ name, switch_name ]
description:
description:
- Description for the new Fabric Node Member.
type: str
aliases: [ descr ]
role:
description:
- Role for the new Fabric Node Member.
type: str
aliases: [ role_name ]
choices: [ leaf, spine, unspecified ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fabric:NodeIdentP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
'''
EXAMPLES = r'''
- name: Add fabric node
aci_fabric_node:
host: apic
username: admin
password: SomeSecretPassword
serial: FDO2031124L
node_id: 1011
switch: fab4-sw1011
state: present
delegate_to: localhost
- name: Remove fabric node
aci_fabric_node:
host: apic
username: admin
password: SomeSecretPassword
serial: FDO2031124L
node_id: 1011
state: absent
delegate_to: localhost
- name: Query fabric nodes
aci_fabric_node:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: '?rsp-prop-include=config-only'
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# NOTE: (This problem is also present on the APIC GUI)
# NOTE: When specifying a C(role) the new Fabric Node Member will be created but Role on GUI will be "unknown", hence not what seems to be a module problem
def main():
"""Module entry point: validate parameters, build the APIC request for the
fabricNodeIdentP object and create, delete or query it."""
argument_spec = aci_argument_spec()
argument_spec.update(
description=dict(type='str', aliases=['descr']),
node_id=dict(type='int'), # Not required for querying all objects
pod_id=dict(type='int'),
role=dict(type='str', choices=['leaf', 'spine', 'unspecified'], aliases=['role_name']),
serial=dict(type='str', aliases=['serial_number']), # Not required for querying all objects
switch=dict(type='str', aliases=['name', 'switch_name']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
# node_id and serial identify the object, so both add and remove need them.
required_if=[
['state', 'absent', ['node_id', 'serial']],
['state', 'present', ['node_id', 'serial']],
],
)
pod_id = module.params['pod_id']
serial = module.params['serial']
node_id = module.params['node_id']
switch = module.params['switch']
description = module.params['description']
role = module.params['role']
state = module.params['state']
aci = ACIModule(module)
# The node identity policy lives directly under the controller; the serial
# number is the naming attribute.
aci.construct_url(
root_class=dict(
aci_class='fabricNodeIdentP',
aci_rn='controller/nodeidentpol/nodep-{0}'.format(serial),
module_object=serial,
target_filter={'serial': serial},
)
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fabricNodeIdentP',
class_config=dict(
descr=description,
name=switch,
nodeId=node_id,
podId=pod_id,
# NOTE: Originally we were sending 'rn', but now we need 'dn' for idempotency
# FIXME: Did this change with ACI version ?
dn='uni/controller/nodeidentpol/nodep-{0}'.format(serial),
# rn='nodep-{0}'.format(serial),
role=role,
serial=serial,
)
)
aci.get_diff(aci_class='fabricNodeIdentP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json(**aci.result)
# Standard Ansible module entry point: run only when executed as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
tipabu/swift | swift/common/middleware/versioned_writes/legacy.py | 3 | 38701 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. note::
This middleware supports two legacy modes of object versioning that is
now replaced by a new mode. It is recommended to use the new
:ref:`Object Versioning <object_versioning>` mode for new containers.
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The value of the flag is
the URL-encoded container name where the versions are stored (commonly referred
to as the "archive container"). The flag itself is one of two headers, which
determines how object ``DELETE`` requests are handled:
* ``X-History-Location``
On ``DELETE``, copy the current version of the object to the archive
container, write a zero-byte "delete marker" object that notes when the
delete took place, and delete the object from the versioned container. The
object will no longer appear in container listings for the versioned
container and future requests there will return ``404 Not Found``. However,
the content will still be recoverable from the archive container.
* ``X-Versions-Location``
On ``DELETE``, only remove the current version of the object. If any
previous versions exist in the archive container, the most recent one is
copied over the current version, and the copy in the archive container is
deleted. As a result, if you have 5 total versions of the object, you must
delete the object 5 times for that object name to start responding with
``404 Not Found``.
Either header may be used for the various containers within an account, but
only one may be set for any given container. Attempting to set both
simultaneously will result in a ``400 Bad Request`` response.
.. note::
It is recommended to use a different archive container for
each container that is being versioned.
.. note::
Enabling versioning on an archive container is not recommended.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object in the archive container and the data in the ``PUT`` request is
saved as the data for the versioned object. The new object name (for the
previous version) is ``<archive_container>/<length><object_name>/<timestamp>``,
where ``length`` is the 3-character zero-padded hexadecimal length of the
``<object_name>`` and ``<timestamp>`` is the timestamp of when the previous
version was created.
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will be handled in one of two ways,
as described above.
To restore a previous version of an object, find the desired version in the
archive container then issue a ``COPY`` with a ``Destination`` header
indicating the original location. This will archive the current version similar
to a ``PUT`` over the versioned object. If the client additionally wishes to
permanently delete what was the current version, it must find the newly-created
archive in the archive container and issue a separate ``DELETE`` to it.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
This middleware was written as an effort to refactor parts of the proxy server,
so this functionality was already available in previous releases and every
attempt was made to maintain backwards compatibility. To allow operators to
perform a seamless upgrade, it is not required to add the middleware to the
proxy pipeline and the flag ``allow_versions`` in the container server
configuration files is still valid, but only when using
``X-Versions-Location``. In future releases, ``allow_versions`` will be
deprecated in favor of adding this middleware to the pipeline to enable or
disable the feature.
In case the middleware is added to the proxy pipeline, you must also
set ``allow_versioned_writes`` to ``True`` in the middleware options
to enable the information about this middleware to be returned in a /info
request.
.. note::
You need to add the middleware to the proxy pipeline and set
``allow_versioned_writes = True`` to use ``X-History-Location``. Setting
``allow_versions = True`` in the container server is not sufficient to
enable the use of ``X-History-Location``.
Upgrade considerations
++++++++++++++++++++++
If ``allow_versioned_writes`` is set in the filter configuration, you can leave
the ``allow_versions`` flag in the container server configuration files
untouched. If you decide to disable or remove the ``allow_versions`` flag, you
must re-set any existing containers that had the ``X-Versions-Location`` flag
configured so that it can now be tracked by the versioned_writes middleware.
Clients should not use the ``X-History-Location`` header until all proxies in
the cluster have been upgraded to a version of Swift that supports it.
Attempting to use ``X-History-Location`` during a rolling upgrade may result
in some requests being served by proxies running old code, leading to data
loss.
----------------------------------------------------
Examples Using ``curl`` with ``X-Versions-Location``
----------------------------------------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone from 'versions' container and back in 'container' container::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
curl -i -XGET -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
---------------------------------------------------
Examples Using ``curl`` with ``X-History-Location``
---------------------------------------------------
As above, create a container with the ``X-History-Location`` header and ensure
that the container referenced by the ``X-History-Location`` exists. In this
example, the name of that container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-History-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now delete the current version of the object. Subsequent requests will 404::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
A listing of the older versions of the object will include both the first and
second versions of the object, as well as a "delete marker" object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
To restore a previous version, simply ``COPY`` it from the archive container::
curl -i -XCOPY -H "X-Auth-Token: <token>" \
http://<storage_url>/versions/008myobject/<timestamp> \
-H "Destination: container/myobject"
Note that the archive container still has all previous versions of the object,
including the source for the restore::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
To permanently delete a previous version, ``DELETE`` it from the archive
container::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/versions/008myobject/<timestamp>
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versioned_writes`` to
``False`` in the middleware options.
Disable versioning from a container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container
"""
import calendar
import json
import time
from swift.common.utils import get_logger, Timestamp, \
config_true_value, close_if_possible, FileLikeIter, drain_and_close
from swift.common.request_helpers import get_sys_meta_prefix, \
copy_header_subset
from swift.common.wsgi import WSGIContext, make_pre_authed_request
from swift.common.swob import (
Request, HTTPException, HTTPRequestEntityTooLarge)
from swift.common.constraints import check_container_format, MAX_FILE_SIZE
from swift.proxy.controllers.base import get_container_info
from swift.common.http import (
is_success, is_client_error, HTTP_NOT_FOUND)
from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \
HTTPServerError, HTTPBadRequest, str_to_wsgi, bytes_to_wsgi, wsgi_quote, \
wsgi_unquote
from swift.common.exceptions import (
ListingIterNotFound, ListingIterError)
# Content-Type given to the zero-byte "delete marker" objects written into
# the archive container in history mode; the swift_versions_deleted param
# distinguishes markers from ordinary user objects.
DELETE_MARKER_CONTENT_TYPE = 'application/x-deleted;swift_versions_deleted=1'
# Client-facing container headers selecting "stack" vs "history" mode.
CLIENT_VERSIONS_LOC = 'x-versions-location'
CLIENT_HISTORY_LOC = 'x-history-location'
# Sysmeta equivalents; once this middleware is in the pipeline, sysmeta is
# the authoritative source of the versioning configuration.
SYSMETA_VERSIONS_LOC = get_sys_meta_prefix('container') + 'versions-location'
SYSMETA_VERSIONS_MODE = get_sys_meta_prefix('container') + 'versions-mode'
class VersionedWritesContext(WSGIContext):
    """
    Helper context that performs the subrequests (listings, GETs, PUTs
    and DELETEs) implementing legacy object versioning, and translates
    versioning sysmeta on container responses back into client headers.
    """
    def __init__(self, wsgi_app, logger):
        # wsgi_app: the next app in the pipeline; logger: middleware logger
        WSGIContext.__init__(self, wsgi_app)
        self.logger = logger
    def _listing_iter(self, account_name, lcontainer, lprefix, req):
        """Yield archive-container listing items for ``lprefix`` one at a
        time, flattening the pages from ``_listing_pages_iter``.
        A missing archive container simply yields nothing; other listing
        failures surface as ``HTTPServerError``.
        """
        try:
            for page in self._listing_pages_iter(account_name, lcontainer,
                                                 lprefix, req):
                for item in page:
                    yield item
        except ListingIterNotFound:
            pass
        except ListingIterError:
            raise HTTPServerError(request=req)
    def _in_proxy_reverse_listing(self, account_name, lcontainer, lprefix,
                                  req, failed_marker, failed_listing):
        '''Get the complete prefix listing and reverse it on the proxy.
        This is only necessary if we encounter a response from a
        container-server that does not respect the ``reverse`` param
        included by default in ``_listing_pages_iter``. This may happen
        during rolling upgrades from pre-2.6.0 swift.
        :param failed_marker: the marker that was used when we encountered
                              the non-reversed listing
        :param failed_listing: the non-reversed listing that was encountered.
                               If ``failed_marker`` is blank, we can use this
                               to save ourselves a request
        :returns: an iterator over all objects starting with ``lprefix`` (up
                  to but not including the failed marker) in reverse order
        '''
        complete_listing = []
        if not failed_marker:
            # We've never gotten a reversed listing. So save a request and
            # use the failed listing.
            complete_listing.extend(failed_listing)
            marker = bytes_to_wsgi(complete_listing[-1]['name'].encode('utf8'))
        else:
            # We've gotten at least one reversed listing. Have to start at
            # the beginning.
            marker = ''
        # First, take the *entire* prefix listing into memory
        try:
            for page in self._listing_pages_iter(
                    account_name, lcontainer, lprefix,
                    req, marker, end_marker=failed_marker, reverse=False):
                complete_listing.extend(page)
        except ListingIterNotFound:
            pass
        # Now that we've got everything, return the whole listing as one giant
        # reversed page
        return reversed(complete_listing)
    def _listing_pages_iter(self, account_name, lcontainer, lprefix,
                            req, marker='', end_marker='', reverse=True):
        '''Get "pages" worth of objects that start with a prefix.
        The optional keyword arguments ``marker``, ``end_marker``, and
        ``reverse`` are used similar to how they are for containers. We're
        either coming:
        - directly from ``_listing_iter``, in which case none of the
          optional args are specified, or
        - from ``_in_proxy_reverse_listing``, in which case ``reverse``
          is ``False`` and both ``marker`` and ``end_marker`` are specified
          (although they may still be blank).
        '''
        while True:
            # Listing subrequests are pre-authed: the user proved access to
            # the versioned container; the archive container is internal.
            lreq = make_pre_authed_request(
                req.environ, method='GET', swift_source='VW',
                path=wsgi_quote('/v1/%s/%s' % (account_name, lcontainer)))
            lreq.environ['QUERY_STRING'] = \
                'prefix=%s&marker=%s' % (wsgi_quote(lprefix),
                                         wsgi_quote(marker))
            if end_marker:
                lreq.environ['QUERY_STRING'] += '&end_marker=%s' % (
                    wsgi_quote(end_marker))
            if reverse:
                lreq.environ['QUERY_STRING'] += '&reverse=on'
            lresp = lreq.get_response(self.app)
            if not is_success(lresp.status_int):
                # errors should be short
                drain_and_close(lresp)
                if lresp.status_int == HTTP_NOT_FOUND:
                    raise ListingIterNotFound()
                elif is_client_error(lresp.status_int):
                    raise HTTPPreconditionFailed(request=req)
                else:
                    raise ListingIterError()
            if not lresp.body:
                break
            sublisting = json.loads(lresp.body)
            if not sublisting:
                break
            # When using the ``reverse`` param, check that the listing is
            # actually reversed
            first_item = bytes_to_wsgi(sublisting[0]['name'].encode('utf-8'))
            last_item = bytes_to_wsgi(sublisting[-1]['name'].encode('utf-8'))
            page_is_after_marker = marker and first_item > marker
            if reverse and (first_item < last_item or page_is_after_marker):
                # Apparently there's at least one pre-2.6.0 container server
                yield self._in_proxy_reverse_listing(
                    account_name, lcontainer, lprefix,
                    req, marker, sublisting)
                return
            marker = last_item
            yield sublisting
    def _get_source_object(self, req, path_info):
        """GET the current object at ``path_info`` (newest copy), following
        symlinks only to the link itself (``?symlink=get``).
        :returns: the GET response, or an ``HTTPRequestEntityTooLarge``
                  response if the object has unknown length or is larger
                  than ``MAX_FILE_SIZE`` and so cannot be archived
        """
        # make a pre_auth request in case the user has write access
        # to container, but not READ. This was allowed in previous version
        # (i.e., before middleware) so keeping the same behavior here
        get_req = make_pre_authed_request(
            req.environ, path=wsgi_quote(path_info) + '?symlink=get',
            headers={'X-Newest': 'True'}, method='GET', swift_source='VW')
        source_resp = get_req.get_response(self.app)
        if source_resp.content_length is None or \
                source_resp.content_length > MAX_FILE_SIZE:
            # Consciously *don't* drain the response before closing;
            # any logged 499 is actually rather appropriate here
            close_if_possible(source_resp.app_iter)
            return HTTPRequestEntityTooLarge(request=req)
        return source_resp
    def _put_versioned_obj(self, req, put_path_info, source_resp):
        """PUT ``source_resp``'s body and headers to ``put_path_info``.
        :returns: the PUT response; ``source_resp``'s app_iter is closed
                  here (the PUT is responsible for draining it)
        """
        # Create a new Request object to PUT to the container, copying
        # all headers from the source object apart from x-timestamp.
        put_req = make_pre_authed_request(
            req.environ, path=wsgi_quote(put_path_info), method='PUT',
            swift_source='VW')
        copy_header_subset(source_resp, put_req,
                           lambda k: k.lower() != 'x-timestamp')
        slo_size = put_req.headers.get('X-Object-Sysmeta-Slo-Size')
        if slo_size:
            # re-embed the SLO size as a swift_bytes content-type param so
            # the archived copy keeps reporting the large object's size
            put_req.headers['Content-Type'] += '; swift_bytes=' + slo_size
            put_req.environ['swift.content_type_overridden'] = True
        put_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter)
        put_resp = put_req.get_response(self.app)
        # the PUT was responsible for draining
        close_if_possible(source_resp.app_iter)
        return put_resp
    def _check_response_error(self, req, resp):
        """
        Raise an error response if ``resp`` was not successful: client
        errors map to 412 (missing archive container or bad permissions),
        anything else maps to 503.
        """
        if is_success(resp.status_int):
            return
        # any error should be short
        drain_and_close(resp)
        if is_client_error(resp.status_int):
            # missing container or bad permissions
            raise HTTPPreconditionFailed(request=req)
        # could not version the data, bail
        raise HTTPServiceUnavailable(request=req)
    def _build_versions_object_prefix(self, object_name):
        # archive names start with the 3-char zero-padded hex length of
        # the object name, then the name itself, then '/'
        return '%03x%s/' % (
            len(object_name),
            object_name)
    def _build_versions_object_name(self, object_name, ts):
        # full archive name: <prefix><internal timestamp of the version>
        return ''.join((
            self._build_versions_object_prefix(object_name),
            Timestamp(ts).internal))
    def _copy_current(self, req, versions_cont, api_version, account_name,
                      object_name):
        """Archive the current version of the object (if any) into
        ``versions_cont`` before the caller proceeds with its request.
        Returns quietly when there is no current object; raises via
        ``_check_response_error`` (or the auth callback) otherwise.
        """
        # validate the write access to the versioned container before
        # making any backend requests
        if 'swift.authorize' in req.environ:
            container_info = get_container_info(
                req.environ, self.app, swift_source='VW')
            req.acl = container_info.get('write_acl')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                raise aresp
        get_resp = self._get_source_object(req, req.path_info)
        if get_resp.status_int == HTTP_NOT_FOUND:
            # nothing to version, proceed with original request
            drain_and_close(get_resp)
            return
        # check for any other errors
        self._check_response_error(req, get_resp)
        # if there's an existing object, then copy it to
        # X-Versions-Location
        ts_source = get_resp.headers.get(
            'x-timestamp',
            calendar.timegm(time.strptime(
                get_resp.headers['last-modified'],
                '%a, %d %b %Y %H:%M:%S GMT')))
        vers_obj_name = self._build_versions_object_name(
            object_name, ts_source)
        put_path_info = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont, vers_obj_name)
        req.environ['QUERY_STRING'] = ''
        put_resp = self._put_versioned_obj(req, put_path_info, get_resp)
        self._check_response_error(req, put_resp)
        # successful PUT response should be short
        drain_and_close(put_resp)
    def handle_obj_versions_put(self, req, versions_cont, api_version,
                                account_name, object_name):
        """
        Copy current version of object to versions_container before proceeding
        with original request.
        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param object_name: name of object of original request
        :returns: the wrapped app, so the original PUT proceeds
        """
        self._copy_current(req, versions_cont, api_version, account_name,
                           object_name)
        return self.app
    def handle_obj_versions_delete_push(self, req, versions_cont, api_version,
                                        account_name, container_name,
                                        object_name):
        """
        Handle DELETE requests when in history mode.
        Copy current version of object to versions_container and write a
        delete marker before proceeding with original request.
        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param container_name: container name.
        :param object_name: name of object of original request
        :returns: the wrapped app, so the original DELETE proceeds
        """
        self._copy_current(req, versions_cont, api_version, account_name,
                           object_name)
        marker_path = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont,
            self._build_versions_object_name(object_name, time.time()))
        marker_headers = {
            # Definitive source of truth is Content-Type, and since we add
            # a swift_* param, we know users haven't set it themselves.
            # This is still open to users POSTing to update the content-type
            # but they're just shooting themselves in the foot then.
            'content-type': DELETE_MARKER_CONTENT_TYPE,
            'content-length': '0',
            'x-auth-token': req.headers.get('x-auth-token')}
        marker_req = make_pre_authed_request(
            req.environ, path=wsgi_quote(marker_path),
            headers=marker_headers, method='PUT', swift_source='VW')
        marker_req.environ['swift.content_type_overridden'] = True
        marker_resp = marker_req.get_response(self.app)
        self._check_response_error(req, marker_resp)
        drain_and_close(marker_resp)
        # successfully copied and created delete marker; safe to delete
        return self.app
    def _restore_data(self, req, versions_cont, api_version, account_name,
                      container_name, object_name, prev_obj_name):
        """Copy the archived version ``prev_obj_name`` back over the
        current object.
        :returns: False if the archived version is gone (caller should try
                  the next one); otherwise the archive path of the restored
                  version, so the caller can delete it
        """
        get_path = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont, prev_obj_name)
        get_resp = self._get_source_object(req, get_path)
        # if the version isn't there, keep trying with previous version
        if get_resp.status_int == HTTP_NOT_FOUND:
            drain_and_close(get_resp)
            return False
        self._check_response_error(req, get_resp)
        put_path_info = "/%s/%s/%s/%s" % (
            api_version, account_name, container_name, object_name)
        put_resp = self._put_versioned_obj(req, put_path_info, get_resp)
        self._check_response_error(req, put_resp)
        drain_and_close(put_resp)
        return get_path
    def handle_obj_versions_delete_pop(self, req, versions_cont, api_version,
                                       account_name, container_name,
                                       object_name):
        """
        Handle DELETE requests when in stack mode.
        Delete current version of object and pop previous version in its place.
        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param container_name: container name.
        :param object_name: object name.
        :returns: the response to the (possibly redirected) DELETE
        """
        listing_prefix = self._build_versions_object_prefix(object_name)
        item_iter = self._listing_iter(account_name, versions_cont,
                                       listing_prefix, req)
        auth_token_header = {'X-Auth-Token': req.headers.get('X-Auth-Token')}
        authed = False
        for previous_version in item_iter:
            if not authed:
                # validate the write access to the versioned container before
                # making any backend requests
                if 'swift.authorize' in req.environ:
                    container_info = get_container_info(
                        req.environ, self.app, swift_source='VW')
                    req.acl = container_info.get('write_acl')
                    aresp = req.environ['swift.authorize'](req)
                    if aresp:
                        return aresp
                authed = True
            if previous_version['content_type'] == DELETE_MARKER_CONTENT_TYPE:
                # check whether we have data in the versioned container
                obj_head_headers = {'X-Newest': 'True'}
                obj_head_headers.update(auth_token_header)
                head_req = make_pre_authed_request(
                    req.environ, path=wsgi_quote(req.path_info), method='HEAD',
                    headers=obj_head_headers, swift_source='VW')
                hresp = head_req.get_response(self.app)
                drain_and_close(hresp)
                if hresp.status_int != HTTP_NOT_FOUND:
                    self._check_response_error(req, hresp)
                    # if there's an existing object, then just let the delete
                    # through (i.e., restore to the delete-marker state):
                    break
                # no data currently in the container (delete marker is current)
                for version_to_restore in item_iter:
                    if version_to_restore['content_type'] == \
                            DELETE_MARKER_CONTENT_TYPE:
                        # Nothing to restore
                        break
                    obj_to_restore = bytes_to_wsgi(
                        version_to_restore['name'].encode('utf-8'))
                    req.environ['QUERY_STRING'] = ''
                    restored_path = self._restore_data(
                        req, versions_cont, api_version, account_name,
                        container_name, object_name, obj_to_restore)
                    if not restored_path:
                        continue
                    old_del_req = make_pre_authed_request(
                        req.environ, path=wsgi_quote(restored_path),
                        method='DELETE', headers=auth_token_header,
                        swift_source='VW')
                    del_resp = old_del_req.get_response(self.app)
                    drain_and_close(del_resp)
                    if del_resp.status_int != HTTP_NOT_FOUND:
                        self._check_response_error(req, del_resp)
                        # else, well, it existed long enough to do the
                        # copy; we won't worry too much
                    break
                prev_obj_name = bytes_to_wsgi(
                    previous_version['name'].encode('utf-8'))
                marker_path = "/%s/%s/%s/%s" % (
                    api_version, account_name, versions_cont,
                    prev_obj_name)
                # done restoring, redirect the delete to the marker
                req = make_pre_authed_request(
                    req.environ, path=wsgi_quote(marker_path), method='DELETE',
                    headers=auth_token_header, swift_source='VW')
            else:
                # there are older versions so copy the previous version to the
                # current object and delete the previous version
                prev_obj_name = bytes_to_wsgi(
                    previous_version['name'].encode('utf-8'))
                req.environ['QUERY_STRING'] = ''
                restored_path = self._restore_data(
                    req, versions_cont, api_version, account_name,
                    container_name, object_name, prev_obj_name)
                if not restored_path:
                    continue
                # redirect the original DELETE to the source of the reinstated
                # version object - we already auth'd original req so make a
                # pre-authed request
                req = make_pre_authed_request(
                    req.environ, path=wsgi_quote(restored_path),
                    method='DELETE', headers=auth_token_header,
                    swift_source='VW')
            # remove 'X-If-Delete-At', since it is not for the older copy
            if 'X-If-Delete-At' in req.headers:
                del req.headers['X-If-Delete-At']
            break
        # handle DELETE request here in case it was modified
        return req.get_response(self.app)
    def handle_container_request(self, env, start_response):
        """Pass a container request through to the app and translate any
        versioning sysmeta in the response into the matching client header
        (``X-History-Location`` or ``X-Versions-Location``)."""
        app_resp = self._app_call(env)
        if self._response_headers is None:
            self._response_headers = []
        mode = location = ''
        for key, val in self._response_headers:
            if key.lower() == SYSMETA_VERSIONS_LOC:
                location = val
            elif key.lower() == SYSMETA_VERSIONS_MODE:
                mode = val
        if location:
            if mode == 'history':
                self._response_headers.extend([
                    (CLIENT_HISTORY_LOC.title(), location)])
            else:
                self._response_headers.extend([
                    (CLIENT_VERSIONS_LOC.title(), location)])
        start_response(self._response_status,
                       self._response_headers,
                       self._response_exc_info)
        return app_resp
class VersionedWritesMiddleware(object):
    """
    WSGI filter for legacy object versioning.
    Container PUT/POST requests have their client versioning headers
    validated and translated to sysmeta; object PUT and DELETE requests
    are routed to the archive/restore logic in VersionedWritesContext.
    """
    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='versioned_writes')
    def container_request(self, req, start_response, enabled):
        """Handle a container request that may configure versioning.
        :param req: swob.Request for a container
        :param start_response: WSGI start_response callable
        :param enabled: the ``allow_versioned_writes`` config value
        :raises HTTPBadRequest: if both versioning headers are non-empty
        :raises HTTPPreconditionFailed: if a client tries to configure
            versioning while the feature is explicitly disabled
        """
        # If both headers arrive in one request, an empty value defers to
        # the other header; two non-empty values are an error.
        if CLIENT_VERSIONS_LOC in req.headers and \
                CLIENT_HISTORY_LOC in req.headers:
            if not req.headers[CLIENT_HISTORY_LOC]:
                # defer to versions location entirely
                del req.headers[CLIENT_HISTORY_LOC]
            elif req.headers[CLIENT_VERSIONS_LOC]:
                raise HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='Only one of %s or %s may be specified' % (
                        CLIENT_VERSIONS_LOC, CLIENT_HISTORY_LOC))
            else:
                # history location is present and versions location is
                # present but empty -- clean it up
                del req.headers[CLIENT_VERSIONS_LOC]
        if CLIENT_VERSIONS_LOC in req.headers or \
                CLIENT_HISTORY_LOC in req.headers:
            if CLIENT_VERSIONS_LOC in req.headers:
                val = req.headers[CLIENT_VERSIONS_LOC]
                mode = 'stack'
            else:
                val = req.headers[CLIENT_HISTORY_LOC]
                mode = 'history'
            if not val:
                # empty value is the same as X-Remove-Versions-Location
                req.headers['X-Remove-Versions-Location'] = 'x'
            elif not config_true_value(enabled) and \
                    req.method in ('PUT', 'POST'):
                # differently from previous version, we are actually
                # returning an error if user tries to set versions location
                # while feature is explicitly disabled.
                raise HTTPPreconditionFailed(
                    request=req, content_type='text/plain',
                    body='Versioned Writes is disabled')
            else:
                # OK, we received a value, have versioning enabled, and aren't
                # trying to set two modes at once. Validate the value and
                # translate to sysmeta.
                location = check_container_format(req, val)
                req.headers[SYSMETA_VERSIONS_LOC] = location
                req.headers[SYSMETA_VERSIONS_MODE] = mode
                # reset original header on container server to maintain sanity
                # now only sysmeta is source of Versions Location
                req.headers[CLIENT_VERSIONS_LOC] = ''
                # if both add and remove headers are in the same request
                # adding location takes precedence over removing
                for header in ['X-Remove-Versions-Location',
                               'X-Remove-History-Location']:
                    if header in req.headers:
                        del req.headers[header]
        if any(req.headers.get(header) for header in [
                'X-Remove-Versions-Location',
                'X-Remove-History-Location']):
            req.headers.update({CLIENT_VERSIONS_LOC: '',
                                SYSMETA_VERSIONS_LOC: '',
                                SYSMETA_VERSIONS_MODE: ''})
            for header in ['X-Remove-Versions-Location',
                           'X-Remove-History-Location']:
                if header in req.headers:
                    del req.headers[header]
        # send request and translate sysmeta headers from response
        vw_ctx = VersionedWritesContext(self.app, self.logger)
        return vw_ctx.handle_container_request(req.environ, start_response)
    def object_request(self, req, api_version, account, container, obj,
                       allow_versioned_writes):
        """
        Handle request for object resource.
        Note that account, container, obj should be unquoted by caller
        if the url path is under url encoding (e.g. %FF)
        :param req: swift.common.swob.Request instance
        :param api_version: should be v1 unless swift bumps api version
        :param account: account name string
        :param container: container name string
        :param obj: object name string
        :param allow_versioned_writes: the ``allow_versioned_writes``
            config value (may be unset/None for backwards compatibility)
        """
        resp = None
        is_enabled = config_true_value(allow_versioned_writes)
        container_info = get_container_info(
            req.environ, self.app, swift_source='VW')
        # To maintain backwards compatibility, container version
        # location could be stored as sysmeta or not, need to check both.
        # If stored as sysmeta, check if middleware is enabled. If sysmeta
        # is not set, but versions property is set in container_info, then
        # for backwards compatibility feature is enabled.
        versions_cont = container_info.get(
            'sysmeta', {}).get('versions-location')
        versioning_mode = container_info.get(
            'sysmeta', {}).get('versions-mode', 'stack')
        if not versions_cont:
            versions_cont = container_info.get('versions')
            # if allow_versioned_writes is not set in the configuration files
            # but 'versions' is configured, enable feature to maintain
            # backwards compatibility
            if not allow_versioned_writes and versions_cont:
                is_enabled = True
        if is_enabled and versions_cont:
            versions_cont = wsgi_unquote(str_to_wsgi(
                versions_cont)).split('/')[0]
            vw_ctx = VersionedWritesContext(self.app, self.logger)
            if req.method == 'PUT':
                resp = vw_ctx.handle_obj_versions_put(
                    req, versions_cont, api_version, account,
                    obj)
            # handle DELETE
            elif versioning_mode == 'history':
                resp = vw_ctx.handle_obj_versions_delete_push(
                    req, versions_cont, api_version, account,
                    container, obj)
            else:
                resp = vw_ctx.handle_obj_versions_delete_pop(
                    req, versions_cont, api_version, account,
                    container, obj)
        if resp:
            return resp
        else:
            return self.app
    def __call__(self, env, start_response):
        """WSGI entry point: dispatch container requests to
        ``container_request`` and object PUT/DELETE requests to
        ``object_request``; everything else passes straight through."""
        req = Request(env)
        try:
            (api_version, account, container, obj) = req.split_path(3, 4, True)
            is_cont_or_obj_req = True
        except ValueError:
            is_cont_or_obj_req = False
        if not is_cont_or_obj_req:
            return self.app(env, start_response)
        # In case allow_versioned_writes is set in the filter configuration,
        # the middleware becomes the authority on whether object
        # versioning is enabled or not. In case it is not set, then
        # the option in the container configuration is still checked
        # for backwards compatibility
        # For a container request, first just check if option is set,
        # can be either true or false.
        # If set, check if enabled when actually trying to set container
        # header. If not set, let request be handled by container server
        # for backwards compatibility.
        # For an object request, also check if option is set (either T or F).
        # If set, check if enabled when checking versions container in
        # sysmeta property. If it is not set check 'versions' property in
        # container_info
        allow_versioned_writes = self.conf.get('allow_versioned_writes')
        if allow_versioned_writes and container and not obj:
            try:
                return self.container_request(req, start_response,
                                              allow_versioned_writes)
            except HTTPException as error_response:
                return error_response(env, start_response)
        elif (obj and req.method in ('PUT', 'DELETE')):
            try:
                return self.object_request(
                    req, api_version, account, container, obj,
                    allow_versioned_writes)(env, start_response)
            except HTTPException as error_response:
                return error_response(env, start_response)
        else:
            return self.app(env, start_response)
| apache-2.0 |
mxamin/youtube-dl | youtube_dl/extractor/go.py | 10 | 4841 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
determine_ext,
parse_age_limit,
urlencode_postdata,
ExtractorError,
)
class GoIE(InfoExtractor):
    """Extractor for videos hosted on the go.com brand sites
    (abc, freeform, watchdisney*)."""
    # Maps the supported go.com sub-domains to the numeric brand codes
    # used by the watchabc content API below.
    _BRANDS = {
        'abc': '001',
        'freeform': '002',
        'watchdisneychannel': '004',
        'watchdisneyjunior': '008',
        'watchdisneyxd': '009',
    }
    # Matches either a direct vdka<id> video URL or a season/episode
    # display URL (in which case the id is scraped from the page).
    _VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_BRANDS.keys())
    _TESTS = [{
        'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx',
        'info_dict': {
            'id': '0_g86w5onx',
            'ext': 'mp4',
            'title': 'Sneak Peek: Language Arts',
            'description': 'md5:7dcdab3b2d17e5217c953256af964e9c',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://abc.go.com/shows/after-paradise/video/most-recent/vdka3335601',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
        if not video_id:
            # Display-style URL: scrape the VDKA video id from the page.
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(r'data-video-id=["\']VDKA(\w+)', webpage, 'video id')
        brand = self._BRANDS[sub_domain]
        # First (and only used) entry of the 'video' list holds the metadata.
        video_data = self._download_json(
            'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/-1/-1/%s/-1/-1.json' % (brand, video_id),
            video_id)['video'][0]
        title = video_data['title']
        formats = []
        for asset in video_data.get('assets', {}).get('asset', []):
            asset_url = asset.get('value')
            if not asset_url:
                continue
            format_id = asset.get('format')
            ext = determine_ext(asset_url)
            if ext == 'm3u8':
                video_type = video_data.get('type')
                if video_type == 'lf':
                    # Streams of type 'lf' need an entitlement session key
                    # appended to the m3u8 URL before they will play.
                    # NOTE(review): exact meaning of 'lf' not documented
                    # here -- confirm against the site API.
                    entitlement = self._download_json(
                        'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
                        video_id, data=urlencode_postdata({
                            'video_id': video_data['id'],
                            'video_type': video_type,
                            'brand': brand,
                            'device': '001',
                        }))
                    errors = entitlement.get('errors', {}).get('errors', [])
                    if errors:
                        error_message = ', '.join([error['message'] for error in errors])
                        raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
                    asset_url += '?' + entitlement['uplynkData']['sessionKey']
                formats.extend(self._extract_m3u8_formats(
                    asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False))
            else:
                formats.append({
                    'format_id': format_id,
                    'url': asset_url,
                    'ext': ext,
                })
        self._sort_formats(formats)
        subtitles = {}
        for cc in video_data.get('closedcaption', {}).get('src', []):
            cc_url = cc.get('value')
            if not cc_url:
                continue
            ext = determine_ext(cc_url)
            if ext == 'xml':
                # XML closed captions are treated as TTML.
                ext = 'ttml'
            subtitles.setdefault(cc.get('lang'), []).append({
                'url': cc_url,
                'ext': ext,
            })
        thumbnails = []
        for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []):
            thumbnail_url = thumbnail.get('value')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail.get('width')),
                'height': int_or_none(thumbnail.get('height')),
            })
        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('longdescription') or video_data.get('description'),
            'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000),
            'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')),
            'episode_number': int_or_none(video_data.get('episodenumber')),
            'series': video_data.get('show', {}).get('title'),
            'season_number': int_or_none(video_data.get('season', {}).get('num')),
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
qPCR4vir/orange | Orange/testing/unit/tests/test_name.py | 6 | 1898 | import orange
import Orange
import unittest
class TestName(unittest.TestCase):
    """Tests for the automatic 'name' attribute of Orange components."""
    def test_Learner(self):
        # Built-in learners get a short default name; it stays writable.
        learner = orange.BayesLearner()
        self.assertEqual(learner.name, "bayes")
        for new_name in ("foo", "BayesLearner", "x.BayesLearner", ""):
            learner.name = new_name
            self.assertEqual(learner.name, new_name)
    def test_class(self):
        # Subclasses derive their default name from the class name,
        # lower-casing the first letter.
        class MyBla(orange.BayesLearner):
            pass
        instance = MyBla()
        self.assertEqual(instance.name, "myBla")
        instance.name = "foo"
        self.assertEqual(instance.name, "foo")
    def test_classLearner(self):
        # A trailing 'Learner' suffix is stripped from the class name.
        class MyBlaLearner(orange.BayesLearner):
            pass
        self.assertEqual(MyBlaLearner().name, "myBla")
    def test_class_short(self):
        # Single-letter class names are simply lower-cased.
        class A(orange.BayesLearner):
            pass
        instance = A()
        self.assertEqual(instance.name, "a")
        instance.name = "foo"
        self.assertEqual(instance.name, "foo")
    def test_Discretizer(self):
        # The class is renamed internally:
        # "Discretizer" is removed and E is changed to e.
        discretizer = orange.EquiDistDiscretizer()
        self.assertEqual(discretizer.name, "equalWidth")
    def test_Classifier(self):
        self.assertEqual(orange.TreeClassifier().name, "tree")
    def test_Orange(self):
        self.assertEqual(Orange.classification.bayes.NaiveLearner().name,
                         "naive")
    def test_static_name(self):
        # A class attribute 'name' is honored verbatim: the
        # strip-'Learner'/lower-case rules apply only to class names.
        class NaiveLearner(orange.BayesLearner):
            name = "BayesLearner"
        self.assertEqual(NaiveLearner().name, "BayesLearner")
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
DhashS/scala_comp_robo_sign_detection | src/main/venv/lib/python3.5/site-packages/pip/vcs/subversion.py | 343 | 9350 | from __future__ import absolute_import
import logging
import os
import re
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.index import Link
from pip.utils import rmtree, display_path
from pip.utils.logging import indent_log
from pip.vcs import vcs, VersionControl
# Patterns for parsing 'svn info' plain-text and XML output, plus the
# pre-1.7 .svn/entries file.
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
    """pip VCS backend for Subversion checkouts."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), \
            'Bad directory: %s' % location
        # LANG=C forces untranslated output so the regexes below can match.
        output = self.run_command(
            ['info', location],
            show_stdout=False,
            extra_environ={'LANG': 'C'},
        )
        match = _svn_url_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine URL of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine revision of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return url, None
        return url, match.group(1)
    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        # Credentials are passed via command-line flags, never in the URL.
        url = self.remove_auth_from_url(url)
        logger.info('Exporting svn repository %s to %s', url, location)
        with indent_log():
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing
                # directory --force fixes this, but was only added in svn 1.5
                rmtree(location)
            self.run_command(
                ['export'] + rev_options + [url, location],
                show_stdout=False)
    def switch(self, dest, url, rev_options):
        """Switch the working copy at *dest* to track *url*."""
        self.run_command(['switch'] + rev_options + [url, dest])
    def update(self, dest, rev_options):
        """Update the working copy at *dest* to the requested revision."""
        self.run_command(['update'] + rev_options + [dest])
    def obtain(self, dest):
        """Check out the repository into *dest* (a fresh checkout is only
        made if check_destination says one is needed)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        url = self.remove_auth_from_url(url)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            self.run_command(['checkout', '-q'] + rev_options + [url, dest])
    def get_location(self, dist, dependency_links):
        """Return the dependency link whose #egg= fragment matches *dist*,
        with the fragment stripped, or None."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                # FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None
    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                # FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                base_url = dirurl + '/'   # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision
    def get_url_rev(self):
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev
    def get_url(self, location):
        """Return the repository URL of the checkout containing *location*,
        or None when no setup.py can be found above it."""
        # In cases where the source is in a subdirectory, not alongside
        # setup.py we have to look up in the location until we find a real
        # setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        return self._get_svn_url_rev(location)[0]
    def _get_svn_url_rev(self, location):
        """Return (url, max_revision) parsed from the svn metadata
        at *location*, trying the entries file, its XML form, then
        'svn info --xml'."""
        from pip.exceptions import InstallationError
        entries_path = os.path.join(location, self.dirname, 'entries')
        if os.path.exists(entries_path):
            with open(entries_path) as f:
                data = f.read()
        else:  # subversion >= 1.7 does not have the 'entries' file
            data = ''
        # Entries-file format versions 8-10 (svn 1.4 - 1.6): records are
        # separated by '\n\x0c\n' and fields are line-oriented.
        if (data.startswith('8') or
                data.startswith('9') or
                data.startswith('10')):
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)    # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = self.run_command(
                    ['info', '--xml', location],
                    show_stdout=False,
                )
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [
                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
                ]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev
    def get_src_requirement(self, dist, location):
        """Return an 'svn+URL@rev#egg=name' requirement string for the
        checkout at *location*, or None if the URL is unknown."""
        repo = self.get_url(location)
        if repo is None:
            return None
        # FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name)
    def check_version(self, dest, rev_options):
        """Always assume the versions don't match"""
        return False
    @staticmethod
    def remove_auth_from_url(url):
        # Return a copy of url with 'username:password@' removed.
        # username/pass params are passed to subversion through flags
        # and are not recognized in the url.
        # parsed url
        purl = urllib_parse.urlsplit(url)
        stripped_netloc = \
            purl.netloc.split('@')[-1]
        # stripped url
        url_pieces = (
            purl.scheme, stripped_netloc, purl.path, purl.query, purl.fragment
        )
        surl = urllib_parse.urlunsplit(url_pieces)
        return surl
def get_rev_options(url, rev):
    """Build svn command-line options: '-r rev' when a revision is given,
    plus '--username'/'--password' for any credentials embedded in *url*."""
    options = ['-r', rev] if rev else []
    split_result = urllib_parse.urlsplit(url)
    if hasattr(split_result, 'username'):
        # >= Python-2.5: the urlsplit result exposes credentials directly.
        username = split_result.username
        password = split_result.password
    else:
        # Very old Pythons: parse 'user:pass@' out of the netloc by hand.
        username, password = None, None
        netloc = split_result[1]
        if '@' in netloc:
            auth = netloc.split('@')[0]
            if ':' in auth:
                username, password = auth.split(':', 1)
            else:
                username = auth
    if username:
        options += ['--username', username]
    if password:
        options += ['--password', password]
    return options
# Make the Subversion backend available to pip's VCS dispatch table.
vcs.register(Subversion)
| gpl-3.0 |
chaeplin/dashmnb | dashlib/dash_keys.py | 1 | 3009 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
import time
import random
import binascii
import hashlib
from config import *
from dash_hashs import *
from dash_utils import *
from dash_b58 import *
from dash_jacobian import *
long = int
_bchr = lambda x: bytes([x])
_bord = lambda x: x
def random_string(x):
    """Return the textual (repr) form of *x* cryptographically random
    bytes; used purely as an entropy source for key generation."""
    raw = os.urandom(x)
    return str(raw)
def random_key():
    """Derive a private-key candidate by hashing several entropy sources."""
    parts = [
        random_string(32),                   # OS-level randomness
        str(random.randrange(2**256)),       # PRNG draw
        str(int(time.time() * 1000000)),     # current time in microseconds
    ]
    return sha256(''.join(parts))
def decode_hexto_int(string):
    """Interpret a hex string as a big-endian unsigned integer
    (empty string decodes to 0)."""
    raw = bytes.fromhex(string)
    return int.from_bytes(raw, byteorder='big')
def get_random_key():
    """Generate a private key whose integer value lies in (0, N).

    Returns a dict with the hex key ('privkey') and its integer
    form ('privkey_decoded').
    """
    while True:
        candidate = random_key()
        decoded = decode_hexto_int(candidate)
        if 0 < decoded < N:
            return {
                'privkey': candidate,
                'privkey_decoded': decoded
            }
def pubkey_to_address(string):
    """Hash a hex-encoded public key and Base58Check-encode the result
    (address prefix byte + HASH160 + 4-byte double-SHA256 checksum)."""
    pubkey_hash = Hash160(bytes.fromhex(string))
    payload = _bchr(addr_prefix) + pubkey_hash
    checksum = double_sha256(payload)[0:4]
    return b58encode(payload + checksum)
def private_key_to_wif(string, compressed=False):
    """Encode a hex private key in Wallet Import Format.

    When *compressed* is true a trailing 0x01 byte marks the key as
    corresponding to a compressed public key.
    """
    hex_key = string + '01' if compressed else string
    payload = _bchr(wif_prefix) + bytes.fromhex(hex_key)
    checksum = double_sha256(payload)[0:4]
    return b58encode(payload + checksum)
def wif_to_privkey(string):
    """Decode a WIF string back to a hex private key.

    Returns {'compressed': bool, 'privkey': hex string} on success, or
    None when the version byte or checksum does not match.
    """
    # 52-character WIF strings carry the extra 0x01 'compressed' marker.
    wif_compressed = 52 == len(string)
    pvkeyencoded = b58decode(string).hex()
    wifversion = pvkeyencoded[:2]
    checksum = pvkeyencoded[-8:]
    #vs = binascii.unhexlify(pvkeyencoded[:-8])
    # Payload without the trailing 4-byte (8 hex chars) checksum.
    vs = bytes.fromhex(pvkeyencoded[:-8])
    check = double_sha256(vs)[0:4]
    if wifversion == wif_prefix.to_bytes(
            1, byteorder='big').hex() and checksum == check.hex():
        if wif_compressed:
            compressed = True
            # Strip version byte (2 hex chars), checksum (8) and the
            # trailing 0x01 compression marker (2).
            privkey = pvkeyencoded[2:-10]
        else:
            compressed = False
            privkey = pvkeyencoded[2:-8]
        return {
            'compressed': compressed,
            'privkey': privkey
        }
    else:
        return None
def get_public_key(string):  # from private_key
    """Derive the hex-encoded public key (uncompressed and compressed
    SEC form) from a hex private key via EC scalar multiplication."""
    decoded_private_key = decode_hexto_int(string)
    public_key = fast_multiply(G, decoded_private_key)
    # Uncompressed SEC format: 0x04 || X (32 bytes) || Y (32 bytes).
    hex_encoded_public_key = str('04') + public_key[0].to_bytes(
        32, byteorder='big').hex() + public_key[1].to_bytes(32, byteorder='big').hex()
    (public_key_x, public_key_y) = public_key
    # Compressed SEC format: 0x02 for even Y, 0x03 for odd Y, then X only.
    if (public_key_y % 2) == 0:
        compressed_prefix = '02'
    else:
        compressed_prefix = '03'
    hex_compressed_public_key = compressed_prefix + \
        public_key_x.to_bytes(32, byteorder='big').hex()
    return {
        'pubkeyhex': hex_encoded_public_key,
        'pubkeyhex_compressed': hex_compressed_public_key
    }
#
| mit |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/django/core/management/utils.py | 405 | 2590 | from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.utils import six
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, universal_newlines=True):
    """
    Friendly wrapper around Popen.
    Returns stdout output, stderr output and OS status code.
    """
    try:
        process = Popen(
            args, shell=False, stdout=PIPE, stderr=PIPE,
            close_fds=os.name != 'nt',
            universal_newlines=universal_newlines)
    except OSError as e:
        # Re-raise as the requested exception type, keeping the
        # original traceback (six.reraise works on Python 2 and 3).
        strerror = force_text(
            e.strerror, DEFAULT_LOCALE_ENCODING, strings_only=True)
        six.reraise(
            os_err_exc_type,
            os_err_exc_type('Error executing %s: %s' % (args[0], strerror)),
            sys.exc_info()[2])
    stdout, stderr = process.communicate()
    return (
        stdout,
        force_text(stderr, DEFAULT_LOCALE_ENCODING, strings_only=True),
        process.returncode
    )
def handle_extensions(extensions):
    """
    Organize multiple extensions that are separated with commas or passed
    by using --extension/-e multiple times.
    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml']
    >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
    {'.html', '.js', '.py'}
    >>> handle_extensions(['.html, txt,.tpl'])
    {'.html', '.tpl', '.txt'}
    """
    split_exts = []
    for chunk in extensions:
        split_exts.extend(chunk.replace(' ', '').split(','))
    # Normalize each entry to start with a dot, de-duplicating via a set.
    return {ext if ext.startswith('.') else '.%s' % ext for ext in split_exts}
def find_command(cmd, path=None, pathext=None):
    """Search *path* (default: $PATH) for *cmd*, honoring Windows
    executable extensions from *pathext* (default: $PATHEXT).
    Return the full path of the first match, or None."""
    if path is None:
        path = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(path, six.string_types):
        path = [path]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # If cmd already ends with one of the extensions, don't append another.
    if any(cmd.endswith(ext) for ext in pathext):
        pathext = ['']
    # Look for the bare command first, then with each extension appended.
    for dirname in path:
        base = os.path.join(dirname, cmd)
        for candidate in [base] + [base + ext for ext in pathext]:
            if os.path.isfile(candidate):
                return candidate
    return None
| gpl-3.0 |
cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutron/policy.py | 6 | 18897 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import collections
import itertools
import logging
import re
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
import neutron.common.utils as utils
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log
from neutron.openstack.common import policy
LOG = log.getLogger(__name__)
# Cached path and contents of the policy file; cleared by reset().
_POLICY_PATH = None
_POLICY_CACHE = {}
# Name of the rule that grants admin rights to a request context.
ADMIN_CTX_POLICY = 'context_is_admin'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
    'extension:provider_network':
    ['network:provider:network_type',
     'network:provider:physical_network',
     'network:provider:segmentation_id'],
    'extension:router':
    ['network:router:external'],
    'extension:port_binding':
    ['port:binding:vif_type', 'port:binding:vif_details',
     'port:binding:profile', 'port:binding:host_id']
}
# Maps deprecated action verbs to their new-style equivalents.
DEPRECATED_ACTION_MAP = {
    'view': ['get'],
    'set': ['create', 'update']
}
# Ensure the policy_file option is registered before it is read in init().
cfg.CONF.import_opt('policy_file', 'neutron.common.config')
def reset():
    """Drop the cached policy file path/contents and clear the engine."""
    global _POLICY_PATH, _POLICY_CACHE
    _POLICY_PATH = None
    _POLICY_CACHE = {}
    policy.reset()
def init():
    """Locate the policy file (once) and (re)load its rules on change."""
    global _POLICY_PATH
    global _POLICY_CACHE
    if not _POLICY_PATH:
        _POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file)
        if not _POLICY_PATH:
            raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file)
    # pass _set_brain to read_cached_file so that the policy brain
    # is reset only if the file has changed
    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
                           reload_func=_set_rules)
def get_resource_and_action(action):
    """Extract resource and action (write, read) from api operation."""
    verb_and_resource = action.split(':', 1)[0].split('_', 1)
    resource = "%ss" % verb_and_resource[-1]
    is_write = verb_and_resource[0] != 'get'
    return (resource, is_write)
def _set_rules(data):
    """Parse policy JSON *data* and install it as the active rule set,
    translating deprecated folsom/grizzly-style extension rules."""
    default_rule = 'default'
    LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH)
    # Ensure backward compatibility with folsom/grizzly convention
    # for extension rules
    policies = policy.Rules.load_json(data, default_rule)
    # NOTE: entries are deleted inside this loop; on Python 2 .keys()
    # returns a list snapshot, which makes that safe.
    for pol in policies.keys():
        if any([pol.startswith(depr_pol) for depr_pol in
                DEPRECATED_POLICY_MAP.keys()]):
            LOG.warn(_LW("Found deprecated policy rule:%s. Please consider "
                         "upgrading your policy configuration file"), pol)
            pol_name, action = pol.rsplit(':', 1)
            try:
                new_actions = DEPRECATED_ACTION_MAP[action]
                new_policies = DEPRECATED_POLICY_MAP[pol_name]
                # bind new actions and policies together
                for actual_policy in ['_'.join(item) for item in
                                      itertools.product(new_actions,
                                                        new_policies)]:
                    if actual_policy not in policies:
                        # New policy, same rule
                        LOG.info(_LI("Inserting policy:%(new_policy)s in "
                                     "place of deprecated "
                                     "policy:%(old_policy)s"),
                                 {'new_policy': actual_policy,
                                  'old_policy': pol})
                        policies[actual_policy] = policies[pol]
                # Remove old-style policy
                del policies[pol]
            except KeyError:
                LOG.error(_LE("Backward compatibility unavailable for "
                              "deprecated policy %s. The policy will "
                              "not be enforced"), pol)
    policy.set_rules(policies)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
    """Verify that an attribute is present and is explicitly set."""
    if 'update' in action:
        # In the case of update, the function should not pay attention to a
        # default value of an attribute, but check whether it was explicitly
        # marked as being updated instead.
        return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
                target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
    # For create: the attribute counts as explicitly set only when it has a
    # declared default, was actually supplied, and differs from that default.
    return ('default' in resource[attribute_name] and
            attribute_name in target and
            target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
            target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
    """Verify that sub-attributes are iterable and should be validated."""
    validate = attribute.get('validate')
    # Sub-attribute checks only apply when a 'type:dict...' validator with a
    # truthy descriptor is declared. NOTE: iteritems() makes this Python 2.
    return (validate and isinstance(sub_attr, collections.Iterable) and
            any([k.startswith('type:dict') and
                 v for (k, v) in validate.iteritems()]))
def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks."""
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor
    validate = attr['validate']
    # NOTE: Python 2 filter() returns a list, hence the truthiness test and
    # the key[0] indexing below.
    key = filter(lambda k: k.startswith('type:dict'), validate.keys())
    if not key:
        LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
                 attr_name)
        return
    data = validate[key[0]]
    if not isinstance(data, dict):
        LOG.debug(_("Attribute type descriptor is not a dict. Unable to "
                    "generate any sub-attr policy rule for %s."),
                  attr_name)
        return
    # One rule per sub-attribute actually present in the request target.
    sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
                                       (action, attr_name,
                                        sub_attr_name)) for
                      sub_attr_name in data if sub_attr_name in
                      target[attr_name]]
    return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
    """Recursively walk a policy rule to extract a list of match entries."""
    if isinstance(match_rule, policy.RuleCheck):
        rules.append(match_rule.match)
    elif isinstance(match_rule, policy.AndCheck):
        for rule in match_rule.rules:
            _process_rules_list(rules, rule)
    # Mutates *rules* in place and returns it for chaining.
    return rules
def _build_match_rule(action, target):
    """Create the rule to match for a given action.
    The policy rule to be matched is built in the following way:
    1) add entries for matching permission on objects
    2) add an entry for the specific action (e.g.: create_network)
    3) add an entry for attributes of a resource for which the action
    is being executed (e.g.: create_network:shared)
    4) add an entry for sub-attributes of a resource for which the
    action is being executed
    (e.g.: create_router:external_gateway_info:network_id)
    """
    match_rule = policy.RuleCheck('rule', action)
    resource, is_write = get_resource_and_action(action)
    # Attribute-based checks shall not be enforced on GETs
    if is_write:
        # assigning to variable with short name for improving readability
        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
        if resource in res_map:
            for attribute_name in res_map[resource]:
                if _is_attribute_explicitly_set(attribute_name,
                                                res_map[resource],
                                                target, action):
                    attribute = res_map[resource][attribute_name]
                    if 'enforce_policy' in attribute:
                        attr_rule = policy.RuleCheck('rule', '%s:%s' %
                                                     (action, attribute_name))
                        # Build match entries for sub-attributes
                        if _should_validate_sub_attributes(
                                attribute, target[attribute_name]):
                            attr_rule = policy.AndCheck(
                                [attr_rule, _build_subattr_match_rule(
                                    attribute_name, attribute,
                                    action, target)])
                        match_rule = policy.AndCheck([match_rule, attr_rule])
    # Check that the logger has a DEBUG log level
    # NOTE(review): 'and' binds tighter than 'or' here, so this groups as
    # (debug and NOTSET) or DEBUG -- confirm that is the intended logic.
    if (cfg.CONF.debug and LOG.logger.level == logging.NOTSET or
            LOG.logger.level == logging.DEBUG):
        rules = _process_rules_list([], match_rule)
        LOG.debug("Enforcing rules: %s", rules)
    return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
    """Resource ownership check.
    This check verifies the owner of the current resource, or of another
    resource referenced by the one under analysis.
    In the former case it falls back to a regular GenericCheck, whereas
    in the latter case it leverages the plugin to load the referenced
    resource and perform the check.
    """
    def __init__(self, kind, match):
        """Parse the '%(field)s' match expression; raise PolicyInitError
        when it does not have that form."""
        # Process the match
        try:
            self.target_field = re.findall('^\%\((.*)\)s$',
                                           match)[0]
        except IndexError:
            err_reason = (_("Unable to identify a target field from:%s."
                            "match should be in the form %%(<field_name>)s") %
                          match)
            LOG.exception(err_reason)
            raise exceptions.PolicyInitError(
                policy="%s:%s" % (kind, match),
                reason=err_reason)
        super(OwnerCheck, self).__init__(kind, match)
    def __call__(self, target, creds):
        """Check ownership; when the field names a parent resource, load
        that resource through the core plugin first."""
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility
            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field
            # for/else: the else branch runs only if no separator worked.
            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug(_("Unable to find ':' as separator in %s."),
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = (_("Unable to find resource name in %s") %
                              self.target_field)
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = (_("Unable to verify match:%(match)s as the "
                                "parent resource: %(res)s was not found") %
                              {'match': self.match, 'res': parent_res})
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            # FIXME(ihrachys): if import is put in global, circular
            # import failure occurs
            from neutron import manager
            f = getattr(manager.NeutronManager.get_instance().plugin,
                        'get_%s' % parent_res)
            # f *must* exist, if not found it is better to let neutron
            # explode. Check will be performed with admin context
            context = importutils.import_module('neutron.context')
            try:
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                target[self.target_field] = data[parent_field]
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Policy check error while calling %s!'),
                                  f)
        match = self.match % target
        if self.kind in creds:
            # Python 2: credentials may be str or unicode; normalize first.
            return match == unicode(creds[self.kind])
        return False
@policy.register('field')
class FieldCheck(policy.Check):
    """Check a resource attribute against a literal value, e.g.
    'field:networks:shared=True'."""
    def __init__(self, kind, match):
        # Process the match
        resource, field_value = match.split(':', 1)
        field, value = field_value.split('=', 1)
        super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
                                         (resource, field, value))
        # Value might need conversion - we need help from the attribute map
        try:
            attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
            conv_func = attr['convert_to']
        except KeyError:
            # No converter registered for this field: compare raw strings.
            conv_func = lambda x: x
        self.field = field
        self.value = conv_func(value)
    def __call__(self, target_dict, cred_dict):
        """Return True when the target's field equals the literal value."""
        target_value = target_dict.get(self.field)
        # target_value might be a boolean, explicitly compare with None
        if target_value is None:
            LOG.debug(_("Unable to find requested field: %(field)s in "
                        "target: %(target_dict)s"),
                      {'field': self.field,
                       'target_dict': target_dict})
            return False
        return target_value == self.value
def _prepare_check(context, action, target):
    """Prepare rule, target, and credentials for the policy engine."""
    # Compare with None: an empty dict target is a legitimate value.
    if target is None:
        target = {}
    return _build_match_rule(action, target), target, context.to_dict()
def check(context, action, target, plugin=None, might_not_exist=False):
    """Verify that *action* is allowed on *target* in this *context*.

    :param context: neutron context
    :param action: colon-separated string naming the action to check
    :param target: dict describing the object of the action; for object
        creation e.g. ``{'project_id': context.project_id}``
    :param plugin: unused and deprecated; kept for backward compatibility
    :param might_not_exist: when True, skip the check (returning True)
        if the named policy is not defined
    :return: True if access is permitted else False.
    """
    if might_not_exist and not (policy._rules and action in policy._rules):
        return True
    match_rule, target, credentials = _prepare_check(context, action, target)
    return policy.check(match_rule, target, credentials)
def enforce(context, action, target, plugin=None):
    """Verify that *action* is allowed on *target*, raising on failure.

    :param context: neutron context
    :param action: colon-separated string naming the action to check
    :param target: dict describing the object of the action; for object
        creation e.g. ``{'project_id': context.project_id}``
    :param plugin: unused and deprecated; kept for backward compatibility
    :raises neutron.exceptions.PolicyNotAuthorized: if verification fails.
    """
    rule, target, credentials = _prepare_check(context, action, target)
    authorized = policy.check(rule, target, credentials, action=action)
    if not authorized:
        LOG.debug(_("Failed policy check for '%s'"), action)
        raise exceptions.PolicyNotAuthorized(action=action)
    return authorized
def check_is_admin(context):
    """Verify context has admin rights according to policy settings."""
    init()
    # The target of the admin check is the requesting user itself.
    credentials = context.to_dict()
    target = credentials
    # Backward compatibility: if ADMIN_CTX_POLICY is not defined in the
    # loaded rules, fall back to validating role:admin.
    # (Replaces the fragile 'cond and a or b' idiom with a conditional
    # expression; behavior is unchanged because ADMIN_CTX_POLICY is truthy.)
    admin_policy = (ADMIN_CTX_POLICY if ADMIN_CTX_POLICY in policy._rules
                    else 'role:admin')
    return policy.check(admin_policy, target, credentials)
def _extract_roles(rule, roles):
    """Recursively collect (lower-cased) role names referenced by *rule*
    into the *roles* list, following rule indirections."""
    if isinstance(rule, policy.RoleCheck):
        roles.append(rule.match.lower())
    elif isinstance(rule, policy.RuleCheck):
        # Indirect reference: resolve through the active rule set.
        _extract_roles(policy._rules[rule.match], roles)
    elif hasattr(rule, 'rules'):
        # Composite check (And/Or): recurse into each child rule.
        for rule in rule.rules:
            _extract_roles(rule, roles)
def get_admin_roles():
    """Return a list of roles which are granted admin rights according
    to policy settings.
    """
    # NOTE(salvatore-orlando): This function provides a solution for
    # populating implicit contexts with the appropriate roles so that
    # they correctly pass policy checks, and will become superseded
    # once all explicit policy checks are removed from db logic and
    # plugin modules. For backward compatibility it returns the literal
    # admin if ADMIN_CTX_POLICY is not defined
    init()
    rules = policy._rules
    if not rules or ADMIN_CTX_POLICY not in rules:
        return ['admin']
    try:
        admin_ctx_rule = rules[ADMIN_CTX_POLICY]
    except (KeyError, TypeError):
        return
    collected = []
    _extract_roles(admin_ctx_rule, collected)
    return collected
| apache-2.0 |
thypon/bowser-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)
# Registries filled by the define_* callbacks below:
#   event_name -> field_name -> {'delim': str, 'values': {int: str}}
flag_fields = autodict()
#   event_name -> field_name -> {'values': {int: str}}
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when printing a flag field's set bits."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the display string for one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Placeholder for symmetry with define_flag_field.

    Symbolic fields need no per-field metadata (no delimiter), so there
    is nothing to record here.
    """
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the display string for one exact value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate a flag-field bitmask into a human readable string.

    Each registered bit that is set in *value* contributes its display
    string, joined by the field's registered delimiter.  A zero value
    whose registry defines an entry for 0 yields that entry alone.
    Returns "" if the field has no registered values.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # dict.keys() is a view in Python 3 and has no .sort(); sorted()
        # works on both Python 2 and 3 and keeps bits in ascending order.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the handled bit so compound masks are not re-matched.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Translate a symbolic-field value into its registered display string.

    Unlike flag_str this is an exact-match lookup: the first registered
    value equal to *value* wins.  Returns "" when nothing matches or the
    field has no registered values.
    """
    string = ""
    if symbolic_fields[event_name][field_name]:
        # dict.keys() is a view in Python 3 and has no .sort(); sorted()
        # works on both Python 2 and 3.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bits of a trace record's flags field, keyed by bit value in ascending
# order; 0x00 maps to the special "no flags set" label.
trace_flags = { 0x00: "NONE", \
    0x01: "IRQS_OFF", \
    0x02: "IRQS_NOSUPPORT", \
    0x04: "NEED_RESCHED", \
    0x08: "HARDIRQ", \
    0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Decode the trace_flags bitmask into a ' | ' separated string.

    A value of 0 yields "NONE"; otherwise each set bit contributes its
    label from the trace_flags table.
    """
    string = ""
    print_delim = 0
    # Iterate in sorted (ascending bit) order so the output string is
    # deterministic on Python 2, where plain dicts are unordered.
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            # Clear the handled bit so it is only reported once.
            value &= ~idx
    return string
def taskState(state):
    """Map a scheduler task-state number to its single-letter code.

    Unrecognised states map to "Unknown".
    """
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields carried by every traced event sample."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the full timestamp in nanoseconds."""
        return (self.secs * (10 ** 9)) + self.nsecs

    def ts_format(self):
        """Return the timestamp as a 'seconds.microseconds' string.

        The fractional part must be zero-padded to six digits: with the
        original "%d.%d" format, secs=1/nsecs=5000 printed as "1.5",
        i.e. 5 microseconds rendered indistinguishably from half a
        second.
        """
        return "%d.%06d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
jakobharlan/avango | examples/examples_common/GuaVE.py | 3 | 3609 | import avango
import avango.script
import code
import threading
import sys
import signal
import queue
import os
try:
import pyreadline as readline
except ImportError:
import readline
import rlcompleter
# ANSI escape sequences: bright green and attribute reset, used to color
# the interactive prompt and banner text.
print_green = '\033[1;32m'
print_reset = '\033[0m'
class GuaVE(avango.script.Script):
    """Interactive Python console ("guacamole virtual environment").

    Reads lines on a background thread (so the render loop is never
    blocked), queues them, and feeds them to an InteractiveConsole from
    evaluate(), which avango calls on the main thread.
    """

    Prompt = avango.SFString()
    HistoryFile = avango.SFString()

    def __init__(self):
        self.super(GuaVE).__init__()
        self.always_evaluate(True)
        self.__input_queue = queue.Queue()
        self.__input_lock = threading.Lock()
        self.__vars = {}
        self.Prompt.value = "gua> "
        self.HistoryFile.value = os.path.expanduser("~/.guahistory")

    def start(self, locals, globals, show_banner=True):
        """Start the shell with the given namespaces and spawn the reader."""
        self.__vars = globals.copy()
        self.__vars.update(locals)
        self.__shell = code.InteractiveConsole(self.__vars)
        print("")
        if show_banner:
            print("")
            print(" _ ")
            print(" | | ")
            print(" __ _ _ _ __ _ ___ __ _ _ __ ___ ___ | | ___ ")
            print(" / _` | | | |/ _` |/ __/ _` | '_ ` _ \ / _ \| |/ _ \ ")
            print("| (_| | |_| | (_| | (_| (_| | | | | | | (_) | | __/ ")
            print(" \__, |\__,_|\__,_|\___\__,_|_| |_| |_|\___/|_|\___| ")
            print(" __/ | ")
            print(" |___/ ")
            print("")
            print("")
            print("")
            print(print_green +
                  "Welcome to GuaVE, the guacamole virtual environment!" +
                  print_reset)
            print("")
            print("Press Ctrl-D to exit to exit GuaVE.")
            print("----------------------------------------------------")
        self.__input_thread = threading.Thread(target=self.__read_input)
        self.__input_thread.daemon = True
        self.__input_thread.start()
        signal.signal(signal.SIGINT, self.__signal_handler)

    def list_variables(self):
        """Print the shell's variable names in alphabetical order."""
        # BUG FIX: dict.keys() returns a view object in Python 3 which has
        # no .sort() method (this file is Python 3: it uses queue/input).
        # sorted() works on both Python 2 and 3.
        for name in sorted(self.__vars.keys()):
            print(name)

    def evaluate(self):
        """Drain queued input lines into the shell (runs on main thread)."""
        while (not self.__input_queue.empty()):
            # clear line
            sys.stdout.write('\r\033[2K')
            sys.stdout.flush()
            self.__shell.push(self.__input_queue.get())
            # write new prompt
            sys.stdout.write(print_green + self.Prompt.value + print_reset)
            sys.stdout.flush()
            readline.write_history_file(self.HistoryFile.value)

    def __signal_handler(self, signum, frame):
        # Renamed from ``signal`` to avoid shadowing the signal module.
        print("Bye!")
        sys.exit(0)

    def __read_input(self):
        """Background thread: readline loop feeding the input queue."""
        readline.set_completer(rlcompleter.Completer(self.__vars).complete)
        readline.parse_and_bind("tab: complete")
        if os.path.exists(self.HistoryFile.value):
            readline.read_history_file(self.HistoryFile.value)
        while (True):
            try:
                # \001/\002 mark non-printing escape codes so readline
                # computes the prompt width correctly.
                line = input('\001' + print_green + '\002' + self.Prompt.value
                             + '\001' + print_reset + '\002')
                self.__input_queue.put(line)
            except EOFError:
                print("Bye")  #, press Ctrl-C to kill guacamole...")
                os._exit(0)
            except IOError as err:
                print("I/O error: {0}".format(err))
                os._exit(1)
            except:
                print("Unexpected error:", sys.exc_info()[0])
                os._exit(1)
| lgpl-3.0 |
Rudloff/youtube-dl | youtube_dl/extractor/dplay.py | 20 | 6579 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
import time
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
update_url_query,
)
class DPlayIE(InfoExtractor):
    """Extractor for dplay.com video pages (it/dk/se/no variants).

    Resolves the numeric video id from the page, fetches its metadata
    JSON, and collects HLS/HDS formats either via the geo-cookie-assisted
    secure API (Nordic domains) or from direct unsigned manifest URLs.
    """
    _VALID_URL = r'https?://(?P<domain>it\.dplay\.com|www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)'
    _TESTS = [{
        # geo restricted, via direct unsigned hls URL
        'url': 'http://it.dplay.com/take-me-out/stagione-1-episodio-25/',
        'info_dict': {
            'id': '1255600',
            'display_id': 'stagione-1-episodio-25',
            'ext': 'mp4',
            'title': 'Episodio 25',
            'description': 'md5:cae5f40ad988811b197d2d27a53227eb',
            'duration': 2761,
            'timestamp': 1454701800,
            'upload_date': '20160205',
            'creator': 'RTIT',
            'series': 'Take me out',
            'season_number': 1,
            'episode_number': 25,
            'age_limit': 0,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        # non geo restricted, via secure api, unsigned download hls URL
        'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
        'info_dict': {
            'id': '3172',
            'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
            'ext': 'mp4',
            'title': 'Svensken lär sig njuta av livet',
            'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8',
            'duration': 2650,
            'timestamp': 1365454320,
            'upload_date': '20130408',
            'creator': 'Kanal 5 (Home)',
            'series': 'Nugammalt - 77 händelser som format Sverige',
            'season_number': 1,
            'episode_number': 1,
            'age_limit': 0,
        },
    }, {
        # geo restricted, via secure api, unsigned download hls URL
        'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/',
        'info_dict': {
            'id': '70816',
            'display_id': 'season-6-episode-12',
            'ext': 'mp4',
            'title': 'Episode 12',
            'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90',
            'duration': 2563,
            'timestamp': 1429696800,
            'upload_date': '20150422',
            'creator': 'Kanal 4 (Home)',
            'series': 'Mig og min mor',
            'season_number': 6,
            'episode_number': 12,
            'age_limit': 0,
        },
    }, {
        # geo restricted, via direct unsigned hls URL
        'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')
        domain = mobj.group('domain')

        webpage = self._download_webpage(url, display_id)

        # The numeric id is embedded in the page as a data attribute.
        video_id = self._search_regex(
            r'data-video-id=["\'](\d+)', webpage, 'video id')

        info = self._download_json(
            'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id),
            video_id)['data'][0]

        title = info['title']

        PROTOCOLS = ('hls', 'hds')
        formats = []

        def extract_formats(protocol, manifest_url):
            # Closure appending extracted formats for one protocol to
            # the shared ``formats`` list.
            if protocol == 'hls':
                m3u8_formats = self._extract_m3u8_formats(
                    manifest_url, video_id, ext='mp4',
                    entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False)
                # Sometimes final URLs inside m3u8 are unsigned, let's fix this
                # ourselves
                query = compat_urlparse.parse_qs(compat_urlparse.urlparse(manifest_url).query)
                for m3u8_format in m3u8_formats:
                    m3u8_format['url'] = update_url_query(m3u8_format['url'], query)
                formats.extend(m3u8_formats)
            elif protocol == 'hds':
                formats.extend(self._extract_f4m_formats(
                    manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0',
                    video_id, f4m_id=protocol, fatal=False))

        domain_tld = domain.split('.')[-1]
        if domain_tld in ('se', 'dk', 'no'):
            for protocol in PROTOCOLS:
                # Providing dsc-geo allows to bypass geo restriction in some cases
                self._set_cookie(
                    'secure.dplay.%s' % domain_tld, 'dsc-geo',
                    json.dumps({
                        'countryCode': domain_tld.upper(),
                        'expiry': (time.time() + 20 * 60) * 1000,
                    }))
                stream = self._download_json(
                    'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s'
                    % (domain_tld, video_id, protocol), video_id,
                    'Downloading %s stream JSON' % protocol, fatal=False)
                if stream and stream.get(protocol):
                    extract_formats(protocol, stream[protocol])

        # The last resort is to try direct unsigned hls/hds URLs from info dictionary.
        # Sometimes this does work even when secure API with dsc-geo has failed (e.g.
        # http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/).
        if not formats:
            for protocol in PROTOCOLS:
                if info.get(protocol):
                    extract_formats(protocol, info[protocol])

        self._sort_formats(formats)

        # Subtitle URLs live in flat info keys like 'subtitles_sv_srt'.
        subtitles = {}
        for lang in ('se', 'sv', 'da', 'nl', 'no'):
            for format_id in ('web_vtt', 'vtt', 'srt'):
                subtitle_url = info.get('subtitles_%s_%s' % (lang, format_id))
                if subtitle_url:
                    subtitles.setdefault(lang, []).append({'url': subtitle_url})

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': info.get('video_metadata_longDescription'),
            'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
            'timestamp': int_or_none(info.get('video_publish_date')),
            'creator': info.get('video_metadata_homeChannel'),
            'series': info.get('video_metadata_show'),
            'season_number': int_or_none(info.get('season')),
            'episode_number': int_or_none(info.get('episode')),
            'age_limit': int_or_none(info.get('minimum_age')),
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
RyanHope/PyeMovements | crisp.py | 1 | 8615 | #!/usr/bin/env python2
#=====================#
# crisp.py #
#=====================#
import sys,os
import itertools
import types
import simpy
import numpy as np
class Timer(object):
    """Random-walk saccade timer (CRISP model, presumably — verify).

    Walks through ``states`` exponentially-timed states; on completing a
    walk it interrupts the labile stage's process to start a new saccade
    program, then restarts from state 0.
    """
    __alias__ = "timer"  # tag used in env.log records

    def __init__(self, env, labile, mean=.250, states=11, start_state=0, rate=1.0):
        self.env = env
        self.labile = labile
        self.setStates(states)
        self.setMean(mean)
        self.setRate(rate)
        # start_state == -1 means "pick a random starting state";
        # values past the last state are clamped to the final state.
        if start_state == -1:
            self.start_state = np.random.randint(self.states)
        elif start_state >= self.states:
            self.start_state = self.states - 1
        else:
            self.start_state = start_state
        self.process = env.process(self.run())

    def setStates(self, states):
        """Set the number of walk states (logged)."""
        self.states = states
        self.env.log(0, self.__alias__, "set_states", self.states)

    def setMean(self, mean):
        """Set the mean duration of a full walk in seconds (logged)."""
        self.mean = mean
        self.env.log(0, self.__alias__, "set_mean", self.mean)

    def setRate(self, rate):
        """Set the rate multiplier applied to state transitions (logged)."""
        self.rate = rate
        self.env.log(0, self.__alias__, "set_rate", self.rate)

    def next_state(self):
        # Exponential delay with mean mean/(states*rate), sampled by
        # inverse-transform from a uniform variate.
        d = -(1/((self.states/self.mean)*self.rate))
        yield self.env.timeout(d*np.log(1-np.random.uniform()))

    def run(self):
        # i counts completed walks; j walks the states of walk i.
        for i in itertools.count(1):
            for j in itertools.count(self.start_state):
                if j < self.states:
                    yield self.env.process(self.next_state())
                    self.env.log(i, self.__alias__, "next_state", j, self.states)
                else:
                    break
            self.env.log(i, self.__alias__, "reset")
            # Rate modulation only lasts one walk; restore the default.
            self.setRate(1.0)
            self.labile.process.interrupt(i)
            self.start_state = 0
class LabileProg(object):
    """Labile stage of saccade programming (cancellable).

    Waits idle until the Timer interrupts it with a walk id; then runs a
    gamma-distributed programming interval.  A new interrupt before
    completion restarts the stage (counted in ``restarts``); an
    interrupt with cause -1 cancels it outright.  On completion it
    interrupts the non-labile stage.
    """
    __alias__ = "labile_programming"  # tag used in env.log records

    def __init__(self, env, nonlabile, attn=None, mean=.180, stdev=3, alpha=1):
        self.env = env
        self.nonlabile = nonlabile
        self.attn = attn
        self.setAlpha(alpha)
        self.setMean(mean)
        self.setStdev(stdev)
        self.next_event = 0
        self.process = env.process(self.run())
        self.restarts = 0
        self.target = 0
        self.spid = -1 # since env could trigger cancel

    def getTarget(self):
        # Hook for subclasses to choose a saccade target; no-op here.
        pass

    def setAlpha(self, alpha):
        """Set the alpha parameter (logged)."""
        self.alpha = alpha
        self.env.log(0, self.__alias__, "set_alpha", self.alpha)

    def setMean(self, mean):
        """Set the mean programming duration in seconds (logged)."""
        self.mean = mean
        self.env.log(0, self.__alias__, "set_mean", self.mean)

    def setStdev(self, stdev):
        # NOTE: stdev is interpreted as a divisor of the mean, not an
        # absolute value: actual stdev = mean / stdev.
        self.stdev = self.mean/stdev
        self.env.log(0, self.__alias__, "set_stdev", self.stdev)

    def run(self):
        while True:
            # next_event == 0 means idle: sleep until interrupted.
            if self.next_event == 0: self.next_event = simpy.core.Infinity
            while self.next_event:
                try:
                    self.getTarget()
                    yield self.env.timeout(self.next_event)
                    self.next_event = 0
                except simpy.Interrupt as e:
                    if e.cause == -1:
                        # Cancellation: drop the program and go idle.
                        self.env.log(self.spid, self.__alias__, "canceled")
                        self.next_event = simpy.core.Infinity
                    else:
                        if self.next_event < simpy.core.Infinity:
                            # Interrupted mid-program: restart.
                            self.env.log(self.spid, self.__alias__, "restarted")
                            self.restarts += 1
                        self.spid = e.cause
                        # Gamma with given mean and stdev via shape/scale.
                        mm = self.mean*self.mean
                        ss = self.stdev*self.stdev
                        self.next_event = np.random.gamma(mm/ss,ss/self.mean)
                        self.env.log(self.spid, self.__alias__, "started")
                        #self.getTarget()
            self.env.log(self.spid, self.__alias__, "complete", self.restarts, self.target)
            self.restarts = 0
            # Hand the finished program to the non-labile stage.
            self.nonlabile.process.interrupt((self.spid,self.target))
class NonLabileProg(object):
    """Non-labile stage of saccade programming (not cancellable).

    Started by LabileProg with a (program id, target) cause; runs a
    gamma-distributed interval, counting restarts if re-interrupted, and
    on completion interrupts the saccade-execution stage.
    """
    __alias__ = "nonlabile_programming"  # tag used in env.log records

    def __init__(self, env, sp, mean=.040, stdev=3):
        self.env = env
        self.sp = sp
        self.setMean(mean)
        self.setStdev(stdev)
        self.next_event = 0
        self.process = env.process(self.run())
        self.restarts = 0

    def setMean(self, mean):
        """Set the mean stage duration in seconds (logged)."""
        self.mean = mean
        self.env.log(0, self.__alias__, "set_mean", self.mean)

    def setStdev(self, stdev):
        # stdev is a divisor of the mean: actual stdev = mean / stdev.
        self.stdev = self.mean/stdev
        self.env.log(0, self.__alias__, "set_stdev", self.stdev)

    def run(self):
        while True:
            # next_event == 0 means idle: sleep until interrupted.
            if self.next_event == 0: self.next_event = simpy.core.Infinity
            while self.next_event:
                try:
                    yield self.env.timeout(self.next_event)
                    self.next_event = 0
                except simpy.Interrupt as e:
                    if self.next_event < simpy.core.Infinity:
                        # Interrupted mid-stage: restart.
                        self.env.log(self.spid, self.__alias__, "restarted")
                        self.restarts += 1
                    self.spid, self.target = e.cause
                    # Gamma with given mean and stdev via shape/scale.
                    mm = self.mean*self.mean
                    ss = self.stdev*self.stdev
                    self.next_event = np.random.gamma(mm/ss,ss/self.mean)
                    self.env.log(self.spid, self.__alias__, "started", self.target)
            self.env.log(self.spid, self.__alias__, "complete", self.target)
            self.restarts = 0
            # Hand off to saccade execution.
            self.sp.process.interrupt((self.spid, self.target))
class SaccadeExec(object):
    """Saccade execution stage.

    Started by NonLabileProg with a (program id, position) cause; an
    interrupt arriving during execution merges into the current saccade
    (counted in ``mergers``).  Notifies ProcessVision at saccade start
    (cause flag True) and end (flag False).
    """
    __alias__ = "saccade_execution"  # tag used in env.log records

    def __init__(self, env, pv, mean=.040, stdev=3):
        self.env = env
        self.pv = pv
        self.setMean(mean)
        self.setStdev(stdev)
        self.next_event = 0
        self.process = env.process(self.run())
        self.saccades = 0
        self.mergers = 0
        self.setPosition()

    def setPosition(self):
        # Reset the tracked eye position; 0 appears to be the neutral
        # value — TODO confirm intended semantics.
        self.position = 0

    def setMean(self, mean):
        """Set the mean execution duration in seconds (logged)."""
        self.mean = mean
        self.env.log(0, self.__alias__, "set_mean", self.mean)

    def setStdev(self, stdev):
        # stdev is a divisor of the mean: actual stdev = mean / stdev.
        self.stdev = self.mean/stdev
        self.env.log(0, self.__alias__, "set_stdev", self.stdev)

    def run(self):
        while True:
            # next_event == 0 means idle: sleep until interrupted.
            if self.next_event == 0: self.next_event = simpy.core.Infinity
            while self.next_event:
                try:
                    yield self.env.timeout(self.next_event)
                    self.next_event = 0
                except simpy.Interrupt as e:
                    self.spid, self.position = e.cause
                    if self.next_event < simpy.core.Infinity:
                        # New program arrived mid-saccade: merge it.
                        self.env.log(self.spid, self.__alias__, "merged")
                        self.mergers += 1
                    # Gamma with given mean and stdev via shape/scale.
                    mm = self.mean*self.mean
                    ss = self.stdev*self.stdev
                    self.next_event = np.random.gamma(mm/ss,ss/self.mean)
                    self.saccades += 1
                    # Tell vision a saccade started (True flag).
                    self.pv.process.interrupt((self.spid, True))
                    self.env.log(self.spid, self.__alias__, "started",
                                 self.mergers, self.saccades, self.position)
            self.setPosition()
            self.env.log(self.spid, self.__alias__, "complete",
                         self.mergers, self.saccades, self.position)
            self.mergers = 0
            # Tell vision the saccade ended (False flag).
            self.pv.process.interrupt((self.spid, False))
class ProcessVision(object):
    """Tracks fixations, driven by SaccadeExec interrupts.

    An interrupt cause of (spid, True) marks the end of the current
    fixation (a saccade started); (spid, False) marks a new fixation
    beginning (the saccade landed).
    """

    def __init__(self, env):
        self.env = env
        self.process = env.process(self.run())
        self.fixations = 1

    def run(self):
        self.env.log(1, "fixation", "started")
        while True:
            try:
                # Sleep forever; state changes only arrive as interrupts.
                yield self.env.timeout(simpy.core.Infinity)
            except simpy.Interrupt as e:
                if e.cause[1]:
                    self.env.log(-1, "fixation", "complete", self.fixations)
                else:
                    self.fixations += 1
                    self.env.log(-1, "fixation", "started", self.fixations)
class CRISPEnvironment(simpy.Environment):
    """simpy Environment with event logging and a stop predicate.

    NOTE(review): ``log`` calls ``self.efun`` which is only set by
    ``run_while`` — logging before run_while starts would raise
    AttributeError; presumably setup logging happens only after efun is
    installed — verify against callers.  ``stop`` mixes -1 (setup),
    0 (running) and True (stop requested).
    """

    def __init__(self, args, initial_time=0.0):
        # ``args`` is accepted but unused here — kept for caller
        # compatibility, presumably command-line options.
        super(CRISPEnvironment, self).__init__(initial_time)
        self.debug = False
        self.stop = -1

    def log(self, *args):
        """Build a log record, feed it to the stop predicate, return it."""
        if self.stop==-1:
            # Before the simulation runs, timestamp records with -1.
            e = [-1] + list(args)
        else:
            e = [self.now] + list(args)
        # The predicate returning truthy requests simulation stop.
        if self.efun(e): self.stop = True
        if self.debug:
            sys.stderr.write(str(e))
            sys.stderr.write("\n")
            sys.stderr.flush()
        return e

    def run_while(self, efun):
        """Step the simulation until the predicate ``efun`` stops it."""
        self.efun = efun
        self.stop = 0
        while self.stop==0: self.step()
| gpl-3.0 |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/predictor/saved_model_predictor.py | 55 | 6579 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from a `SavedModel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from tensorflow.contrib.predictor import predictor
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
DEFAULT_TAGS = 'serve'
_DEFAULT_INPUT_ALTERNATIVE_FORMAT = 'default_input_alternative:{}'
def get_meta_graph_def(saved_model_dir, tags):
  """Gets `MetaGraphDef` from a directory containing a `SavedModel`.

  Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel.
    tags: Comma separated list of tags used to identify the correct
      `MetaGraphDef`.

  Raises:
    ValueError: An error when the given tags cannot be found.

  Returns:
    A `MetaGraphDef` corresponding to the given tags.
  """
  wanted_tags = {tag.strip() for tag in tags.split(',')}
  saved_model = reader.read_saved_model(saved_model_dir)
  for meta_graph_def in saved_model.meta_graphs:
    if set(meta_graph_def.meta_info_def.tags) == wanted_tags:
      return meta_graph_def
  raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))
def _get_signature_def(signature_def_key, export_dir, tags):
  """Construct a `SignatureDef` proto.

  Looks up `signature_def_key` (defaulting to the serving signature key)
  in the MetaGraphDef found under `export_dir` for `tags`.  If the plain
  key is missing, retries with the legacy
  'default_input_alternative:<key>' form before giving up.
  """
  signature_def_key = (
      signature_def_key or
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

  metagraph_def = get_meta_graph_def(export_dir, tags)

  try:
    signature_def = signature_def_utils.get_signature_def_by_key(
        metagraph_def,
        signature_def_key)
  except ValueError as e:
    # Fall back to the legacy input-alternative key format.
    try:
      formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
          signature_def_key)
      signature_def = signature_def_utils.get_signature_def_by_key(
          metagraph_def, formatted_key)

      logging.warning('Could not find signature def "%s". '
                      'Using "%s" instead', signature_def_key, formatted_key)
    except ValueError:
      # Neither form exists: report available signatures and the
      # original lookup error.
      raise ValueError(
          'Got signature_def_key "{}". Available signatures are {}. '
          'Original error:\n{}'.format(
              signature_def_key, list(metagraph_def.signature_def), e))
  return signature_def
def _check_signature_arguments(signature_def_key,
signature_def,
input_names,
output_names):
"""Validates signature arguments for `SavedModelPredictor`."""
signature_def_key_specified = signature_def_key is not None
signature_def_specified = signature_def is not None
input_names_specified = input_names is not None
output_names_specified = output_names is not None
if input_names_specified != output_names_specified:
raise ValueError(
'input_names and output_names must both be specified or both be '
'unspecified.'
)
if (signature_def_key_specified + signature_def_specified +
input_names_specified > 1):
raise ValueError(
'You must specify at most one of signature_def_key OR signature_def OR'
'(input_names AND output_names).'
)
class SavedModelPredictor(predictor.Predictor):
  """A `Predictor` constructed from a `SavedModel`."""

  def __init__(self,
               export_dir,
               signature_def_key=None,
               signature_def=None,
               input_names=None,
               output_names=None,
               tags=None,
               graph=None):
    """Initialize a `CoreEstimatorPredictor`.

    Args:
      export_dir: a path to a directory containing a `SavedModel`.
      signature_def_key: Optional string specifying the signature to use. If
        `None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
        `signature_def_key` and `signature_def` should be specified.
      signature_def: A `SignatureDef` proto specifying the inputs and outputs
        for prediction. Only one of `signature_def_key` and `signature_def`
        should be specified.
      input_names: A dictionary mapping strings to `Tensor`s in the `SavedModel`
        that represent the input. The keys can be any string of the user's
        choosing.
      output_names: A dictionary mapping strings to `Tensor`s in the
        `SavedModel` that represent the output. The keys can be any string of
        the user's choosing.
      tags: Optional. Comma separated list of tags that will be used to retrieve
        the correct `SignatureDef`. Defaults to `DEFAULT_TAGS`.
      graph: Optional. The Tensorflow `graph` in which prediction should be
        done.

    Raises:
      ValueError: If more than one of signature_def_key OR signature_def OR
        (input_names AND output_names) is specified.
    """
    _check_signature_arguments(
        signature_def_key, signature_def, input_names, output_names)
    tags = tags or DEFAULT_TAGS
    self._graph = graph or ops.Graph()

    # Restore the SavedModel's variables and graph into a fresh session.
    with self._graph.as_default():
      self._session = session.Session()
      loader.load(self._session, tags.split(','), export_dir)

    # If explicit tensor names were not given, derive them from the
    # requested (or default) signature.
    if input_names is None:
      if signature_def is None:
        signature_def = _get_signature_def(signature_def_key, export_dir, tags)
      input_names = {k: v.name for k, v in signature_def.inputs.items()}
      output_names = {k: v.name for k, v in signature_def.outputs.items()}

    # Resolve tensor names to actual graph tensors once, up front.
    self._feed_tensors = {k: self._graph.get_tensor_by_name(v)
                          for k, v in input_names.items()}
    self._fetch_tensors = {k: self._graph.get_tensor_by_name(v)
                           for k, v in output_names.items()}
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_status_code_count.py | 5 | 1317 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineStatusCodeCount(Model):
    """The status code and count of the virtual machine scale set instance view
    status summary.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: The instance view status code.
    :vartype code: str
    :ivar count: The number of instances having a particular status code.
    :vartype count: int
    """

    # msrest validation rules: both fields are server-populated and thus
    # read-only (client-supplied values are rejected).
    _validation = {
        'code': {'readonly': True},
        'count': {'readonly': True},
    }

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineStatusCodeCount, self).__init__(**kwargs)
        # Read-only fields start as None; the service fills them in on
        # deserialization.
        self.code = None
        self.count = None
| mit |
shadowmint/python-nark | src/nark/assets.py | 2 | 2316 | # Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
class Assets():
  """ Helper for resolving paths in a convenient manner """

  # The path which is the universal root for this assets object.
  __base = ""

  def __init__(self, base=""):
    """ Base is the path path for the loader; for getcwd() if not provided """
    root = os.getcwd() if base == "" else base
    self.__base = os.path.abspath(root)

  def resolve(self, *args):
    """ Appropriately resolves a path in the form blah, blah, blah.

        Base is attached as the root to this set of path elements.
        Raises BadFileException on failure to find path.
    """
    full = os.path.join(self.__base, *args)
    if not self.__exists(full):
      raise BadFileException("Invalid file: '%s'" % full)
    return full

  def new(self, *args):
    """ Returns full path to a new file, if its a valid new file.

        Base is attached as the root to this set of path elements.
        Raises BadFileException on failure to find parent path.
    """
    full = os.path.join(self.__base, *args)
    if len(args) > 1:
      # Only the parent directory must already exist, not the leaf.
      parent = os.path.join(self.__base, *args[:-1])
      if not self.__exists(parent):
        raise BadFileException("Invalid parent path: '%s'" % full)
    return full

  def exists(self, *args):
    """ Returns the resolved path, or False if the file does not exist """
    full = os.path.join(self.__base, *args)
    return full if self.__exists(full) else False

  def __exists(self, path):
    """ Check a path exists (directory, or readable file) """
    if os.path.isdir(path):
      return True
    return os.path.isfile(path) and os.access(path, os.R_OK)

  def base(self):
    """ Returns own base path """
    return self.__base
class BadFileException(Exception):
    """Raised by Assets when a requested path is missing or unreadable."""
    pass
| apache-2.0 |
avocado-framework/avocado-vt | virttest/unittests/test_utils_test__init__.py | 4 | 1926 | import unittest
import logging
try:
from unittest import mock
except ImportError:
import mock
from virttest.utils_test import update_boot_option
from virttest import utils_test
from avocado.core import exceptions
check_kernel_cmdline_mock = mock.MagicMock(return_value=["3", None])
@mock.patch('virttest.utils_package.package_install')
@mock.patch.object(utils_test, 'check_kernel_cmdline', check_kernel_cmdline_mock)
class TestUpdateBootOptionZipl(unittest.TestCase):
    """Tests for utils_test.update_boot_option, with and without zipl."""

    vm = mock.MagicMock()
    session = mock.MagicMock()
    # login_timeout
    vm.params.get.return_value = "0"
    # mocked session, always succeed
    vm.wait_for_login.return_value = session
    session.cmd_status_output.return_value = [0, ""]

    def tearDown(self):
        # Class-level mocks are shared across tests; reset call counters.
        check_kernel_cmdline_mock.reset_mock()
        self.session.cmd_status_output.reset_mock()

    def test_args_no_zipl(self, *mocks):
        """Non-s390x guests update the kernel cmdline with a single command."""
        update_boot_option(self.vm, args_added="3", need_reboot=False)
        utils_test.check_kernel_cmdline.assert_called_once()
        self.session.cmd_status_output.assert_called_once()

    def test_args_zipl(self, *mocks):
        """s390x guests issue an extra command (running zipl)."""
        update_boot_option(self.vm, args_added="3", need_reboot=False, guest_arch_name="s390x")
        utils_test.check_kernel_cmdline.assert_called_once()
        self.assertEqual(2, self.session.cmd_status_output.call_count)

    # Test error handling for session.cmd_status_output
    some_error_message = "some error"

    @mock.patch.object(utils_test.logging, 'error')
    def test_cmd_fail(self, *mocks):
        """A failing guest command raises TestError and logs its output."""
        self.session.cmd_status_output.return_value = [1, self.some_error_message]
        with self.assertRaises(exceptions.TestError) as e:
            update_boot_option(self.vm, args_added="3", need_reboot=False)
        self.assertIsNotNone(e.exception.args[0])
        logging.error.assert_called_with(self.some_error_message)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
zbraniecki/translate | translate/lang/km.py | 29 | 1949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Khmer language.
.. seealso:: http://en.wikipedia.org/wiki/Khmer_language
"""
import re
from translate.lang import common
class km(common.Common):
    """This class represents Khmer."""

    khmerpunc = u"។៕៖៘"
    """These marks are only used for Khmer."""

    # Full punctuation set: the common Latin punctuation plus the
    # Khmer-specific marks above.
    punctuation = u"".join([common.Common.commonpunc, common.Common.quotes,
                            common.Common.miscpunc, khmerpunc])

    sentenceend = u"!?…។៕៘"

    sentencere = re.compile(r"""(?s) #make . also match newlines
                            .*? #anything, but match non-greedy
                            [%s] #the puntuation for sentence ending
                            \s+ #the spacing after the puntuation
                            (?=[^a-z\d])#lookahead that next part starts with caps
                            """ % sentenceend, re.VERBOSE)

    #\u00a0 is non-breaking space
    # Khmer typography places a non-breaking space before these marks,
    # and '.'/':' map to their Khmer equivalents.
    puncdict = {
        u".": u"\u00a0។",
        u":": u"\u00a0៖",
        u"!": u"\u00a0!",
        u"?": u"\u00a0?",
    }

    # Capitalisation checks do not apply to a script without case.
    ignoretests = ["startcaps", "simplecaps"]

    mozilla_nplurals = 2
    mozilla_pluralequation = "n!=1 ? 1 : 0"
| gpl-2.0 |
PetePriority/home-assistant | tests/components/notify/test_facebook.py | 4 | 4087 | """The test for the Facebook notify module."""
import unittest
import requests_mock
import homeassistant.components.notify.facebook as facebook
class TestFacebook(unittest.TestCase):
    """Tests for Facebook notification service."""

    def setUp(self):
        """Set up test variables."""
        access_token = "page-access-token"
        self.facebook = facebook.FacebookNotificationService(access_token)

    @requests_mock.Mocker()
    def test_send_simple_message(self, mock):
        """Test sending a simple message with success."""
        mock.register_uri(
            requests_mock.POST,
            facebook.BASE_URL,
            status_code=200
        )
        message = "This is just a test"
        target = ["+15555551234"]

        self.facebook.send_message(message=message, target=target)
        assert mock.called
        assert mock.call_count == 1

        # The Graph API expects recipient + message objects in the body.
        expected_body = {
            "recipient": {"phone_number": target[0]},
            "message": {"text": message}
        }
        assert mock.last_request.json() == expected_body

        # The page access token must be passed as a query parameter.
        expected_params = {"access_token": ["page-access-token"]}
        assert mock.last_request.qs == expected_params

    @requests_mock.Mocker()
    def test_sending_multiple_messages(self, mock):
        """Test sending a message to multiple targets."""
        mock.register_uri(
            requests_mock.POST,
            facebook.BASE_URL,
            status_code=200
        )
        message = "This is just a test"
        targets = ["+15555551234", "+15555551235"]

        self.facebook.send_message(message=message, target=targets)
        assert mock.called
        assert mock.call_count == 2

        # One request per target, each with its own recipient.
        for idx, target in enumerate(targets):
            request = mock.request_history[idx]
            expected_body = {
                "recipient": {"phone_number": target},
                "message": {"text": message}
            }
            assert request.json() == expected_body
            expected_params = {"access_token": ["page-access-token"]}
            assert request.qs == expected_params

    @requests_mock.Mocker()
    def test_send_message_attachment(self, mock):
        """Test sending a message with a remote attachment."""
        mock.register_uri(
            requests_mock.POST,
            facebook.BASE_URL,
            status_code=200
        )
        # When attachment data is given, the plain text message is dropped.
        message = "This will be thrown away."
        data = {
            "attachment": {
                "type": "image",
                "payload": {"url": "http://www.example.com/image.jpg"}
            }
        }
        target = ["+15555551234"]

        self.facebook.send_message(message=message, data=data, target=target)
        assert mock.called
        assert mock.call_count == 1

        expected_body = {
            "recipient": {"phone_number": target[0]},
            "message": data
        }
        assert mock.last_request.json() == expected_body

        expected_params = {"access_token": ["page-access-token"]}
        assert mock.last_request.qs == expected_params

    @requests_mock.Mocker()
    def test_send_targetless_message(self, mock):
        """Test sending a message without a target."""
        mock.register_uri(
            requests_mock.POST,
            facebook.BASE_URL,
            status_code=200
        )
        # Without a target, no request should be made at all.
        self.facebook.send_message(message="goin nowhere")
        assert not mock.called

    @requests_mock.Mocker()
    def test_send_message_with_400(self, mock):
        """Test sending a message with a 400 from Facebook."""
        mock.register_uri(
            requests_mock.POST,
            facebook.BASE_URL,
            status_code=400,
            json={
                "error": {
                    "message": "Invalid OAuth access token.",
                    "type": "OAuthException",
                    "code": 190,
                    "fbtrace_id": "G4Da2pFp2Dp"
                }
            }
        )
        # The service should attempt the request (and handle the error
        # without raising).
        self.facebook.send_message(message="nope!", target=["+15555551234"])
        assert mock.called
        assert mock.call_count == 1
| apache-2.0 |
lukeburden/django-allauth | allauth/socialaccount/providers/patreon/views.py | 3 | 2164 | """
Views for PatreonProvider
https://www.patreon.com/platform/documentation/oauth
"""
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import API_URL, USE_API_V2, PatreonProvider
class PatreonOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for Patreon.

    https://www.patreon.com/platform/documentation/oauth
    """

    provider_id = PatreonProvider.id
    access_token_url = 'https://www.patreon.com/api/oauth2/token'
    authorize_url = 'https://www.patreon.com/oauth2/authorize'
    # API v2 uses /identity with explicit sparse fieldsets; v1 uses
    # /current_user, which returns its default field set.
    profile_url = '{0}/{1}'.format(
        API_URL,
        'identity?include=memberships&fields%5Buser%5D=email,first_name,'
        'full_name,image_url,last_name,social_connections,'
        'thumb_url,url,vanity' if USE_API_V2
        else 'current_user')

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's profile and build a social login from it.

        For API v2 the member's currently entitled tier title is also
        resolved into ``extra_data["pledge_level"]``; it is set to
        ``None`` when the user has no membership or no entitled tier.
        """
        resp = requests.get(self.profile_url,
                            headers={'Authorization': 'Bearer ' + token.token})
        extra_data = resp.json().get('data')

        if USE_API_V2:
            # Extract tier/pledge level for Patreon API v2:
            try:
                member_id = extra_data['relationships']['memberships']['data'][
                    0]['id']
                member_url = ('{0}/members/{1}?include='
                              'currently_entitled_tiers&fields%5Btier%5D=title'
                              ).format(API_URL, member_id)
                resp_member = requests.get(member_url,
                                           headers={'Authorization': 'Bearer '
                                                    + token.token})
                pledge_title = resp_member.json(
                )['included'][0]['attributes']['title']
                extra_data["pledge_level"] = pledge_title
            except (KeyError, IndexError):
                # No membership / no entitled tier for this user.
                # (A redundant dead `pass` statement after this
                # assignment was removed.)
                extra_data["pledge_level"] = None

        return self.get_provider().sociallogin_from_response(request,
                                                             extra_data)
# Module-level login/callback views built from the adapter, following
# allauth's standard OAuth2 provider wiring.
oauth2_login = OAuth2LoginView.adapter_view(PatreonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PatreonOAuth2Adapter)
| mit |
naokimiyasaka/sublime-text | Backup/20140325101416/ConvertToUTF8/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Bundled single-byte charset model for TIS-620 (Thai) consumed by
# chardet's single-byte charset prober.
TIS620ThaiModel = {
  'charToOrderMap': TIS620CharToOrderMap,  # byte value -> frequency-order index
  'precedenceMatrix': ThaiLangModel,  # pair likelihood categories (values 0-3)
  'mTypicalPositiveRatio': 0.926386,  # share covered by the first 512 sequences
  'keepEnglishLetter': False,
  'charsetName': "TIS-620"
}
# flake8: noqa
| mit |
oliverlee/sympy | sympy/utilities/tests/test_iterables.py | 58 | 26629 | from __future__ import print_function
from textwrap import dedent
from sympy import (
symbols, Integral, Tuple, Dummy, Basic, default_sort_key, Matrix,
factorial, true)
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.core.compatibility import range
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, dict_merge, filter_symbols,
flatten, generate_bell, generate_derangements, generate_involutions,
generate_oriented_forest, group, has_dups, kbins, minlex, multiset,
multiset_combinations, multiset_partitions,
multiset_permutations, necklaces, numbered_symbols, ordered, partitions,
permutations, postfixes, postorder_traversal, prefixes, reshape,
rotate_left, rotate_right, runs, sift, subsets, take, topological_sort,
unflatten, uniq, variations)
from sympy.utilities.enumerative import (
factoring_visitor, multiset_partitions_taocp )
from sympy.core.singleton import S
from sympy.functions.elementary.piecewise import Piecewise, ExprCondPair
from sympy.utilities.pytest import raises
# Symbols shared by the tests in this module.
w, x, y, z = symbols('w,x,y,z')
def test_postorder_traversal():
    """postorder_traversal visits children before their parents."""
    expr = z + w*(x + y)
    expected = [z, w, x, y, x + y, w*(x + y), w*(x + y) + z]
    assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
    assert list(postorder_traversal(expr, keys=True)) == expected

    expr = Piecewise((x, x < 1), (x**2, True))
    expected = [
        x, 1, x, x < 1, ExprCondPair(x, x < 1),
        2, x, x**2, true,
        ExprCondPair(x**2, True), Piecewise((x, x < 1), (x**2, True))
    ]
    assert list(postorder_traversal(expr, keys=default_sort_key)) == expected
    assert list(postorder_traversal(
        [expr], keys=default_sort_key)) == expected + [[expr]]

    assert list(postorder_traversal(Integral(x**2, (x, 0, 1)),
        keys=default_sort_key)) == [
        2, x, x**2, 0, 1, x, Tuple(x, 0, 1),
        Integral(x**2, Tuple(x, 0, 1))
    ]
    # Plain Python containers are traversed too.
    assert list(postorder_traversal(('abc', ('d', 'ef')))) == [
        'abc', 'd', 'ef', ('d', 'ef'), ('abc', ('d', 'ef'))]


def test_flatten():
    """flatten handles nesting depth limits, sets, and a cls= stop class."""
    assert flatten((1, (1,))) == [1, 1]
    assert flatten((x, (x,))) == [x, x]

    ls = [[(-2, -1), (1, 2)], [(0, 0)]]

    assert flatten(ls, levels=0) == ls
    assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)]
    assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0]
    assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0]

    raises(ValueError, lambda: flatten(ls, levels=-1))

    # flatten stops descending at instances of the given class.
    class MyOp(Basic):
        pass

    assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
    assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]

    assert flatten(set([1, 11, 2])) == list(set([1, 11, 2]))
def test_group():
    """group collapses equal adjacent elements; multiple=False gives counts."""
    assert group([]) == []
    assert group([], multiple=False) == []

    assert group([1]) == [[1]]
    assert group([1], multiple=False) == [(1, 1)]

    assert group([1, 1]) == [[1, 1]]
    assert group([1, 1], multiple=False) == [(1, 2)]

    assert group([1, 1, 1]) == [[1, 1, 1]]
    assert group([1, 1, 1], multiple=False) == [(1, 3)]

    assert group([1, 2, 1]) == [[1], [2], [1]]
    assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)]

    assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]]
    assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2),
        (2, 3), (1, 1), (3, 2)]


def test_subsets():
    """subsets yields k-element combinations, optionally with repetition."""
    # combinations
    assert list(subsets([1, 2, 3], 0)) == [()]
    assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
    assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
    assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
    l = list(range(4))
    assert list(subsets(l, 0, repetition=True)) == [()]
    assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
    assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
                                                   (0, 3), (1, 1), (1, 2),
                                                   (1, 3), (2, 2), (2, 3),
                                                   (3, 3)]
    assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
                                                    (0, 0, 2), (0, 0, 3),
                                                    (0, 1, 1), (0, 1, 2),
                                                    (0, 1, 3), (0, 2, 2),
                                                    (0, 2, 3), (0, 3, 3),
                                                    (1, 1, 1), (1, 1, 2),
                                                    (1, 1, 3), (1, 2, 2),
                                                    (1, 2, 3), (1, 3, 3),
                                                    (2, 2, 2), (2, 2, 3),
                                                    (2, 3, 3), (3, 3, 3)]
    assert len(list(subsets(l, 4, repetition=True))) == 35
    # Asking for more elements than exist without repetition is empty.
    assert list(subsets(l[:2], 3, repetition=False)) == []
    assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
                                                        (0, 0, 1),
                                                        (0, 1, 1),
                                                        (1, 1, 1)]
    # Omitting k yields subsets of every size.
    assert list(subsets([1, 2], repetition=True)) == \
        [(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
    assert list(subsets([1, 2], repetition=False)) == \
        [(), (1,), (2,), (1, 2)]
    assert list(subsets([1, 2, 3], 2)) == \
        [(1, 2), (1, 3), (2, 3)]
    assert list(subsets([1, 2, 3], 2, repetition=True)) == \
        [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
    """variations yields k-permutations, optionally with repetition."""
    # permutations
    l = list(range(4))
    assert list(variations(l, 0, repetition=False)) == [()]
    assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
    assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
    assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
    assert list(variations(l, 0, repetition=True)) == [()]
    assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
    assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
                                                       (0, 3), (1, 0), (1, 1),
                                                       (1, 2), (1, 3), (2, 0),
                                                       (2, 1), (2, 2), (2, 3),
                                                       (3, 0), (3, 1), (3, 2),
                                                       (3, 3)]
    assert len(list(variations(l, 3, repetition=True))) == 64
    assert len(list(variations(l, 4, repetition=True))) == 256
    assert list(variations(l[:2], 3, repetition=False)) == []
    assert list(variations(l[:2], 3, repetition=True)) == [
        (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
        (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
    ]


def test_cartes():
    """cartes is the Cartesian product, including the empty product."""
    assert list(cartes([1, 2], [3, 4, 5])) == \
        [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
    assert list(cartes()) == [()]
    assert list(cartes('a')) == [('a',)]
    assert list(cartes('a', repeat=2)) == [('a', 'a')]
    assert list(cartes(list(range(2)))) == [(0,), (1,)]


def test_filter_symbols():
    """filter_symbols drops the excluded names from a symbol stream."""
    s = numbered_symbols()
    filtered = filter_symbols(s, symbols("x0 x2 x3"))
    assert take(filtered, 3) == list(symbols("x1 x4 x5"))


def test_numbered_symbols():
    """numbered_symbols honors cls=, start= and exclude=."""
    s = numbered_symbols(cls=Dummy)
    assert isinstance(next(s), Dummy)
    assert next(numbered_symbols('C', start=1, exclude=[symbols('C1')])) == \
        symbols('C2')


def test_sift():
    """sift buckets items by the value of a key function."""
    assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
    assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
    assert sift([S.One], lambda _: _.has(x)) == {False: [1]}


def test_take():
    """take consumes n items; an iterator keeps its position between calls."""
    X = numbered_symbols()

    assert take(X, 5) == list(symbols('x0:5'))
    assert take(X, 5) == list(symbols('x5:10'))

    assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
    """dict_merge merges left to right; later dicts win on collisions."""
    assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
    assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}

    assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
    assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z}

    assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
    assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z}


def test_prefixes():
    """prefixes yields the growing leading sublists of a sequence."""
    assert list(prefixes([])) == []
    assert list(prefixes([1])) == [[1]]
    assert list(prefixes([1, 2])) == [[1], [1, 2]]
    assert list(prefixes([1, 2, 3, 4, 5])) == \
        [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]


def test_postfixes():
    """postfixes yields the growing trailing sublists of a sequence."""
    assert list(postfixes([])) == []
    assert list(postfixes([1])) == [[1]]
    assert list(postfixes([1, 2])) == [[2], [1, 2]]
    assert list(postfixes([1, 2, 3, 4, 5])) == \
        [[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]


def test_topological_sort():
    """topological_sort orders a DAG and rejects graphs with cycles."""
    V = [2, 3, 5, 7, 8, 9, 10, 11]
    E = [(7, 11), (7, 8), (5, 11),
         (3, 8), (3, 10), (11, 2),
         (11, 9), (11, 10), (8, 9)]

    assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
    assert topological_sort((V, E), key=lambda v: -v) == \
        [7, 5, 11, 3, 10, 8, 9, 2]
    # Adding the edge (10, 7) creates a cycle.
    raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))


def test_rotate():
    """rotate_left/rotate_right return new lists that never alias the input."""
    A = [0, 1, 2, 3, 4]

    assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
    assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
    A = []
    B = rotate_right(A, 1)
    assert B == []
    # Mutating the result must not touch the original.
    B.append(1)
    assert A == []
    B = rotate_left(A, 1)
    assert B == []
    B.append(1)
    assert A == []
def test_multiset_partitions():
    """multiset_partitions enumerates partitions of lists/multisets/ints."""
    A = [0, 1, 2, 3, 4]
    assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
    assert len(list(multiset_partitions(A, 4))) == 10
    assert len(list(multiset_partitions(A, 3))) == 25
    assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
        [[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
        [[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
    assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
        [[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
        [[1, 2], [1, 2]]]
    assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
        [[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
        [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
        [[1], [2, 3, 4]]]
    assert list(multiset_partitions([1, 2, 2], 2)) == [
        [[1, 2], [2]], [[1], [2, 2]]]

    # An integer n is treated like range(n).
    assert list(multiset_partitions(3)) == [
        [[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
        [[0], [1], [2]]]
    assert list(multiset_partitions(3, 2)) == [
        [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
    assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
    assert list(multiset_partitions([1] * 3)) == [
        [[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
    # Unsorted input is equivalent to its sorted form.
    a = [3, 2, 1]
    assert list(multiset_partitions(a)) == \
        list(multiset_partitions(sorted(a)))
    assert list(multiset_partitions(a, 5)) == []
    assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
    assert list(multiset_partitions(a + [4], 5)) == []
    assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
    assert list(multiset_partitions(2, 5)) == []
    assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
    assert list(multiset_partitions('a')) == [[['a']]]
    assert list(multiset_partitions('a', 2)) == []
    assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
    assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
    assert list(multiset_partitions('aaa', 1)) == [['aaa']]
    assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
    ans = [('mpsyy',), ('mpsy', 'y'), ('mps', 'yy'), ('mps', 'y', 'y'),
           ('mpyy', 's'), ('mpy', 'sy'), ('mpy', 's', 'y'), ('mp', 'syy'),
           ('mp', 'sy', 'y'), ('mp', 's', 'yy'), ('mp', 's', 'y', 'y'),
           ('msyy', 'p'), ('msy', 'py'), ('msy', 'p', 'y'), ('ms', 'pyy'),
           ('ms', 'py', 'y'), ('ms', 'p', 'yy'), ('ms', 'p', 'y', 'y'),
           ('myy', 'ps'), ('myy', 'p', 's'), ('my', 'psy'), ('my', 'ps', 'y'),
           ('my', 'py', 's'), ('my', 'p', 'sy'), ('my', 'p', 's', 'y'),
           ('m', 'psyy'), ('m', 'psy', 'y'), ('m', 'ps', 'yy'),
           ('m', 'ps', 'y', 'y'), ('m', 'pyy', 's'), ('m', 'py', 'sy'),
           ('m', 'py', 's', 'y'), ('m', 'p', 'syy'),
           ('m', 'p', 'sy', 'y'), ('m', 'p', 's', 'yy'),
           ('m', 'p', 's', 'y', 'y')]
    assert list(tuple("".join(part) for part in p)
                for p in multiset_partitions('sympy')) == ans
    factorings = [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3],
                  [6, 2, 2], [2, 2, 2, 3]]
    assert list(factoring_visitor(p, [2,3]) for
                p in multiset_partitions_taocp([3, 1])) == factorings


def test_multiset_combinations():
    """multiset_combinations respects element multiplicities."""
    ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
           'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
    assert [''.join(i) for i in
            list(multiset_combinations('mississippi', 3))] == ans
    M = multiset('mississippi')
    assert [''.join(i) for i in
            list(multiset_combinations(M, 3))] == ans
    # Requesting more elements than exist yields nothing.
    assert [''.join(i) for i in multiset_combinations(M, 30)] == []
    assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
    assert len(list(multiset_combinations('a', 3))) == 0
    assert len(list(multiset_combinations('a', 0))) == 1
    assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
def test_multiset_permutations():
    """multiset_permutations yields distinct permutations, in sorted order."""
    ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
           'byba', 'yabb', 'ybab', 'ybba']
    assert [''.join(i) for i in multiset_permutations('baby')] == ans
    assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
    assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
    assert list(multiset_permutations([0, 2, 1], 2)) == [
        [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
    assert len(list(multiset_permutations('a', 0))) == 1
    assert len(list(multiset_permutations('a', 3))) == 0

    def test():
        # Print every size-i permutation of the multiset {0, 0, 0, 1, 1}.
        for i in range(1, 7):
            print(i)
            for p in multiset_permutations([0, 0, 1, 0, 1], i):
                print(p)
    # Compare captured stdout against the full expected enumeration;
    # size 6 exceeds the multiset so nothing follows the final "6".
    assert capture(lambda: test()) == dedent('''\
    1
    [0]
    [1]
    2
    [0, 0]
    [0, 1]
    [1, 0]
    [1, 1]
    3
    [0, 0, 0]
    [0, 0, 1]
    [0, 1, 0]
    [0, 1, 1]
    [1, 0, 0]
    [1, 0, 1]
    [1, 1, 0]
    4
    [0, 0, 0, 1]
    [0, 0, 1, 0]
    [0, 0, 1, 1]
    [0, 1, 0, 0]
    [0, 1, 0, 1]
    [0, 1, 1, 0]
    [1, 0, 0, 0]
    [1, 0, 0, 1]
    [1, 0, 1, 0]
    [1, 1, 0, 0]
    5
    [0, 0, 0, 1, 1]
    [0, 0, 1, 0, 1]
    [0, 0, 1, 1, 0]
    [0, 1, 0, 0, 1]
    [0, 1, 0, 1, 0]
    [0, 1, 1, 0, 0]
    [1, 0, 0, 0, 1]
    [1, 0, 0, 1, 0]
    [1, 0, 1, 0, 0]
    [1, 1, 0, 0, 0]
    6\n''')
def test_partitions():
    """Integer partitions, bounded by largest part k= and part count m=."""
    assert [p.copy() for p in partitions(6, k=2)] == [
        {2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]

    assert [p.copy() for p in partitions(6, k=3)] == [
        {3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
        {1: 4, 2: 1}, {1: 6}]

    assert [p.copy() for p in partitions(6, k=2, m=2)] == []

    assert [p.copy() for p in partitions(8, k=4, m=3)] == [
        {4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
        i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
        and sum(i.values()) <= 3]

    assert [p.copy() for p in partitions(S(3), m=2)] == [
        {3: 1}, {1: 1, 2: 1}]

    assert [i.copy() for i in partitions(4, k=3)] == [
        {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
        i.copy() for i in partitions(4) if all(k <= 3 for k in i)]

    raises(ValueError, lambda: list(partitions(3, 0)))

    # Consistency check on output of _partitions and RGS_unrank.
    # This provides a sanity test on both routines.  Also verifies that
    # the total number of partitions is the same in each case.
    #    (from pkrathmann2)

    for n in range(2, 6):
        i = 0
        for m, q in _set_partitions(n):
            assert q == RGS_unrank(i, n)
            i = i+1
        assert i == RGS_enum(n)


def test_binary_partitions():
    """binary_partitions yields partitions into powers of two."""
    assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
        [4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
        [4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
        [2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
        [2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]

    assert len([j[:] for j in binary_partitions(16)]) == 36


def test_bell_perm():
    """generate_bell yields all n! permutations of range(n), each once."""
    assert [len(set(generate_bell(i))) for i in range(1, 7)] == [
        factorial(i) for i in range(1, 7)]
    assert list(generate_bell(3)) == [
        (0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)]
    # generate_bell and trotterjohnson are advertised to return the same
    # permutations; this is not technically necessary so this test could
    # be removed
    for n in range(1, 5):
        p = Permutation(range(n))
        b = generate_bell(n)
        for bi in b:
            assert bi == tuple(p.array_form)
            p = p.next_trotterjohnson()
    raises(ValueError, lambda: list(generate_bell(0)))  # XXX is this consistent with other permutation algorithms?


def test_involutions():
    """Known involution counts for n = 1..6; each involution squares equal."""
    lengths = [1, 2, 4, 10, 26, 76]
    for n, N in enumerate(lengths):
        i = list(generate_involutions(n + 1))
        assert len(i) == N
        # All squares coincide (i.e. every generated permutation is an
        # involution).
        assert len(set([Permutation(j)**2 for j in i])) == 1


def test_derangements():
    """generate_derangements leaves no element in its original position."""
    assert len(list(generate_derangements(list(range(6))))) == 265
    assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
        'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
        'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
        'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
        'edbacedbca')
    assert list(generate_derangements([0, 1, 2, 3])) == [
        [1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
        [2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
    # Repeated elements are handled as a multiset.
    assert list(generate_derangements([0, 1, 2, 2])) == [
        [2, 2, 0, 1], [2, 2, 1, 0]]
def test_necklaces():
def count(n, k, f):
return len(list(necklaces(n, k, f)))
m = []
for i in range(1, 8):
m.append((
i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1)))
assert Matrix(m) == Matrix([
[1, 2, 2, 3],
[2, 3, 3, 6],
[3, 4, 4, 10],
[4, 6, 6, 21],
[5, 8, 8, 39],
[6, 14, 13, 92],
[7, 20, 18, 198]])
def test_bracelets():
bc = [i for i in bracelets(2, 4)]
assert Matrix(bc) == Matrix([
[0, 0],
[0, 1],
[0, 2],
[0, 3],
[1, 1],
[1, 2],
[1, 3],
[2, 2],
[2, 3],
[3, 3]
])
bc = [i for i in bracelets(4, 2)]
assert Matrix(bc) == Matrix([
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 1],
[1, 1, 1, 1]
])
def test_generate_oriented_forest():
assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4],
[0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0],
[0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2],
[0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0],
[0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0],
[0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
assert len(list(generate_oriented_forest(10))) == 1842
def test_unflatten():
r = list(range(10))
assert unflatten(r) == list(zip(r[::2], r[1::2]))
assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])]
raises(ValueError, lambda: unflatten(list(range(10)), 3))
raises(ValueError, lambda: unflatten(list(range(10)), -2))
def test_common_prefix_suffix():
    """Test common_prefix/common_suffix of one or more sequences."""
    assert common_prefix([], [1]) == []
    assert common_prefix(list(range(3))) == [0, 1, 2]
    assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2]
    assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2]
    assert common_prefix([1, 2, 3], [1, 3, 5]) == [1]
    assert common_suffix([], [1]) == []
    assert common_suffix(list(range(3))) == [0, 1, 2]
    assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2]
    assert common_suffix(list(range(3)), list(range(4))) == []
    assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3]
    assert common_suffix([1, 2, 3], [9, 7, 3]) == [3]
def test_minlex():
    """Test minlex: lexicographically smallest rotation of a sequence."""
    assert minlex([1, 2, 0]) == (0, 1, 2)
    assert minlex((1, 2, 0)) == (0, 1, 2)
    assert minlex((1, 0, 2)) == (0, 2, 1)
    # directed=False also considers the reversed sequence's rotations.
    assert minlex((1, 0, 2), directed=False) == (0, 1, 2)
    # Strings come back as strings.
    assert minlex('aba') == 'aab'
def test_ordered():
    """Test ordered() with key functions, default fallback and warn flag."""
    # NOTE(review): x and y are presumably Symbol instances imported at
    # module level — not visible in this chunk.
    assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]]
    assert list(ordered((x, y), hash, default=False)) == \
        list(ordered((y, x), hash, default=False))
    assert list(ordered((x, y))) == [x, y]
    seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]],
                 (lambda x: len(x), lambda x: sum(x))]
    assert list(ordered(seq, keys, default=False, warn=False)) == \
        [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
    # warn=True raises when the keys cannot fully disambiguate items.
    raises(ValueError, lambda:
           list(ordered(seq, keys, default=False, warn=True)))
def test_runs():
    """runs() splits a sequence into maximal runs under the given operator."""
    cases = [
        ([], []),
        ([1], [[1]]),
        ([1, 1], [[1], [1]]),
        ([1, 1, 2], [[1], [1, 2]]),
        ([1, 2, 1], [[1, 2], [1]]),
        ([2, 1, 1], [[2], [1], [1]]),
    ]
    for seq, expected in cases:
        assert runs(seq) == expected
    # A caller-supplied comparison changes the run criterion.
    from operator import lt
    assert runs([2, 1, 1], lt) == [[2, 1], [1]]
def test_reshape():
    """Test reshape: repartition a flat sequence following a template
    of nested container shapes (lists, tuples, sets)."""
    seq = list(range(1, 9))
    assert reshape(seq, [4]) == \
        [[1, 2, 3, 4], [5, 6, 7, 8]]
    assert reshape(seq, (4,)) == \
        [(1, 2, 3, 4), (5, 6, 7, 8)]
    assert reshape(seq, (2, 2)) == \
        [(1, 2, 3, 4), (5, 6, 7, 8)]
    # The template's container types are reproduced in the output.
    assert reshape(seq, (2, [2])) == \
        [(1, 2, [3, 4]), (5, 6, [7, 8])]
    assert reshape(seq, ((2,), [2])) == \
        [((1, 2), [3, 4]), ((5, 6), [7, 8])]
    assert reshape(seq, (1, [2], 1)) == \
        [(1, [2, 3], 4), (5, [6, 7], 8)]
    # When the input is a tuple the outer container is a tuple too.
    assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \
        (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
    assert reshape(tuple(seq), ([1], 1, (2,))) == \
        (([1], 2, (3, 4)), ([5], 6, (7, 8)))
    assert reshape(list(range(12)), [2, [3], set([2]), (1, (3,), 1)]) == \
        [[0, 1, [2, 3, 4], set([5, 6]), (7, (8, 9, 10), 11)]]
def test_uniq():
    """Test uniq: drop duplicates while preserving first-seen order;
    works for unhashable elements (e.g. lists) as well."""
    assert list(uniq(p.copy() for p in partitions(4))) == \
        [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]
    assert list(uniq(x % 2 for x in range(5))) == [0, 1]
    assert list(uniq('a')) == ['a']
    assert list(uniq('ababc')) == list('abc')
    # Unhashable (list) elements are still deduplicated.
    assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]]
    assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \
        [([1], 2, 2), (2, [1], 2), (2, 2, [1])]
    assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \
        [2, 3, 4, [2], [1], [3]]
def test_kbins():
    """Test kbins: partitioning a sequence into k bins under the various
    `ordered` interpretations (None, 0, 1, 10, 11)."""
    assert len(list(kbins('1123', 2, ordered=1))) == 24
    assert len(list(kbins('1123', 2, ordered=11))) == 36
    assert len(list(kbins('1123', 2, ordered=10))) == 10
    assert len(list(kbins('1123', 2, ordered=0))) == 5
    assert len(list(kbins('1123', 2, ordered=None))) == 3
    # Local helper printing every partitioning; its captured stdout is
    # compared against the literal expected transcript below.
    def test():
        for ordered in [None, 0, 1, 10, 11]:
            print('ordered =', ordered)
            for p in kbins([0, 0, 1], 2, ordered=ordered):
                print('     ', p)
    assert capture(lambda : test()) == dedent('''\
        ordered = None
            [[0], [0, 1]]
            [[0, 0], [1]]
        ordered = 0
            [[0, 0], [1]]
            [[0, 1], [0]]
        ordered = 1
            [[0], [0, 1]]
            [[0], [1, 0]]
            [[1], [0, 0]]
        ordered = 10
            [[0, 0], [1]]
            [[1], [0, 0]]
            [[0, 1], [0]]
            [[0], [0, 1]]
        ordered = 11
            [[0], [0, 1]]
            [[0, 0], [1]]
            [[0], [1, 0]]
            [[0, 1], [0]]
            [[1], [0, 0]]
            [[1, 0], [0]]\n''')
    # NOTE: the second `test` deliberately shadows the first (already
    # consumed by the capture above); this one uses distinct elements.
    def test():
        for ordered in [None, 0, 1, 10, 11]:
            print('ordered =', ordered)
            for p in kbins(list(range(3)), 2, ordered=ordered):
                print('     ', p)
    assert capture(lambda : test()) == dedent('''\
        ordered = None
            [[0], [1, 2]]
            [[0, 1], [2]]
        ordered = 0
            [[0, 1], [2]]
            [[0, 2], [1]]
            [[0], [1, 2]]
        ordered = 1
            [[0], [1, 2]]
            [[0], [2, 1]]
            [[1], [0, 2]]
            [[1], [2, 0]]
            [[2], [0, 1]]
            [[2], [1, 0]]
        ordered = 10
            [[0, 1], [2]]
            [[2], [0, 1]]
            [[0, 2], [1]]
            [[1], [0, 2]]
            [[0], [1, 2]]
            [[1, 2], [0]]
        ordered = 11
            [[0], [1, 2]]
            [[0, 1], [2]]
            [[0], [2, 1]]
            [[0, 2], [1]]
            [[1], [0, 2]]
            [[1, 0], [2]]
            [[1], [2, 0]]
            [[1, 2], [0]]
            [[2], [0, 1]]
            [[2, 0], [1]]
            [[2], [1, 0]]
            [[2, 1], [0]]\n''')
def test_has_dups():
    """Test has_dups: whether any element of a sequence repeats."""
    assert has_dups(set()) is False
    assert has_dups(list(range(3))) is False
    assert has_dups([1, 2, 1]) is True
def test__partition():
    """Test _partition: group elements by the part index assigned to
    each position; the part count may be given explicitly."""
    assert _partition('abcde', [1, 0, 1, 2, 0]) == [
        ['b', 'e'], ['a', 'c'], ['d']]
    assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [
        ['b', 'e'], ['a', 'c'], ['d']]
    # A (size, part-list) pair can be splatted straight in.
    output = (3, [1, 0, 1, 2, 0])
    assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']]
| bsd-3-clause |
tersmitten/ansible | lib/ansible/modules/network/fortios/fortios_firewall_internet_service_custom.py | 24 | 13552 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
# Standard metadata consumed by Ansible's documentation tooling;
# 'preview' marks the module interface as not yet guaranteed stable.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_internet_service_custom
short_description: Configure custom Internet Services in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and internet_service_custom category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_internet_service_custom:
description:
- Configure custom Internet Services.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Comment.
disable-entry:
description:
- Disable entries in the Internet Service database.
suboptions:
id:
description:
- Disable entry ID.
required: true
ip-range:
description:
- IP ranges in the disable entry.
suboptions:
end-ip:
description:
- End IP address.
id:
description:
- Disable entry range ID.
required: true
start-ip:
description:
- Start IP address.
port:
description:
- Integer value for the TCP/IP port (0 - 65535).
protocol:
description:
- Integer value for the protocol type as defined by IANA (0 - 255).
entry:
description:
- Entries added to the Internet Service database and custom database.
suboptions:
dst:
description:
- Destination address or address group name.
suboptions:
name:
description:
- Select the destination address or address group object from available options. Source firewall.address.name firewall
.addrgrp.name.
required: true
id:
description:
- Entry ID(1-255).
required: true
port-range:
description:
- Port ranges in the custom entry.
suboptions:
end-port:
description:
- Integer value for ending TCP/UDP/SCTP destination port in range (1 to 65535).
id:
description:
- Custom entry port range ID.
required: true
start-port:
description:
- Integer value for starting TCP/UDP/SCTP destination port in range (1 to 65535).
protocol:
description:
- Integer value for the protocol type as defined by IANA (0 - 255).
master-service-id:
description:
- Internet Service ID in the Internet Service database. Source firewall.internet-service.id.
name:
description:
- Internet Service name.
required: true
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure custom Internet Services.
fortios_firewall_internet_service_custom:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_internet_service_custom:
state: "present"
comment: "Comment."
disable-entry:
-
id: "5"
ip-range:
-
end-ip: "<your_own_value>"
id: "8"
start-ip: "<your_own_value>"
port: "10"
protocol: "11"
entry:
-
dst:
-
name: "default_name_14 (source firewall.address.name firewall.addrgrp.name)"
id: "15"
port-range:
-
end-port: "17"
id: "18"
start-port: "19"
protocol: "20"
master-service-id: "21 (source firewall.internet-service.id)"
name: "default_name_22"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate described by *data*.

    Uses the module-global ``fos`` handle; enables debug output and
    selects HTTP/HTTPS transport before authenticating.
    """
    fos.debug('on')
    # HTTPS is the default; it is only disabled when explicitly set to a
    # false value in the task parameters.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(data['host'], data['username'], data['password'])
def filter_firewall_internet_service_custom_data(json):
    """Reduce the supplied parameter dict to the options understood by
    the firewall.internet-service-custom endpoint.

    :param json: dict of module parameters; may contain extra keys and
        ``None`` values for unset options.
    :return: dict holding only the recognised, non-``None`` options.
    """
    option_list = ['comment', 'disable-entry', 'entry',
                   'master-service-id', 'name']

    # Dict comprehension instead of the manual build-up loop.
    return {attribute: json[attribute] for attribute in option_list
            if attribute in json and json[attribute] is not None}
def firewall_internet_service_custom(data, fos):
    """Create/update or delete a custom Internet Service object.

    :param data: full module parameter dict.
    :param fos: authenticated FortiOSAPI instance.
    :return: the API response dict from ``fos.set`` or ``fos.delete``.
    """
    vdom = data['vdom']
    params = data['firewall_internet_service_custom']
    payload = filter_firewall_internet_service_custom_data(params)

    state = params['state']
    if state == "present":
        return fos.set('firewall',
                       'internet-service-custom',
                       data=payload,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'internet-service-custom',
                          mkey=payload['name'],
                          vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, dispatch to the requested firewall method, and log out.

    :param data: full module parameter dict.
    :param fos: FortiOSAPI instance (login/logout are handled here).
    :return: (is_error, has_changed, response) tuple for AnsibleModule.
    :raises ValueError: if no supported method key is set in *data*.
    """
    login(data)

    # Explicit name -> callable mapping instead of eval() on a string,
    # which was fragile and an injection hazard.
    dispatch = {
        'firewall_internet_service_custom': firewall_internet_service_custom,
    }

    resp = None
    for method, func in dispatch.items():
        if data[method]:
            resp = func(data, fos)
            break

    fos.logout()

    if resp is None:
        # Previously this fell through to an opaque NameError on `resp`.
        raise ValueError("no supported firewall method was requested")

    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: declare the argument spec, instantiate the
    FortiOSAPI client and run the requested firewall operation."""
    # Argument spec mirrors the DOCUMENTATION block; nested `options`
    # dicts validate the list-typed suboptions.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_internet_service_custom": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "comment": {"required": False, "type": "str"},
                "disable-entry": {"required": False, "type": "list",
                                  "options": {
                                      "id": {"required": True, "type": "int"},
                                      "ip-range": {"required": False, "type": "list",
                                                   "options": {
                                                       "end-ip": {"required": False, "type": "str"},
                                                       "id": {"required": True, "type": "int"},
                                                       "start-ip": {"required": False, "type": "str"}
                                                   }},
                                      "port": {"required": False, "type": "int"},
                                      "protocol": {"required": False, "type": "int"}
                                  }},
                "entry": {"required": False, "type": "list",
                          "options": {
                              "dst": {"required": False, "type": "list",
                                      "options": {
                                          "name": {"required": True, "type": "str"}
                                      }},
                              "id": {"required": True, "type": "int"},
                              "port-range": {"required": False, "type": "list",
                                             "options": {
                                                 "end-port": {"required": False, "type": "int"},
                                                 "id": {"required": True, "type": "int"},
                                                 "start-port": {"required": False, "type": "int"}
                                             }},
                              "protocol": {"required": False, "type": "int"}
                          }},
                "master-service-id": {"required": False, "type": "int"},
                "name": {"required": True, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency becomes a
    # clean module failure instead of an import-time traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_firewall(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Script entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ezequielpereira/Time-Line | libs_arm/wx/lib/agw/ribbon/bar.py | 3 | 52186 | # --------------------------------------------------------------------------- #
# RIBBONBAR Library wxPython IMPLEMENTATION
#
# Original C++ Code From Peter Cawley.
#
# Current wxRibbon Version Tracked: wxWidgets 2.9.0 SVN HEAD
#
#
# Python Code By:
#
# Andrea Gavana, @ 15 Oct 2009
# Latest Revision: 17 Aug 2011, 15.00 GMT
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
Top-level control in a ribbon user interface.
Description
===========
Serves as a tabbed container for :class:`~lib.agw.ribbon.page.RibbonPage` - a ribbon user interface typically
has a ribbon bar, which contains one or more RibbonPages, which in turn each contains
one or more RibbonPanels, which in turn contain controls. While a :class:`RibbonBar` has
tabs similar to a :class:`Notebook`, it does not follow the same API for adding pages.
Containers like :class:`Notebook` can contain any type of window as a page, hence the
normal procedure is to create the sub-window and then call :meth:`BookCtrlBase.AddPage` ().
As :class:`RibbonBar` can only have :class:`~lib.agw.ribbon.page.RibbonPage` as children
(and a :class:`~lib.agw.ribbon.page.RibbonPage` can only have a :class:`RibbonBar` as parent),
when a page is created, it is automatically added to the bar - there is no `AddPage` equivalent to call.
After all pages have been created, and all controls and panels placed on those pages,
meth:`~RibbonBar.Realize` must be called.
Window Styles
=============
This class supports the following window styles:
========================================== =========== ==========================================
Window Styles Hex Value Description
========================================== =========== ==========================================
``RIBBON_BAR_DEFAULT_STYLE`` 0x9 Defined as ``RIBBON_BAR_FLOW_HORIZONTAL`` | ``RIBBON_BAR_SHOW_PAGE_LABELS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS``
``RIBBON_BAR_FOLDBAR_STYLE`` 0x1e Defined as ``RIBBON_BAR_FLOW_VERTICAL`` | ``RIBBON_BAR_SHOW_PAGE_ICONS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS`` | ``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS``
``RIBBON_BAR_SHOW_PAGE_LABELS`` 0x1 Causes labels to be shown on the tabs in the ribbon bar.
``RIBBON_BAR_SHOW_PAGE_ICONS`` 0x2 Causes icons to be shown on the tabs in the ribbon bar.
``RIBBON_BAR_FLOW_HORIZONTAL`` 0x0 Causes panels within pages to stack horizontally.
``RIBBON_BAR_FLOW_VERTICAL`` 0x4 Causes panels within pages to stack vertically.
``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS`` 0x8 Causes extension buttons to be shown on panels (where the panel has such a button).
``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS`` 0x10 Causes minimise buttons to be shown on panels (where the panel has such a button).
``RIBBON_BAR_ALWAYS_SHOW_TABS`` 0x20 Always shows the tabs area even when only one tab is added.
========================================== =========== ==========================================
Events Processing
=================
This class processes the following events:
================================= =================================
Event Name Description
================================= =================================
``EVT_RIBBONBAR_PAGE_CHANGED`` Triggered after the transition from one page being active to a different page being active.
``EVT_RIBBONBAR_PAGE_CHANGING`` Triggered prior to the transition from one page being active to a different page being active, and can veto the change.
``EVT_RIBBONBAR_TAB_MIDDLE_DOWN`` Triggered when the middle mouse button is pressed on a tab.
``EVT_RIBBONBAR_TAB_MIDDLE_UP`` Triggered when the middle mouse button is released on a tab.
``EVT_RIBBONBAR_TAB_RIGHT_DOWN`` Triggered when the right mouse button is pressed on a tab.
``EVT_RIBBONBAR_TAB_RIGHT_UP`` Triggered when the right mouse button is released on a tab.
``EVT_RIBBONBAR_TAB_LEFT_DCLICK`` Triggered when the user double-clicks on a tab.
================================= =================================
See Also
========
:class:`~lib.agw.ribbon.page.RibbonPage`, :class:`~lib.agw.ribbon.panel.RibbonPanel`
"""
import wx
import types
from control import RibbonControl
from art_internal import RibbonPageTabInfo
from art_msw import RibbonMSWArtProvider
from art import *
# Custom wx event types emitted by RibbonBar for page changes and
# tab mouse interactions.
wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGED = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGING = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_DOWN = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_UP = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_DOWN = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_UP = wx.NewEventType()
wxEVT_COMMAND_RIBBONBAR_TAB_LEFT_DCLICK = wx.NewEventType()
# Binder objects that client code uses with Bind(); the trailing 1 is
# the expected number of event identifiers.
EVT_RIBBONBAR_PAGE_CHANGED = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGED, 1)
EVT_RIBBONBAR_PAGE_CHANGING = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGING, 1)
EVT_RIBBONBAR_TAB_MIDDLE_DOWN = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_DOWN, 1)
EVT_RIBBONBAR_TAB_MIDDLE_UP = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_UP, 1)
EVT_RIBBONBAR_TAB_RIGHT_DOWN = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_DOWN, 1)
EVT_RIBBONBAR_TAB_RIGHT_UP = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_UP, 1)
EVT_RIBBONBAR_TAB_LEFT_DCLICK = wx.PyEventBinder(wxEVT_COMMAND_RIBBONBAR_TAB_LEFT_DCLICK, 1)
def SET_FLAG(variable, flag):
    """ Return ``(variable | flag, changed)`` where `changed` reports
    whether any bit of `flag` was newly set. """

    if (variable & flag) == flag:
        # All requested bits already present - nothing to do.
        return variable, False

    return variable | flag, True
def UNSET_FLAG(variable, flag):
    """ Return ``(variable & ~flag, changed)`` where `changed` reports
    whether any bit of `flag` was actually cleared. """

    if variable & flag:
        return variable & ~flag, True

    # No overlapping bits - value unchanged.
    return variable, False
class RibbonBarEvent(wx.NotifyEvent):
    """
    Event used to indicate various actions relating to a :class:`RibbonBar`.
    .. seealso:: :class:`RibbonBar` for available event types.
    """
    def __init__(self, command_type=None, win_id=0, page=None):
        """
        Default class constructor.
        :param integer `command_type`: the event type;
        :param integer `win_id`: the event identifier;
        :param `page`: an instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
        """
        wx.NotifyEvent.__init__(self, command_type, win_id)
        # Page the event relates to; may be None for some event types.
        self._page = page
        # Veto state: events are allowed unless Veto() is called.
        self._isAllowed = True
    def GetPage(self):
        """
        Returns the page being changed to, or being clicked on.
        :returns: An instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
        """
        return self._page
    def SetPage(self, page):
        """
        Sets the page relating to this event.
        :param `page`: an instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
        """
        self._page = page
    def Allow(self):
        """
        This is the opposite of :meth:`~RibbonBarEvent.Veto`: it explicitly allows the event to be processed.
        For most events it is not necessary to call this method as the events are
        allowed anyhow but some are forbidden by default (this will be mentioned
        in the corresponding event description).
        """
        self._isAllowed = True
    def Veto(self):
        """
        Prevents the change announced by this event from happening.
        :note: It is in general a good idea to notify the user about the reasons
        for vetoing the change because otherwise the applications behaviour (which
        just refuses to do what the user wants) might be quite surprising.
        """
        self._isAllowed = False
    def IsAllowed(self):
        """
        Returns ``True`` if the change is allowed (:meth:`~RibbonBarEvent.Veto` hasn't been called) or
        ``False`` otherwise (if it was).
        """
        return self._isAllowed
class RibbonBar(RibbonControl):
""" Top-level control in a ribbon user interface. """
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, agwStyle=RIBBON_BAR_DEFAULT_STYLE,
                 validator=wx.DefaultValidator, name="RibbonBar"):
        """
        Default constructor.
        :param `parent`: pointer to a parent window, must not be ``None``;
        :type `parent`: :class:`Window`
        :param integer `id`: window identifier. If ``wx.ID_ANY``, will automatically create
         an identifier;
        :param `pos`: window position. ``wx.DefaultPosition`` indicates that wxPython
         should generate a default position for the window;
        :type `pos`: tuple or :class:`Point`
        :param `size`: window size. ``wx.DefaultSize`` indicates that wxPython should
         generate a default size for the window. If no suitable size can be found, the
         window will be sized to 20x20 pixels so that the window is visible but obviously
         not correctly sized;
        :type `size`: tuple or :class:`Size`
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:
         ========================================== =========== ==========================================
         Window Styles                              Hex Value   Description
         ========================================== =========== ==========================================
         ``RIBBON_BAR_DEFAULT_STYLE``                       0x9 Defined as ``RIBBON_BAR_FLOW_HORIZONTAL`` | ``RIBBON_BAR_SHOW_PAGE_LABELS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS``
         ``RIBBON_BAR_FOLDBAR_STYLE``                      0x1e Defined as ``RIBBON_BAR_FLOW_VERTICAL`` | ``RIBBON_BAR_SHOW_PAGE_ICONS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS`` | ``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS``
         ``RIBBON_BAR_SHOW_PAGE_LABELS``                    0x1 Causes labels to be shown on the tabs in the ribbon bar.
         ``RIBBON_BAR_SHOW_PAGE_ICONS``                     0x2 Causes icons to be shown on the tabs in the ribbon bar.
         ``RIBBON_BAR_FLOW_HORIZONTAL``                     0x0 Causes panels within pages to stack horizontally.
         ``RIBBON_BAR_FLOW_VERTICAL``                       0x4 Causes panels within pages to stack vertically.
         ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS``              0x8 Causes extension buttons to be shown on panels (where the panel has such a button).
         ``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS``        0x10 Causes minimise buttons to be shown on panels (where the panel has such a button).
         ``RIBBON_BAR_ALWAYS_SHOW_TABS``                   0x20 Always shows the tabs area even when only one tab is added.
         ========================================== =========== ==========================================
        :param `validator`: the window validator;
        :type `validator`: :class:`Validator`
        :param string `name`: the window name.
        """
        RibbonControl.__init__(self, parent, id, pos, size, style=wx.NO_BORDER)
        # --- Tab geometry / interaction state -------------------------------
        self._flags = 0
        self._tabs_total_width_ideal = 0
        self._tabs_total_width_minimum = 0
        self._tab_margin_left = 0
        self._tab_margin_right = 0
        self._tab_height = 0
        self._tab_scroll_amount = 0
        # -1 means "no page" / "no tab" throughout the class.
        self._current_page = -1
        self._current_hovered_page = -1
        self._tab_scroll_left_button_state = RIBBON_SCROLL_BTN_NORMAL
        self._tab_scroll_right_button_state = RIBBON_SCROLL_BTN_NORMAL
        self._tab_scroll_buttons_shown = False
        self._arePanelsShown = True
        # List of RibbonPageTabInfo records, one per page.
        self._pages = []
        # Mouse / paint / size event wiring.
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnMouseDoubleClick)
        self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMouseMiddleDown)
        self.Bind(wx.EVT_MIDDLE_UP, self.OnMouseMiddleUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRightDown)
        self.Bind(wx.EVT_RIGHT_UP, self.OnMouseRightUp)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.CommonInit(agwStyle)
    def AddPage(self, page):
        """
        Adds a page to the :class:`RibbonBar`.
        :param `page`: an instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
        """
        info = RibbonPageTabInfo()
        info.page = page
        info.active = False
        info.hovered = False
        # info.rect not set (intentional)
        dcTemp = wx.ClientDC(self)
        # Label/icon are only measured when the corresponding style flag asks
        # for them to be shown.
        label = ""
        if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
            label = page.GetLabel()
        icon = wx.NullBitmap
        if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
            icon = page.GetIcon()
        # Ask the art provider for the four width measurements of this tab.
        info.ideal_width, info.small_begin_need_separator_width, \
                          info.small_must_have_separator_width, info.minimum_width = self._art.GetBarTabWidth(dcTemp, self, label, icon, info.ideal_width,
                                                                                                             info.small_begin_need_separator_width,
                                                                                                             info.small_must_have_separator_width, info.minimum_width)
        # Accumulate the running total widths; pages after the first also
        # account for the inter-tab separation.
        if not self._pages:
            self._tabs_total_width_ideal = info.ideal_width
            self._tabs_total_width_minimum = info.minimum_width
        else:
            sep = self._art.GetMetric(RIBBON_ART_TAB_SEPARATION_SIZE)
            self._tabs_total_width_ideal += sep + info.ideal_width
            self._tabs_total_width_minimum += sep + info.minimum_width
        self._pages.append(info)
        page.Hide() # Most likely case is that self new page is not the active tab
        page.SetArtProvider(self._art)
        # The very first page becomes the active one automatically.
        if len(self._pages) == 1:
            self.SetActivePage(0)
def DismissExpandedPanel(self):
"""
Dismiss the expanded panel of the currently active page.
Calls and returns the value from :meth:`RibbonPage.DismissExpandedPanel() <RibbonPage.DismissExpandedPanel>` for the
currently active page, or ``False`` if there is no active page.
"""
if self._current_page == -1:
return False
return self._pages[self._current_page].page.DismissExpandedPanel()
    def ShowPanels(self, show=True):
        """
        Shows or hides the panels inside :class:`RibbonBar`.
        :param bool `show`: ``True`` to show the panels, ``False`` to hide them.
        """
        self._arePanelsShown = show
        # Re-derive the minimum size so the bar collapses/expands correctly.
        self.SetMinSize(wx.Size(self.GetSize().GetWidth(), self.DoGetBestSize().GetHeight()))
        # NOTE(review): Realise (British spelling) is presumably an alias of
        # Realize defined elsewhere in this class - confirm it exists.
        self.Realise()
        self.GetParent().Layout()
    def SetAGWWindowStyleFlag(self, agwStyle):
        """
        Sets the window style for :class:`RibbonBar`.
        :param integer `agwStyle`: can be a combination of the following bits:
         ========================================== =========== ==========================================
         Window Styles                              Hex Value   Description
         ========================================== =========== ==========================================
         ``RIBBON_BAR_DEFAULT_STYLE``                       0x9 Defined as ``RIBBON_BAR_FLOW_HORIZONTAL`` | ``RIBBON_BAR_SHOW_PAGE_LABELS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS``
         ``RIBBON_BAR_FOLDBAR_STYLE``                      0x1e Defined as ``RIBBON_BAR_FLOW_VERTICAL`` | ``RIBBON_BAR_SHOW_PAGE_ICONS`` | ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS`` | ``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS``
         ``RIBBON_BAR_SHOW_PAGE_LABELS``                    0x1 Causes labels to be shown on the tabs in the ribbon bar.
         ``RIBBON_BAR_SHOW_PAGE_ICONS``                     0x2 Causes icons to be shown on the tabs in the ribbon bar.
         ``RIBBON_BAR_FLOW_HORIZONTAL``                     0x0 Causes panels within pages to stack horizontally.
         ``RIBBON_BAR_FLOW_VERTICAL``                       0x4 Causes panels within pages to stack vertically.
         ``RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS``              0x8 Causes extension buttons to be shown on panels (where the panel has such a button).
         ``RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS``        0x10 Causes minimise buttons to be shown on panels (where the panel has such a button).
         ``RIBBON_BAR_ALWAYS_SHOW_TABS``                   0x20 Always shows the tabs area even when only one tab is added.
         ========================================== =========== ==========================================
        :note: Please note that some styles cannot be changed after the window creation
         and that `Refresh()` might need to be be called after changing the others for
         the change to take place immediately.
        """
        self._flags = agwStyle
        # Propagate the flags to the art provider so drawing matches.
        if self._art:
            self._art.SetFlags(agwStyle)
    def GetAGWWindowStyleFlag(self):
        """
        Returns the :class:`RibbonBar` window style flag.
        :see: :meth:`~RibbonBar.SetAGWWindowStyleFlag` for a list of valid window styles.
        """
        # Simple accessor for the AGW style bits stored by SetAGWWindowStyleFlag.
        return self._flags
    def Realize(self):
        """
        Perform initial layout and size calculations of the bar and its children.
        This must be called after all of the bar's children have been created (and their
        children created, etc.) - if it is not, then windows may not be laid out or
        sized correctly. Also calls :meth:`RibbonPage.Realize() <lib.agw.ribbon.page.RibbonPage.Realize>`
        on each child page.
        :note: Reimplemented from :class:`~lib.agw.ribbon.control.RibbonControl`.
        """
        status = True
        dcTemp = wx.ClientDC(self)
        sep = self._art.GetMetric(RIBBON_ART_TAB_SEPARATION_SIZE)
        numtabs = len(self._pages)
        # Re-measure every tab and rebuild the total width accumulators
        # from scratch (i == 0 resets them).
        for i, info in enumerate(self._pages):
            self.RepositionPage(info.page)
            if not info.page.Realize():
                status = False
            label = ""
            if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
                label = info.page.GetLabel()
            icon = wx.NullBitmap
            if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
                icon = info.page.GetIcon()
            info.ideal_width, info.small_begin_need_separator_width, \
                              info.small_must_have_separator_width, \
                              info.minimum_width = self._art.GetBarTabWidth(dcTemp, self, label, icon, info.ideal_width,
                                                                            info.small_begin_need_separator_width, info.small_must_have_separator_width,
                                                                            info.minimum_width)
            if i == 0:
                self._tabs_total_width_ideal = info.ideal_width
                self._tabs_total_width_minimum = info.minimum_width
            else:
                self._tabs_total_width_ideal += sep + info.ideal_width
                self._tabs_total_width_minimum += sep + info.minimum_width
        self._tab_height = self._art.GetTabCtrlHeight(dcTemp, self, self._pages)
        self.RecalculateMinSize()
        self.RecalculateTabSizes()
        self.Refresh()
        return status
def OnMouseMove(self, event):
"""
Handles the ``wx.EVT_MOTION`` event for :class:`RibbonBar`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
x, y = event.GetX(), event.GetY()
hovered_page = -1
refresh_tabs = False
if y < self._tab_height:
# It is quite likely that the mouse moved a small amount and is still over the same tab
if self._current_hovered_page != -1 and self._pages[self._current_hovered_page].rect.Contains((x, y)):
hovered_page = self._current_hovered_page
# But be careful, if tabs can be scrolled, then parts of the tab rect may not be valid
if self._tab_scroll_buttons_shown:
if x >= self._tab_scroll_right_button_rect.GetX() or x < self._tab_scroll_left_button_rect.GetRight():
hovered_page = -1
else:
hovered_page, dummy = self.HitTestTabs(event.GetPosition())
if hovered_page != self._current_hovered_page:
if self._current_hovered_page != -1:
self._pages[self._current_hovered_page].hovered = False
self._current_hovered_page = hovered_page
if self._current_hovered_page != -1:
self._pages[self._current_hovered_page].hovered = True
refresh_tabs = True
if self._tab_scroll_buttons_shown:
if self._tab_scroll_left_button_rect.Contains((x, y)):
self._tab_scroll_left_button_state, refresh_tabs = SET_FLAG(self._tab_scroll_left_button_state, RIBBON_SCROLL_BTN_HOVERED)
else:
self._tab_scroll_left_button_state, refresh_tabs = UNSET_FLAG(self._tab_scroll_left_button_state, RIBBON_SCROLL_BTN_HOVERED)
if self._tab_scroll_right_button_rect.Contains((x, y)):
self._tab_scroll_right_button_state, refresh_tabs = SET_FLAG(self._tab_scroll_right_button_state, RIBBON_SCROLL_BTN_HOVERED)
else:
self._tab_scroll_right_button_state, refresh_tabs = UNSET_FLAG(self._tab_scroll_right_button_state, RIBBON_SCROLL_BTN_HOVERED)
if refresh_tabs:
self.RefreshTabBar()
def OnMouseLeave(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` event for :class:`RibbonBar`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
# The ribbon bar is (usually) at the top of a window, and at least on MSW, the mouse
# can leave the window quickly and leave a tab in the hovered state.
refresh_tabs = False
if self._current_hovered_page != -1:
self._pages[self._current_hovered_page].hovered = False
self._current_hovered_page = -1
refresh_tabs = True
if self._tab_scroll_left_button_state & RIBBON_SCROLL_BTN_HOVERED:
self._tab_scroll_left_button_state &= ~RIBBON_SCROLL_BTN_HOVERED
refresh_tabs = True
if self._tab_scroll_right_button_state & RIBBON_SCROLL_BTN_HOVERED:
self._tab_scroll_right_button_state &= ~RIBBON_SCROLL_BTN_HOVERED
refresh_tabs = True
if refresh_tabs:
self.RefreshTabBar()
def GetPage(self, n):
"""
Get a page by index.
``None`` will be returned if the given index is out of range.
:param integer `n`: the zero-based index indicating the page position.
"""
if n < 0 or n >= len(self._pages):
return 0
return self._pages[n].page
def SetActivePageByIndex(self, page):
"""
Set the active page by index, without triggering any events.
:param integer `page`: The zero-based index of the page to activate.
:returns: ``True`` if the specified page is now active, ``False`` if it could
not be activated (for example because the page index is invalid).
"""
if self._current_page == page:
return True
if page >= len(self._pages):
return False
if self._current_page != -1:
self._pages[self._current_page].active = False
self._pages[self._current_page].page.Hide()
self._current_page = page
self._pages[page].active = True
wnd = self._pages[page].page
self.RepositionPage(wnd)
wnd.Layout()
wnd.Show()
self.Refresh()
return True
def SetActivePageByPage(self, page):
"""
Set the active page, without triggering any events.
:param `page`: the page to activate, an instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
:returns: ``True`` if the specified page is now active, ``False`` if it could
not be activated (for example because the given page is not a child of the
ribbon bar).
"""
for i in xrange(len(self._pages)):
if self._pages[i].page == page:
return self.SetActivePageByIndex(i)
return False
def SetActivePage(self, page):
""" See comments on :meth:`~RibbonBar.SetActivePageByIndex` and :meth:`~RibbonBar.SetActivePageByPage`. """
if isinstance(page, types.IntType):
return self.SetActivePageByIndex(page)
return self.SetActivePageByPage(page)
def GetActivePage(self):
"""
Get the index of the active page.
In the rare case of no page being active, -1 is returned.
"""
return self._current_page
def SetTabCtrlMargins(self, left, right):
"""
Set the margin widths (in pixels) on the left and right sides of the tab bar
region of the ribbon bar.
These margins will be painted with the tab background, but tabs and scroll
buttons will never be painted in the margins. The left margin could be used for
rendering something equivalent to the "Office Button", though this is not
currently implemented. The right margin could be used for rendering a help
button, and/or MDI buttons, but again, this is not currently implemented.
:param integer `left`: the left margin (in pixels);
:param integer `right`: the right margin (in pixels).
"""
self._tab_margin_left = left
self._tab_margin_right = right
self.RecalculateTabSizes()
    def OrderPageTabInfoBySmallWidthAsc(self, first, second):
        """
        Old-style ``cmp`` comparison function used to sort page tab descriptors
        by their ``small_must_have_separator_width``, ascending.

        :param `first`: a page tab info object;
        :param `second`: a page tab info object.

        :returns: a negative, zero or positive number, as expected of a
         ``cmp``-style sort callback.
        """
        return first.small_must_have_separator_width - second.small_must_have_separator_width
    def RecalculateTabSizes(self):
        """
        Recalculates the :class:`RibbonBar` tab sizes.

        Three layouts are possible: every tab at its ideal width, every tab at
        its minimum width plus scroll buttons, or an intermediate layout where
        tabs are shrunk progressively (see the inline strategy notes below).
        """
        numtabs = len(self._pages)
        if numtabs == 0:
            return
        # Width available for tabs, excluding the left/right margins.
        width = self.GetSize().GetWidth() - self._tab_margin_left - self._tab_margin_right
        tabsep = self._art.GetMetric(RIBBON_ART_TAB_SEPARATION_SIZE)
        x = self._tab_margin_left
        y = 0
        if width >= self._tabs_total_width_ideal:
            # Simple case: everything at ideal width
            for info in self._pages:
                info.rect.x = x
                info.rect.y = y
                info.rect.width = info.ideal_width
                info.rect.height = self._tab_height
                x += info.rect.width + tabsep
            self._tab_scroll_buttons_shown = False
            self._tab_scroll_left_button_rect.SetWidth(0)
            self._tab_scroll_right_button_rect.SetWidth(0)
        elif width < self._tabs_total_width_minimum:
            # Simple case: everything minimum with scrollbar
            for info in self._pages:
                info.rect.x = x
                info.rect.y = y
                info.rect.width = info.minimum_width
                info.rect.height = self._tab_height
                x += info.rect.width + tabsep
            if not self._tab_scroll_buttons_shown:
                self._tab_scroll_left_button_state = RIBBON_SCROLL_BTN_NORMAL
                self._tab_scroll_right_button_state = RIBBON_SCROLL_BTN_NORMAL
                self._tab_scroll_buttons_shown = True
            temp_dc = wx.ClientDC(self)
            self._tab_scroll_left_button_rect.SetWidth(self._art.GetScrollButtonMinimumSize(temp_dc, self,
                                                       RIBBON_SCROLL_BTN_LEFT | RIBBON_SCROLL_BTN_NORMAL |
                                                       RIBBON_SCROLL_BTN_FOR_TABS).GetWidth())
            self._tab_scroll_left_button_rect.SetHeight(self._tab_height)
            self._tab_scroll_left_button_rect.SetX(self._tab_margin_left)
            self._tab_scroll_left_button_rect.SetY(0)
            self._tab_scroll_right_button_rect.SetWidth(self._art.GetScrollButtonMinimumSize(temp_dc, self,
                                                        RIBBON_SCROLL_BTN_RIGHT | RIBBON_SCROLL_BTN_NORMAL |
                                                        RIBBON_SCROLL_BTN_FOR_TABS).GetWidth())
            self._tab_scroll_right_button_rect.SetHeight(self._tab_height)
            self._tab_scroll_right_button_rect.SetX(self.GetClientSize().GetWidth() - self._tab_margin_right - self._tab_scroll_right_button_rect.GetWidth())
            self._tab_scroll_right_button_rect.SetY(0)
            # Hide whichever scroll button sits at its end stop.
            if self._tab_scroll_amount == 0:
                self._tab_scroll_left_button_rect.SetWidth(0)
            elif self._tab_scroll_amount + width >= self._tabs_total_width_minimum:
                self._tab_scroll_amount = self._tabs_total_width_minimum - width
                self._tab_scroll_right_button_rect.SetX(self._tab_scroll_right_button_rect.GetX() + self._tab_scroll_right_button_rect.GetWidth())
                self._tab_scroll_right_button_rect.SetWidth(0)
            # Shift every tab left by the current scroll offset.
            for info in self._pages:
                info.rect.x -= self._tab_scroll_amount
        else:
            self._tab_scroll_buttons_shown = False
            self._tab_scroll_left_button_rect.SetWidth(0)
            self._tab_scroll_right_button_rect.SetWidth(0)
            # Complex case: everything sized such that: minimum <= width < ideal
            #
            # Strategy:
            # 1) Uniformly reduce all tab widths from ideal to small_must_have_separator_width
            # 2) Reduce the largest tab by 1 pixel, repeating until all tabs are same width (or at minimum)
            # 3) Uniformly reduce all tabs down to their minimum width
            #
            smallest_tab_width = 10000
            total_small_width = tabsep * (numtabs - 1)
            for info in self._pages:
                if info.small_must_have_separator_width < smallest_tab_width:
                    smallest_tab_width = info.small_must_have_separator_width
                total_small_width += info.small_must_have_separator_width
            if width >= total_small_width:
                # Do (1)
                total_delta = self._tabs_total_width_ideal - total_small_width
                total_small_width -= tabsep*(numtabs - 1)
                width -= tabsep*(numtabs - 1)
                for info in self._pages:
                    delta = info.ideal_width - info.small_must_have_separator_width
                    info.rect.x = x
                    info.rect.y = y
                    info.rect.width = info.small_must_have_separator_width + delta*(width - total_small_width)/total_delta
                    info.rect.height = self._tab_height
                    x += info.rect.width + tabsep
                    total_delta -= delta
                    total_small_width -= info.small_must_have_separator_width
                    width -= info.rect.width
            else:
                total_small_width = tabsep*(numtabs - 1)
                for info in self._pages:
                    if info.minimum_width < smallest_tab_width:
                        total_small_width += smallest_tab_width
                    else:
                        total_small_width += info.minimum_width
                if width >= total_small_width:
                    # Do (2)
                    sorted_pages = []
                    for info in self._pages:
                        # Sneaky obj array trickery to not copy the tab descriptors
                        sorted_pages.append(info)
                    # NOTE: positional cmp argument to list.sort() is Python 2
                    # only syntax; Python 3 requires a key= function.
                    sorted_pages.sort(self.OrderPageTabInfoBySmallWidthAsc)
                    width -= tabsep*(numtabs - 1)
                    for i, info in enumerate(self._pages):
                        if info.small_must_have_separator_width*(numtabs - i) <= width:
                            info.rect.width = info.small_must_have_separator_width
                        else:
                            info.rect.width = width/(numtabs - i)
                        width -= info.rect.width
                    for i, info in enumerate(self._pages):
                        info.rect.x = x
                        info.rect.y = y
                        info.rect.height = self._tab_height
                        x += info.rect.width + tabsep
                        sorted_pages.pop(numtabs - (i + 1))
                else:
                    # Do (3)
                    total_small_width = (smallest_tab_width + tabsep)*numtabs - tabsep
                    total_delta = total_small_width - self._tabs_total_width_minimum
                    total_small_width = self._tabs_total_width_minimum - tabsep*(numtabs - 1)
                    width -= tabsep*(numtabs - 1)
                    for info in self._pages:
                        delta = smallest_tab_width - info.minimum_width
                        info.rect.x = x
                        info.rect.y = y
                        info.rect.width = info.minimum_width + delta*(width - total_small_width)/total_delta
                        info.rect.height = self._tab_height
                        x += info.rect.width + tabsep
                        total_delta -= delta
                        total_small_width -= info.minimum_width
                        width -= info.rect.width
    def CommonInit(self, agwStyle):
        """
        Common initialization procedures.

        :param integer `agwStyle`: the AGW-specific window style.

        :see: :meth:`~RibbonBar.SetAGWWindowStyleFlag` for a list of valid window styles.
        """
        self.SetName("RibbonBar")
        self._flags = agwStyle
        # Aggregate tab widths, kept up to date as pages are added/removed.
        self._tabs_total_width_ideal = 0
        self._tabs_total_width_minimum = 0
        # Tab strip geometry (pixels).
        self._tab_margin_left = 50
        self._tab_margin_right = 20
        self._tab_height = 20 # initial guess
        self._tab_scroll_amount = 0
        # Active/hovered page indices; -1 means "none".
        self._current_page = -1
        self._current_hovered_page = -1
        # Tab scroll button state and geometry.
        self._tab_scroll_left_button_state = RIBBON_SCROLL_BTN_NORMAL
        self._tab_scroll_right_button_state = RIBBON_SCROLL_BTN_NORMAL
        self._tab_scroll_buttons_shown = False
        self._arePanelsShown = True
        self._tab_scroll_left_button_rect = wx.Rect()
        self._tab_scroll_right_button_rect = wx.Rect()
        # Fall back to the MSW look unless an art provider was already set.
        if not self._art:
            self.SetArtProvider(RibbonMSWArtProvider())
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
def SetArtProvider(self, art):
"""
Set the art provider to be used be the ribbon bar.
Also sets the art provider on all current :class:`~lib.agw.ribbon.page.RibbonPage` children, and any
:class:`~lib.agw.ribbon.page.RibbonPage` children added in the future.
Note that unlike most other ribbon controls, the ribbon bar creates a default
art provider when initialised, so an explicit call to :meth:`~RibbonBar.SetArtProvider` is
not required if the default art provider is sufficient. Also unlike other
ribbon controls, the ribbon bar takes ownership of the given pointer, and
will delete it when the art provider is changed or the bar is destroyed.
If this behaviour is not desired, then clone the art provider before setting
it.
:param `art`: an art provider.
:note: Reimplemented from :class:`~lib.agw.ribbon.control.RibbonControl`.
"""
self._art = art
if art:
art.SetFlags(self._flags)
for info in self._pages:
if info.page.GetArtProvider() != art:
info.page.SetArtProvider(art)
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`RibbonBar`.
:param `event`: a :class:`PaintEvent` event to be processed.
"""
dc = wx.AutoBufferedPaintDC(self)
if not self.GetUpdateRegion().ContainsRect(wx.Rect(0, 0, self.GetClientSize().GetWidth(), self._tab_height)):
# Nothing to do in the tab area, and the page area is handled by the active page
return
self.DoEraseBackground(dc)
numtabs = len(self._pages)
sep_visibility = 0.0
draw_sep = False
tabs_rect = wx.Rect(self._tab_margin_left, 0, self.GetClientSize().GetWidth() - self._tab_margin_left - self._tab_margin_right, self._tab_height)
if self._tab_scroll_buttons_shown:
tabs_rect.x += self._tab_scroll_left_button_rect.GetWidth()
tabs_rect.width -= self._tab_scroll_left_button_rect.GetWidth() + self._tab_scroll_right_button_rect.GetWidth()
for info in self._pages:
dc.DestroyClippingRegion()
if self._tab_scroll_buttons_shown:
if not tabs_rect.Intersects(info.rect):
continue
dc.SetClippingRect(tabs_rect)
dc.SetClippingRect(info.rect)
self._art.DrawTab(dc, self, info)
if info.rect.width < info.small_begin_need_separator_width:
draw_sep = True
if info.rect.width < info.small_must_have_separator_width:
sep_visibility += 1.0
else:
sep_visibility += float(info.small_begin_need_separator_width - info.rect.width)/ \
float(info.small_begin_need_separator_width - info.small_must_have_separator_width)
if draw_sep:
rect = wx.Rect(*self._pages[0].rect)
rect.width = self._art.GetMetric(RIBBON_ART_TAB_SEPARATION_SIZE)
sep_visibility /= float(numtabs)
for i in xrange(0, numtabs-1):
info = self._pages[i]
rect.x = info.rect.x + info.rect.width
if self._tab_scroll_buttons_shown and not tabs_rect.Intersects(rect):
continue
dc.DestroyClippingRegion()
dc.SetClippingRect(rect)
self._art.DrawTabSeparator(dc, self, rect, sep_visibility)
if self._tab_scroll_buttons_shown:
dc.DestroyClippingRegion()
if self._tab_scroll_left_button_rect.GetWidth() != 0:
self._art.DrawScrollButton(dc, self, self._tab_scroll_left_button_rect, RIBBON_SCROLL_BTN_LEFT |
self._tab_scroll_left_button_state | RIBBON_SCROLL_BTN_FOR_TABS)
if self._tab_scroll_right_button_rect.GetWidth() != 0:
self._art.DrawScrollButton(dc, self, self._tab_scroll_right_button_rect, RIBBON_SCROLL_BTN_RIGHT |
self._tab_scroll_right_button_state | RIBBON_SCROLL_BTN_FOR_TABS)
    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for :class:`RibbonBar`.

        :param `event`: a :class:`EraseEvent` event to be processed.

        :note: Intentionally empty -- see :meth:`~RibbonBar.DoEraseBackground`,
         which is called from the paint handler instead.
        """
        # Background painting done in main paint handler to reduce screen flicker
        pass
def DoEraseBackground(self, dc):
"""
Does the initial painting of stuff from the :meth:`~RibbonBar.OnPaint` event.
:param `dc`: an instance of :class:`DC`.
"""
tabs = wx.Rect(0, 0, *self.GetSize())
tabs.height = self._tab_height
self._art.DrawTabCtrlBackground(dc, self, tabs)
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for :class:`RibbonBar`.
:param `event`: a :class:`SizeEvent` event to be processed.
"""
self.RecalculateTabSizes()
if self._current_page != -1:
self.RepositionPage(self._pages[self._current_page].page)
self.RefreshTabBar()
event.Skip()
    def RepositionPage(self, page):
        """
        Resize and move the given page so that it fills the client area below
        the tab strip.

        :param `page`: an instance of :class:`~lib.agw.ribbon.page.RibbonPage`.
        """
        w, h = self.GetSize()
        page.SetSizeWithScrollButtonAdjustment(0, self._tab_height, w, h - self._tab_height)
def HitTestTabs(self, position):
"""
Hit test method for :class:`RibbonBar`, testing where the given (in client coordinates)
point lies.
:param `position`: an instance of :class:`Point` in client coordinates.
:return: a tuple containing the tab index and the :class:`~lib.agw.ribbon.page.RibbonPage` if the :meth:`~RibbonBar.HitTestTabs`
successfully found such combination, or a tuple `(-1, None)` if no tab has been hit.
"""
tabs_rect = wx.Rect(self._tab_margin_left, 0, self.GetClientSize().GetWidth() - self._tab_margin_left - self._tab_margin_right, self._tab_height)
if self._tab_scroll_buttons_shown:
tabs_rect.SetX(tabs_rect.GetX() + self._tab_scroll_left_button_rect.GetWidth())
tabs_rect.SetWidth(tabs_rect.GetWidth() - self._tab_scroll_left_button_rect.GetWidth() - self._tab_scroll_right_button_rect.GetWidth())
if tabs_rect.Contains(position):
for i, info in enumerate(self._pages):
if info.rect.Contains(position):
return i, info
return -1, None
    def OnMouseLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` event for :class:`RibbonBar`.

        A click on a non-active tab asks permission via a ``PAGE_CHANGING``
        event, switches pages if allowed, then fires ``PAGE_CHANGED``. A click
        outside the tabs may arm one of the tab scroll buttons.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        index, tab = self.HitTestTabs(event.GetPosition())
        # NOTE(review): when no page is active, self._current_page is -1 and
        # self._pages[-1] silently refers to the last page -- confirm a page
        # is always active by the time tabs are clickable.
        if tab and tab != self._pages[self._current_page]:
            query = RibbonBarEvent(wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGING, self.GetId(), tab.page)
            query.SetEventObject(self)
            self.GetEventHandler().ProcessEvent(query)
            if query.IsAllowed():
                # Change was not vetoed: actually switch, then notify listeners.
                self.SetActivePage(query.GetPage())
                notification = RibbonBarEvent(wxEVT_COMMAND_RIBBONBAR_PAGE_CHANGED, self.GetId(), self._pages[self._current_page].page)
                notification.SetEventObject(self)
                self.GetEventHandler().ProcessEvent(notification)
        elif tab == None:
            # Click landed outside any tab: maybe on one of the scroll buttons.
            if self._tab_scroll_left_button_rect.Contains(event.GetPosition()):
                self._tab_scroll_left_button_state |= RIBBON_SCROLL_BTN_ACTIVE | RIBBON_SCROLL_BTN_HOVERED
                self.RefreshTabBar()
            elif self._tab_scroll_right_button_rect.Contains(event.GetPosition()):
                self._tab_scroll_right_button_state |= RIBBON_SCROLL_BTN_ACTIVE | RIBBON_SCROLL_BTN_HOVERED
                self.RefreshTabBar()
    def OnMouseDoubleClick(self, event):
        """
        Handles the ``wx.EVT_LEFT_DCLICK`` event for :class:`RibbonBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # Double-clicks on a tab are forwarded as TAB_LEFT_DCLICK events.
        self.DoMouseButtonCommon(event, wxEVT_COMMAND_RIBBONBAR_TAB_LEFT_DCLICK)
def OnMouseLeftUp(self, event):
"""
Handles the ``wx.EVT_LEFT_UP`` event for :class:`RibbonBar`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
if not self._tab_scroll_buttons_shown:
return
amount = 0
if self._tab_scroll_left_button_state & RIBBON_SCROLL_BTN_ACTIVE:
amount = -1
elif self._tab_scroll_right_button_state & RIBBON_SCROLL_BTN_ACTIVE:
amount = 1
if amount != 0:
self._tab_scroll_left_button_state &= ~RIBBON_SCROLL_BTN_ACTIVE
self._tab_scroll_right_button_state &= ~RIBBON_SCROLL_BTN_ACTIVE
self.ScrollTabBar(amount*8)
    def ScrollTabBar(self, amount):
        """
        Scrolls the tab area left/right/up/down by the specified `amount`.

        The amount is clamped so the scroll offset stays within
        ``[0, total_minimum_width - visible_width]``, and the scroll buttons
        are shown/hidden to match whichever end stops are reached.

        :param integer `amount`: the amount by which the tab area is scrolled, in pixels.
        """
        show_left = True
        show_right = True
        # Clamp at the left end stop...
        if self._tab_scroll_amount + amount <= 0:
            amount = -self._tab_scroll_amount
            show_left = False
        # ...or at the right end stop.
        elif self._tab_scroll_amount + amount + (self.GetClientSize().GetWidth() - \
             self._tab_margin_left - self._tab_margin_right) >= \
             self._tabs_total_width_minimum:
            amount = self._tabs_total_width_minimum - self._tab_scroll_amount - \
                     (self.GetClientSize().GetWidth() - self._tab_margin_left - self._tab_margin_right)
            show_right = False
        if amount == 0:
            return
        self._tab_scroll_amount += amount
        for info in self._pages:
            info.rect.SetX(info.rect.GetX() - amount)
        # Update scroll button geometry only when their visibility changed.
        if show_right != (self._tab_scroll_right_button_rect.GetWidth() != 0) or \
           show_left != (self._tab_scroll_left_button_rect.GetWidth() != 0):
            temp_dc = wx.ClientDC(self)
            if show_left:
                self._tab_scroll_left_button_rect.SetWidth(self._art.GetScrollButtonMinimumSize(temp_dc, self, RIBBON_SCROLL_BTN_LEFT |
                                                           RIBBON_SCROLL_BTN_NORMAL |
                                                           RIBBON_SCROLL_BTN_FOR_TABS).GetWidth())
            else:
                self._tab_scroll_left_button_rect.SetWidth(0)
            if show_right:
                if self._tab_scroll_right_button_rect.GetWidth() == 0:
                    self._tab_scroll_right_button_rect.SetWidth(self._art.GetScrollButtonMinimumSize(temp_dc, self,
                                                                RIBBON_SCROLL_BTN_RIGHT |
                                                                RIBBON_SCROLL_BTN_NORMAL |
                                                                RIBBON_SCROLL_BTN_FOR_TABS).GetWidth())
                    self._tab_scroll_right_button_rect.SetX(self._tab_scroll_right_button_rect.GetX() - self._tab_scroll_right_button_rect.GetWidth())
            else:
                if self._tab_scroll_right_button_rect.GetWidth() != 0:
                    self._tab_scroll_right_button_rect.SetX(self._tab_scroll_right_button_rect.GetX() + self._tab_scroll_right_button_rect.GetWidth())
                    self._tab_scroll_right_button_rect.SetWidth(0)
        self.RefreshTabBar()
def RefreshTabBar(self):
""" Repaints the tab area in :class:`RibbonBar`. """
tab_rect = wx.Rect(0, 0, self.GetClientSize().GetWidth(), self._tab_height)
self.Refresh(False, tab_rect)
    def OnMouseMiddleDown(self, event):
        """
        Handles the ``wx.EVT_MIDDLE_DOWN`` event for :class:`RibbonBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # Forwarded as a TAB_MIDDLE_DOWN event when a tab is under the mouse.
        self.DoMouseButtonCommon(event, wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_DOWN)
    def OnMouseMiddleUp(self, event):
        """
        Handles the ``wx.EVT_MIDDLE_UP`` event for :class:`RibbonBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # Forwarded as a TAB_MIDDLE_UP event when a tab is under the mouse.
        self.DoMouseButtonCommon(event, wxEVT_COMMAND_RIBBONBAR_TAB_MIDDLE_UP)
    def OnMouseRightDown(self, event):
        """
        Handles the ``wx.EVT_RIGHT_DOWN`` event for :class:`RibbonBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # Forwarded as a TAB_RIGHT_DOWN event when a tab is under the mouse.
        self.DoMouseButtonCommon(event, wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_DOWN)
    def OnMouseRightUp(self, event):
        """
        Handles the ``wx.EVT_RIGHT_UP`` event for :class:`RibbonBar`.

        :param `event`: a :class:`MouseEvent` event to be processed.
        """
        # Forwarded as a TAB_RIGHT_UP event when a tab is under the mouse.
        self.DoMouseButtonCommon(event, wxEVT_COMMAND_RIBBONBAR_TAB_RIGHT_UP)
def DoMouseButtonCommon(self, event, tab_event_type):
"""
Common methods for all the mouse move/click events.
:param `event`: a :class:`MouseEvent` event to be processed;
:param integer `tab_event_type`: one of the :class:`RibbonBar` events.
"""
index, tab = self.HitTestTabs(event.GetPosition())
if tab:
notification = RibbonBarEvent(tab_event_type, self.GetId(), tab.page)
notification.SetEventObject(self)
self.GetEventHandler().ProcessEvent(notification)
def RecalculateMinSize(self):
""" Recalculates the :class:`RibbonBar` minimum size. """
min_size = wx.Size(-1, -1)
numtabs = len(self._pages)
if numtabs != 0:
min_size = wx.Size(*self._pages[0].page.GetMinSize())
for info in self._pages:
page_min = info.page.GetMinSize()
min_size.x = max(min_size.x, page_min.x)
min_size.y = max(min_size.y, page_min.y)
if min_size.y != -1:
# TODO: Decide on best course of action when min height is unspecified
# - should we specify it to the tab minimum, or leave it unspecified?
min_size.IncBy(0, self._tab_height)
self._minWidth = min_size.GetWidth()
self._minHeight = (self._arePanelsShown and [min_size.GetHeight()] or [self._tab_height])[0]
def DoGetBestSize(self):
"""
Gets the size which best suits the window: for a control, it would be the
minimal size which doesn't truncate the control, for a panel - the same size
as it would have after a call to `Fit()`.
:return: An instance of :class:`Size`.
:note: Overridden from :class:`PyControl`.
"""
best = wx.Size(0, 0)
if self._current_page != -1:
best = wx.Size(*self._pages[self._current_page].page.GetBestSize())
if best.GetHeight() == -1:
best.SetHeight(self._tab_height)
else:
best.IncBy(0, self._tab_height)
if not self._arePanelsShown:
best.SetHeight(self._tab_height)
return best
def HasMultiplePages(self):
"""
This method should be overridden to return true if this window has multiple pages.
All standard class with multiple pages such as :class:`Notebook`, :class:`Listbook` and :class:`Treebook`
already override it to return true and user-defined classes with similar behaviour should also
do so, to allow the library to handle such windows appropriately.
"""
return True
    def GetDefaultBorder(self):
        """
        Returns the default border style for :class:`RibbonBar`.

        :returns: ``wx.BORDER_NONE``.
        """
        return wx.BORDER_NONE
| gpl-3.0 |
2ndQuadrant/ansible | lib/ansible/modules/network/checkpoint/checkpoint_object_facts.py | 48 | 3487 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_object_facts
short_description: Get object facts on Check Point over Web Services API
description:
- Get object facts on Check Point devices.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
uid:
description:
- UID of the object. If UID is not provided, it will do a full search
which can be filtered with the filter argument.
object_filter:
description:
- Filter expression for search. It accepts AND/OR logical operators and performs a textual
and IP address search. To search only by IP address, set ip_only argument to True.
which can be filtered with the filter argument.
ip_only:
description:
- Filter only by IP address.
type: bool
default: false
object_type:
description:
- Type of the object to search. Must be a valid API resource name
type: str
"""
EXAMPLES = """
- name: Get object facts
checkpoint_object_facts:
object_filter: 192.168.30.30
ip_only: yes
"""
RETURN = """
ansible_hosts:
description: The checkpoint object facts.
returned: always.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.six.moves.urllib.error import HTTPError
import json
def get_object(module, connection):
    """Query Check Point for one object (by UID) or a filtered object list.

    When 'uid' is provided it takes precedence and a single-object lookup is
    performed; otherwise a filtered search is issued.

    Returns the ``(status_code, response)`` tuple from the Web Services API.
    """
    params = module.params
    if params['uid']:
        endpoint = '/web_api/show-object'
        payload = {'uid': params['uid']}
    else:
        endpoint = '/web_api/show-objects'
        payload = {'filter': params['object_filter'],
                   'ip-only': params['ip_only'],
                   'type': params['object_type']}
    return connection.send_request(endpoint, payload)
def main():
    """Ansible module entry point: gather Check Point object facts."""
    # All parameters are optional; get_object() gives 'uid' precedence over
    # the filter arguments.
    argument_spec = dict(
        uid=dict(type='str', default=None),
        object_filter=dict(type='str', default=None),
        ip_only=dict(type='bool', default=False),
        object_type=dict(type='str', default=None)
    )
    module = AnsibleModule(argument_spec=argument_spec)
    connection = Connection(module._socket_path)
    code, response = get_object(module, connection)
    if code == 200:
        module.exit_json(ansible_facts=dict(checkpoint_objects=response))
    else:
        module.fail_json(msg='Check Point device returned error {0} with message {1}'.format(code, response))
if __name__ == '__main__':
    main()
| gpl-3.0 |
qiudesong/coreclr | src/ToolBox/SOS/tests/t_cmd_gcroot.py | 43 | 1753 | import lldb
import re
import testutils as test
def runScenario(assembly, debugger, target):
    """Exercise the SOS ``dso`` and ``gcroot`` commands and validate their output.

    Stops at Test.Main, lists stack objects with ``dso``, then runs ``gcroot``
    on every object address found and checks each reports a rooted path.
    Finally resumes the process and verifies its exit code.
    """
    process = target.GetProcess()
    res = lldb.SBCommandReturnObject()
    ci = debugger.GetCommandInterpreter()
    # Run debugger, wait until libcoreclr is loaded,
    # set breakpoint at Test.Main and stop there
    test.stop_in_main(debugger, assembly)
    ci.HandleCommand("dso", res)
    print(res.GetOutput())
    print(res.GetError())
    # Interpreter must have this command and able to run it
    test.assertTrue(res.Succeeded())
    output = res.GetOutput()
    # Output is not empty
    test.assertTrue(len(output) > 0)
    # Get all objects
    objects = []
    for line in output.split('\n'):
        # Raw string: "\s" in a normal string literal is an invalid escape
        # (SyntaxWarning on modern Python).
        match = re.match(r'([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s', line)
        # Not all lines list objects
        if match:
            groups = match.groups()
            # Match has exactly two subgroups
            test.assertEqual(len(groups), 2)
            obj_addr = groups[1]
            # Address must be a hex number
            test.assertTrue(test.is_hexnum(obj_addr))
            objects.append(obj_addr)
    # There must be at least one object
    test.assertTrue(len(objects) > 0)
    for obj in objects:
        ci.HandleCommand("gcroot " + obj, res)
        print(res.GetOutput())
        print(res.GetError())
        # Interpreter must have this command and able to run it
        test.assertTrue(res.Succeeded())
        output = res.GetOutput()
        # Output is not empty
        test.assertTrue(len(output) > 0)
        match = re.search('Found', output)
        test.assertTrue(match)
    # TODO: test other use cases
    # Continue current process and checks its exit code
    test.exit_lldb(debugger, assembly)
| mit |
telefonicaid/fiware-sdc | test/acceptance/component/get_productandrelease_list/features/steps.py | 2 | 5640 | from lettuce import step, world
from commons.product_steps import ProductSteps
from commons.rest_utils import RestUtils
from commons.constants import *
from commons.utils import response_body_to_dict, replace_none_value_metadata_to_empty_string
from nose.tools import assert_equals, assert_true, assert_false, assert_in
# Shared helpers used by every step in this file.
api_utils = RestUtils()
product_steps = ProductSteps()
def check_if_product_is_in_list(response, product_release):
    """
    Checks if product is in response list with his attribute and metadatas
    - Assertions:
        * Metadata (default metadatas)
        * Attributes (if exist)
        * Product existence in list
    :param response: Response from API - dic
    :param product_release: Product release version - str
    :return: None
    """
    for entry in response:
        product = entry[PRODUCT]
        if product[PRODUCT_NAME] != world.product_name:
            continue
        if product_release is not None and entry[VERSION] != product_release:
            continue
        # First matching entry: verify its metadata and attributes.
        for metadata in DEFAULT_METADATA[METADATA]:
            # Workaround: xmldict manage Empty values as None value
            replace_none_value_metadata_to_empty_string(product[PRODUCT_METADATAS])
            assert_in(metadata, product[PRODUCT_METADATAS],
                      "Metadata are not the expected!")
        if world.attributes is not None:
            assert_equals(product[PRODUCT_ATTRIBUTES], world.attributes,
                          "Attributes are not expected!")
        return
    assert_true(False, "Product and release not found in list!")
@step(u'a created product with this name "([^"]*)"')
def a_created_product_with_name(step, product_name):
    """Given-step: create a product with the captured name and remember it."""
    world.product_name = product_name
    product_steps.a_created_product_with_name(step, product_name)
@step(u'a created product with name "([^"]*)" and release "([^"]*)"')
def a_created_product_with_name_group1_and_release_group2(step, product_name, product_release):
    """Given-step: create a product with one release and remember both."""
    world.product_name = product_name
    world.product_release = product_release
    product_steps.a_created_product_with_name_and_release(step=step, product_name=product_name,
                                                          product_version=product_release)
@step(u'a created product with name "([^"]*)" and releases:')
def a_create_product_with_name_group1_and_releases(step, product_name):
    """Given-step: create a product with every release listed in the step table."""
    world.product_name = product_name
    world.product_release = []
    for row in step.hashes:
        world.product_release.append(row['release'])
    product_steps.a_created_product_with_name_and_release_list(step, product_name, world.product_release)
@step(u'accept header value "([^"]*)"')
def accept_header_value_group1(step, accept_header):
    """Given-step: set the Accept header for subsequent requests."""
    world.headers[ACCEPT_HEADER] = accept_header
@step(u'default product attributes')
def default_product_attributes(step):
    """Given-step: remember the default attribute set for later assertions."""
    world.attributes = DEFAULT_ATTRIBUTE[ATTRIBUTE]
@step(u'the authentication token "([^"]*)":')
def the_authentication_token_group1(step, token):
    """Given-step: set the authentication token header."""
    world.headers[AUTH_TOKEN_HEADER] = token
@step(u'the authentication tenant-id "([^"]*)"')
def the_authentication_tenant_id_group1(step, tenant_id):
    """Given-step: set the tenant-id header."""
    world.headers[TENANT_ID_HEADER] = tenant_id
@step(u'I retrieve the product list with its releases')
def i_retrieve_the_product_list_with_its_releases(step):
    """When-step: GET the product-and-release list and store the response."""
    world.response = api_utils.retrieve_productandrelease_list(headers=world.headers)
@step(u'I use a invalid HTTP "([^"]*)" method')
def i_use_a_invalid_http_group1_method(step, http_method):
    """When-step: call the productandrelease resource with an arbitrary HTTP verb."""
    world.response = api_utils.request_productandrelease(headers=world.headers, method=http_method)
@step(u'the list is returned')
def the_list_is_returned(step):
    """Then-step: the response is successful and Content-Type matches Accept."""
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content))
    response_headers = world.response.headers
    assert_in(response_headers[CONTENT_TYPE], world.headers[ACCEPT_HEADER],
              'RESPONSE HEADERS: {}'.format(world.response.headers))
@step(u'the product with its release is in the list')
def the_product_with_its_release_is_in_the_list(step):
    """Then-step: the remembered product/release appears in the returned list."""
    response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                     xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True)
    assert_true(len(response) != 0)
    check_if_product_is_in_list(response, world.product_release)
@step(u'the product with all its releases is in the list')
def the_product_with_all_its_releases_is_in_the_list(step):
    """Then-step: every remembered release of the product appears in the list."""
    response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                     xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True)
    for release in world.product_release:
        check_if_product_is_in_list(response, release)
@step(u'the product is not in the list')
def the_product_is_not_in_the_list(step):
    """Then-step: the remembered product does not appear in the returned list."""
    response = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                     xml_root_element_name=PRODUCTANDRELEASE_LIST, is_list=True)
    found = False
    if len(response) != 0:
        for product_and_release in response:
            if product_and_release[PRODUCT][PRODUCT_NAME] == world.product_name:
                found = True
                break
    assert_false(found, "Product is in the list and it shouldn't!")
@step(u'I obtain an http error code "([^"]*)"')
def i_obtain_an_http_error_code_group1(step, error_code):
    """Then-step: the response status code equals the expected error code."""
    assert_equals(str(world.response.status_code), error_code)
| apache-2.0 |
samuel1208/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset: a noisy circle parameterized by one feature.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit one regression model per maximum depth, then predict on a dense grid.
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
predictions = []
for max_depth in (2, 5, 8):
    model = DecisionTreeRegressor(max_depth=max_depth)
    model.fit(X, y)
    predictions.append((max_depth, model.predict(X_test)))
# Plot the noisy data and each model's predicted circle.
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
for (max_depth, y_pred), color in zip(predictions, ("g", "r", "b")):
    plt.scatter(y_pred[:, 0], y_pred[:, 1], c=color,
                label="max_depth=%d" % max_depth)
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
dronefly/dronefly.github.io | flask/lib/python2.7/site-packages/coverage/control.py | 27 | 44651 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Core control stuff for coverage.py."""
import atexit
import inspect
import os
import platform
import re
import sys
import traceback
from coverage import env, files
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class, iitems
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData, CoverageDataFiles
from coverage.debug import DebugControl
from coverage.files import TreeMatcher, FnmatchMatcher
from coverage.files import PathAliases, find_python_files, prep_patterns
from coverage.files import ModuleMatcher, abs_file
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.misc import file_be_gone, isolate_module
from coverage.monkey import patch_multiprocessing
from coverage.plugin import FileReporter
from coverage.plugin_support import Plugins
from coverage.python import PythonFileReporter
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
# NOTE(review): isolate_module appears to shield coverage.py's own use of `os`
# from monkeypatching by measured code -- confirm in coverage.misc.
os = isolate_module(os)
# Pypy has some unusual stuff in the "stdlib". Consider those locations
# when deciding where the stdlib is.
try:
    import _structseq
except ImportError:
    # Not running on an interpreter that provides _structseq.
    _structseq = None
class Coverage(object):
"""Programmatic access to coverage.py.
To use::
from coverage import Coverage
cov = Coverage()
cov.start()
#.. call your code ..
cov.stop()
cov.html_report(directory='covhtml')
"""
    def __init__(
        self, data_file=None, data_suffix=None, cover_pylib=None,
        auto_data=False, timid=None, branch=None, config_file=True,
        source=None, omit=None, include=None, debug=None,
        concurrency=None,
    ):
        """
        `data_file` is the base name of the data file to use, defaulting to
        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
        create the final file name.  If `data_suffix` is simply True, then a
        suffix is created with the machine and process identity included.

        `cover_pylib` is a boolean determining whether Python code installed
        with the Python interpreter is measured.  This includes the Python
        standard library and any packages installed with the interpreter.

        If `auto_data` is true, then any existing data file will be read when
        coverage measurement starts, and data will be saved automatically when
        measurement stops.

        If `timid` is true, then a slower and simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions breaks the faster trace function.

        If `branch` is true, then branch coverage will be measured in addition
        to the usual statement coverage.

        `config_file` determines what configuration file to read:

        * If it is ".coveragerc", it is interpreted as if it were True,
          for backward compatibility.

        * If it is a string, it is the name of the file to read.  If the
          file can't be read, it is an error.

        * If it is True, then a few standard files names are tried
          (".coveragerc", "setup.cfg").  It is not an error for these files
          to not be found.

        * If it is False, then no configuration file is read.

        `source` is a list of file paths or package names.  Only code located
        in the trees indicated by the file paths or package names will be
        measured.

        `include` and `omit` are lists of file name patterns.  Files that
        match `include` will be measured, files that match `omit` will not.
        Each will also accept a single string argument.

        `debug` is a list of strings indicating what debugging information is
        desired.

        `concurrency` is a string indicating the concurrency library being
        used in the measured code.  Without this, coverage.py will get
        incorrect results.  Valid strings are "greenlet", "eventlet",
        "gevent", or "thread" (the default).

        .. versionadded:: 4.0
            The `concurrency` parameter.

        """
        # Build our configuration from a number of sources, each layer
        # overriding the previous one:
        # 1: defaults:
        self.config = CoverageConfig()
        # 2: from the rcfile, .coveragerc or setup.cfg file:
        if config_file:
            did_read_rc = False
            # Some API users were specifying ".coveragerc" to mean the same as
            # True, so make it so.
            if config_file == ".coveragerc":
                config_file = True
            specified_file = (config_file is not True)
            if not specified_file:
                config_file = ".coveragerc"
            did_read_rc = self.config.from_file(config_file)
            if not did_read_rc:
                if specified_file:
                    # An explicitly named file that can't be read is an error,
                    # not a silent fallback.
                    raise CoverageException(
                        "Couldn't read '%s' as a config file" % config_file
                    )
                self.config.from_file("setup.cfg", section_prefix="coverage:")
        # 3: from environment variables:
        env_data_file = os.environ.get('COVERAGE_FILE')
        if env_data_file:
            self.config.data_file = env_data_file
        debugs = os.environ.get('COVERAGE_DEBUG')
        if debugs:
            self.config.debug.extend(debugs.split(","))
        # 4: from constructor arguments:
        self.config.from_args(
            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
            branch=branch, parallel=bool_or_none(data_suffix),
            source=source, omit=omit, include=include, debug=debug,
            concurrency=concurrency,
        )
        self._debug_file = None
        self._auto_data = auto_data
        self._data_suffix = data_suffix
        # The matchers for _should_trace.
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = self.cover_match = None
        self.include_match = self.omit_match = None
        # Is it ok for no data to be collected?
        self._warn_no_data = True
        self._warn_unimported_source = True
        # A record of all the warnings that have been issued.
        self._warnings = []
        # Other instance attributes, set later in _init().
        self.omit = self.include = self.source = None
        self.source_pkgs = None
        self.data = self.data_files = self.collector = None
        self.plugins = None
        self.pylib_dirs = self.cover_dirs = None
        self.data_suffix = self.run_suffix = None
        self._exclude_re = None
        self.debug = None
        # State machine variables:
        # Have we initialized everything?
        self._inited = False
        # Have we started collecting and not stopped it?
        self._started = False
        # Have we measured some data and not harvested it?
        self._measured = False
    def _init(self):
        """Set all the initial state.

        This is called by the public methods to initialize state.  This lets
        us construct a :class:`Coverage` object, then tweak its state before
        this function is called.
        """
        if self._inited:
            # Already initialized; this method is idempotent.
            return
        # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
        # is an environment variable, the name of a file to append debug logs
        # to.
        if self._debug_file is None:
            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
            if debug_file_name:
                self._debug_file = open(debug_file_name, "a")
            else:
                self._debug_file = sys.stderr
        self.debug = DebugControl(self.config.debug, self._debug_file)
        # Load plugins
        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
        # _exclude_re is a dict that maps exclusion list names to compiled
        # regexes.
        self._exclude_re = {}
        self._exclude_regex_stale()
        files.set_relative_directory()
        # The source argument can be directories or package names.
        self.source = []
        self.source_pkgs = []
        for src in self.config.source or []:
            if os.path.exists(src):
                self.source.append(files.canonical_filename(src))
            else:
                # Not a path on disk: treat it as an importable package name.
                self.source_pkgs.append(src)
        self.omit = prep_patterns(self.config.omit)
        self.include = prep_patterns(self.config.include)
        concurrency = self.config.concurrency
        if concurrency == "multiprocessing":
            # Multiprocessing support is done by patching, not by the
            # collector, so clear the concurrency setting afterwards.
            patch_multiprocessing()
            concurrency = None
        self.collector = Collector(
            should_trace=self._should_trace,
            check_include=self._check_include_omit_etc,
            timid=self.config.timid,
            branch=self.config.branch,
            warn=self._warn,
            concurrency=concurrency,
        )
        # Early warning if we aren't going to be able to support plugins.
        if self.plugins.file_tracers and not self.collector.supports_plugins:
            self._warn(
                "Plugin file tracers (%s) aren't supported with %s" % (
                    ", ".join(
                        plugin._coverage_plugin_name
                        for plugin in self.plugins.file_tracers
                    ),
                    self.collector.tracer_name(),
                )
            )
            for plugin in self.plugins.file_tracers:
                plugin._coverage_enabled = False
        # Suffixes are a bit tricky. We want to use the data suffix only when
        # collecting data, not when combining data. So we save it as
        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
        # find that we are collecting data later.
        if self._data_suffix or self.config.parallel:
            if not isinstance(self._data_suffix, string_class):
                # if data_suffix=True, use .machinename.pid.random
                self._data_suffix = True
        else:
            self._data_suffix = None
        self.data_suffix = None
        self.run_suffix = self._data_suffix
        # Create the data file. We do this at construction time so that the
        # data file will be written into the directory where the process
        # started rather than wherever the process eventually chdir'd to.
        self.data = CoverageData(debug=self.debug)
        self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn)
        # The directories for files considered "installed with the interpreter".
        self.pylib_dirs = set()
        if not self.config.cover_pylib:
            # Look at where some standard modules are located. That's the
            # indication for "installed with the interpreter". In some
            # environments (virtualenv, for example), these modules may be
            # spread across a few locations. Look at all the candidate modules
            # we've imported, and take all the different ones.
            for m in (atexit, inspect, os, platform, re, _structseq, traceback):
                if m is not None and hasattr(m, "__file__"):
                    self.pylib_dirs.add(self._canonical_dir(m))
            if _structseq and not hasattr(_structseq, '__file__'):
                # PyPy 2.4 has no __file__ in the builtin modules, but the code
                # objects still have the file names. So dig into one to find
                # the path to exclude.
                structseq_new = _structseq.structseq_new
                try:
                    structseq_file = structseq_new.func_code.co_filename
                except AttributeError:
                    # Python 3 renamed func_code to __code__.
                    structseq_file = structseq_new.__code__.co_filename
                self.pylib_dirs.add(self._canonical_dir(structseq_file))
        # To avoid tracing the coverage.py code itself, we skip anything
        # located where we are.
        self.cover_dirs = [self._canonical_dir(__file__)]
        if env.TESTING:
            # When testing, we use PyContracts, which should be considered
            # part of coverage.py, and it uses six. Exclude those directories
            # just as we exclude ourselves.
            import contracts, six
            for mod in [contracts, six]:
                self.cover_dirs.append(self._canonical_dir(mod))
        # Set the reporting precision.
        Numbers.set_precision(self.config.precision)
        atexit.register(self._atexit)
        self._inited = True
        # Create the matchers we need for _should_trace
        if self.source or self.source_pkgs:
            self.source_match = TreeMatcher(self.source)
            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
        else:
            if self.cover_dirs:
                self.cover_match = TreeMatcher(self.cover_dirs)
            if self.pylib_dirs:
                self.pylib_match = TreeMatcher(self.pylib_dirs)
        if self.include:
            self.include_match = FnmatchMatcher(self.include)
        if self.omit:
            self.omit_match = FnmatchMatcher(self.omit)
        # The user may want to debug things, show info if desired.
        wrote_any = False
        if self.debug.should('config'):
            config_info = sorted(self.config.__dict__.items())
            self.debug.write_formatted_info("config", config_info)
            wrote_any = True
        if self.debug.should('sys'):
            self.debug.write_formatted_info("sys", self.sys_info())
            for plugin in self.plugins:
                header = "sys: " + plugin._coverage_plugin_name
                info = plugin.sys_info()
                self.debug.write_formatted_info(header, info)
            wrote_any = True
        if wrote_any:
            self.debug.write_formatted_info("end", ())
def _canonical_dir(self, morf):
"""Return the canonical directory of the module or file `morf`."""
morf_filename = PythonFileReporter(morf, self).filename
return os.path.split(morf_filename)[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`.
Given a file name being traced, return the best guess as to the source
file to attribute it to.
"""
if filename.endswith(".py"):
# .py files are themselves source files.
return filename
elif filename.endswith((".pyc", ".pyo")):
# Bytecode files probably have source files near them.
py_filename = filename[:-1]
if os.path.exists(py_filename):
# Found a .py file, use that.
return py_filename
if env.WINDOWS:
# On Windows, it could be a .pyw file.
pyw_filename = py_filename + "w"
if os.path.exists(pyw_filename):
return pyw_filename
# Didn't find source, but it's probably the .py file we want.
return py_filename
elif filename.endswith("$py.class"):
# Jython is easy to guess.
return filename[:-9] + ".py"
# No idea, just use the file name as-is.
return filename
def _name_for_module(self, module_globals, filename):
"""Get the name of the module for a set of globals and file name.
For configurability's sake, we allow __main__ modules to be matched by
their importable name.
If loaded via runpy (aka -m), we can usually recover the "original"
full dotted module name, otherwise, we resort to interpreting the
file name to get the module's name. In the case that the module name
can't be determined, None is returned.
"""
dunder_name = module_globals.get('__name__', None)
if isinstance(dunder_name, str) and dunder_name != '__main__':
# This is the usual case: an imported module.
return dunder_name
loader = module_globals.get('__loader__', None)
for attrname in ('fullname', 'name'): # attribute renamed in py3.2
if hasattr(loader, attrname):
fullname = getattr(loader, attrname)
else:
continue
if isinstance(fullname, str) and fullname != '__main__':
# Module loaded via: runpy -m
return fullname
# Script as first argument to Python command line.
inspectedname = inspect.getmodulename(filename)
if inspectedname is not None:
return inspectedname
else:
return dunder_name
    def _should_trace_internal(self, filename, frame):
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function.  As each new file
        name is encountered, this function determines whether it is traced
        or not.

        Returns a FileDisposition object.
        """
        original_filename = filename
        disp = _disposition_init(self.collector.file_disposition_class, filename)
        def nope(disp, reason):
            """Simple helper to make it easy to return NO."""
            disp.trace = False
            disp.reason = reason
            return disp
        # Compiled Python files have two file names: frame.f_code.co_filename is
        # the file name at the time the .pyc was compiled. The second name is
        # __file__, which is where the .pyc was actually loaded from. Since
        # .pyc files can be moved after compilation (for example, by being
        # installed), we look for __file__ in the frame and prefer it to the
        # co_filename value.
        dunder_file = frame.f_globals.get('__file__')
        if dunder_file:
            filename = self._source_for_file(dunder_file)
            if original_filename and not original_filename.startswith('<'):
                orig = os.path.basename(original_filename)
                if orig != os.path.basename(filename):
                    # Files shouldn't be renamed when moved. This happens when
                    # exec'ing code. If it seems like something is wrong with
                    # the frame's file name, then just use the original.
                    filename = original_filename
        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a file name")
        if filename.startswith('memory:'):
            return nope(disp, "memory isn't traceable")
        if filename.startswith('<'):
            # Lots of non-file execution is represented with artificial
            # file names like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>". Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "not a real file name")
        # pyexpat does a dumb thing, calling the trace function explicitly from
        # C code with a C file name.
        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
            return nope(disp, "pyexpat lies about itself")
        # Jython reports the .class file to the tracer, use the source file.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"
        canonical = files.canonical_filename(filename)
        disp.canonical_filename = canonical
        # Try the plugins, see if they have an opinion about the file.
        plugin = None
        for plugin in self.plugins.file_tracers:
            if not plugin._coverage_enabled:
                continue
            try:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    file_tracer._coverage_plugin = plugin
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = files.canonical_filename(
                            file_tracer.source_filename()
                        )
                    break
            except Exception:
                # A misbehaving plugin is disabled rather than aborting
                # measurement entirely.
                self._warn(
                    "Disabling plugin %r due to an exception:" % (
                        plugin._coverage_plugin_name
                    )
                )
                traceback.print_exc()
                plugin._coverage_enabled = False
                continue
        else:
            # No plugin wanted it: it's Python.
            disp.trace = True
            disp.source_filename = canonical
        if not disp.has_dynamic_filename:
            if not disp.source_filename:
                raise CoverageException(
                    "Plugin %r didn't set source_filename for %r" %
                    (plugin, disp.original_filename)
                )
            reason = self._check_include_omit_etc_internal(
                disp.source_filename, frame,
            )
            if reason:
                nope(disp, reason)
        return disp
    def _check_include_omit_etc_internal(self, filename, frame):
        """Check a file name against the include, omit, etc, rules.

        Returns a string or None.  String means, don't trace, and is the
        reason why.  None means no reason found to not trace.
        """
        modulename = self._name_for_module(frame.f_globals, filename)
        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match:
            if self.source_pkgs_match.match(modulename):
                if modulename in self.source_pkgs:
                    # The package has now been seen, so it no longer needs an
                    # "unimported" warning later.
                    self.source_pkgs.remove(modulename)
                return None  # There's no reason to skip this file.
            if not self.source_match.match(filename):
                return "falls outside the --source trees"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"
            # We exclude the coverage.py code itself, since a little of it
            # will be measured otherwise.
            if self.cover_match and self.cover_match.match(filename):
                return "is part of coverage.py"
        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"
        # No reason found to skip this file.
        return None
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`.
Calls `_should_trace_internal`, and returns the FileDisposition.
"""
disp = self._should_trace_internal(filename, frame)
if self.debug.should('trace'):
self.debug.write(_disposition_debug_msg(disp))
return disp
def _check_include_omit_etc(self, filename, frame):
"""Check a file name against the include/omit/etc, rules, verbosely.
Returns a boolean: True if the file should be traced, False if not.
"""
reason = self._check_include_omit_etc_internal(filename, frame)
if self.debug.should('trace'):
if not reason:
msg = "Including %r" % (filename,)
else:
msg = "Not including %r: %s" % (filename, reason)
self.debug.write(msg)
return not reason
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
if self.debug.should('pid'):
msg = "[%d] %s" % (os.getpid(), msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
    def get_option(self, option_name):
        """Get an option from the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        Returns the value of the option.

        .. versionadded:: 4.0
        """
        return self.config.get_option(option_name)
    def set_option(self, option_name, value):
        """Set an option in the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with ``"run:branch"``.

        `value` is the new value for the option.  This should be a Python
        value where appropriate.  For example, use True for booleans, not the
        string ``"True"``.

        As an example, calling::

            cov.set_option("run:branch", True)

        has the same effect as this configuration file::

            [run]
            branch = True

        .. versionadded:: 4.0
        """
        self.config.set_option(option_name, value)
def use_cache(self, usecache):
"""Obsolete method."""
self._init()
if not usecache:
self._warn("use_cache(False) is no longer supported.")
    def load(self):
        """Load previously-collected coverage data from the data file."""
        self._init()
        # Discard any in-progress collection before reading stored data.
        self.collector.reset()
        self.data_files.read(self.data)
    def start(self):
        """Start measuring code coverage.

        Coverage measurement actually occurs in functions called after
        :meth:`start` is invoked.  Statements in the same scope as
        :meth:`start` won't be measured.

        Once you invoke :meth:`start`, you must also call :meth:`stop`
        eventually, or your process might not shut down cleanly.
        """
        self._init()
        if self.run_suffix:
            # Calling start() means we're running code, so use the run_suffix
            # as the data_suffix when we eventually save the data.
            self.data_suffix = self.run_suffix
        if self._auto_data:
            # auto_data requested: merge with any previously saved data.
            self.load()
        self.collector.start()
        self._started = True
        self._measured = True
    def stop(self):
        """Stop measuring code coverage."""
        if self._started:
            self.collector.stop()
        # Clear the flag unconditionally, even if collection never started.
        self._started = False
    def _atexit(self):
        """Clean up on process shutdown."""
        if self._started:
            self.stop()
        if self._auto_data:
            # auto_data mode saves collected data automatically at exit.
            self.save()
    def erase(self):
        """Erase previously-collected coverage data.

        This removes the in-memory data collected in this session as well as
        discarding the data file.
        """
        self._init()
        self.collector.reset()
        self.data.erase()
        # Parallel mode may have written suffixed data files; remove those too.
        self.data_files.erase(parallel=self.config.parallel)
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
self._init()
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
self._init()
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        # Clear in place so any existing references observe the invalidation.
        self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See :meth:`exclude` for the
lists that are available, and their meaning.
"""
self._init()
return getattr(self.config, which + "_list")
    def save(self):
        """Save the collected coverage data to the data file."""
        self._init()
        # Harvest any pending data from the collector before writing.
        self.get_data()
        self.data_files.write(self.data, suffix=self.data_suffix)
def combine(self, data_paths=None):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
`data_paths` is a list of files or directories from which data should
be combined. If no list is passed, then the data files from the
directory indicated by the current data file (probably the current
directory) will be combined.
.. versionadded:: 4.0
The `data_paths` parameter.
"""
self._init()
self.get_data()
aliases = None
if self.config.paths:
aliases = PathAliases()
for paths in self.config.paths.values():
result = paths[0]
for pattern in paths[1:]:
aliases.add(pattern, result)
self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths)
    def get_data(self):
        """Get the collected data and reset the collector.

        Also warn about various problems collecting data.

        Returns a :class:`coverage.CoverageData`, the collected coverage
        data.

        .. versionadded:: 4.0
        """
        self._init()
        if not self._measured:
            # Nothing new was collected; return the existing data unchanged.
            return self.data
        self.collector.save_data(self.data)
        # If there are still entries in the source_pkgs list, then we never
        # encountered those packages.
        if self._warn_unimported_source:
            for pkg in self.source_pkgs:
                if pkg not in sys.modules:
                    self._warn("Module %s was never imported." % pkg)
                elif not (
                    hasattr(sys.modules[pkg], '__file__') and
                    os.path.exists(sys.modules[pkg].__file__)
                ):
                    self._warn("Module %s has no Python source." % pkg)
                else:
                    self._warn("Module %s was previously imported, but not measured." % pkg)
        # Find out if we got any data.
        if not self.data and self._warn_no_data:
            self._warn("No data was collected.")
        # Find files that were never executed at all.
        for src in self.source:
            for py_file in find_python_files(src):
                py_file = files.canonical_filename(py_file)
                if self.omit_match and self.omit_match.match(py_file):
                    # Turns out this file was omitted, so don't pull it back
                    # in as unexecuted.
                    continue
                # Record the file with no data so it reports as unexecuted.
                self.data.touch_file(py_file)
        if self.config.note:
            self.data.add_run_info(note=self.config.note)
        # Data has been harvested; nothing pending until collection resumes.
        self._measured = False
        return self.data
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a file name. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The file name for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
self._init()
analysis = self._analyze(morf)
return (
analysis.filename,
sorted(analysis.statements),
sorted(analysis.excluded),
sorted(analysis.missing),
analysis.missing_formatted(),
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
self.get_data()
if not isinstance(it, FileReporter):
it = self._get_file_reporter(it)
return Analysis(self.data, it)
def _get_file_reporter(self, morf):
"""Get a FileReporter for a module or file name."""
plugin = None
file_reporter = "python"
if isinstance(morf, string_class):
abs_morf = abs_file(morf)
plugin_name = self.data.file_tracer(abs_morf)
if plugin_name:
plugin = self.plugins.get(plugin_name)
if plugin:
file_reporter = plugin.file_reporter(abs_morf)
if file_reporter is None:
raise CoverageException(
"Plugin %r did not provide a file reporter for %r." % (
plugin._coverage_plugin_name, morf
)
)
if file_reporter == "python":
file_reporter = PythonFileReporter(morf, self)
return file_reporter
def _get_file_reporters(self, morfs=None):
"""Get a list of FileReporters for a list of modules or file names.
For each module or file name in `morfs`, find a FileReporter. Return
the list of FileReporters.
If `morfs` is a single module or file name, this returns a list of one
FileReporter. If `morfs` is empty or None, then the list of all files
measured is used to find the FileReporters.
"""
if not morfs:
morfs = self.data.measured_files()
# Be sure we have a list.
if not isinstance(morfs, (list, tuple)):
morfs = [morfs]
file_reporters = []
for morf in morfs:
file_reporter = self._get_file_reporter(morf)
file_reporters.append(file_reporter)
return file_reporters
def report(
self, morfs=None, show_missing=True, ignore_errors=None,
file=None, # pylint: disable=redefined-builtin
omit=None, include=None, skip_covered=False,
):
"""Write a summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
`include` is a list of file name patterns. Files that match will be
included in the report. Files matching `omit` will not be included in
the report.
Returns a float, the total percentage covered.
"""
self.get_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
show_missing=show_missing, skip_covered=skip_covered,
)
reporter = SummaryReporter(self, self.config)
return reporter.report(morfs, outfile=file)
def annotate(
self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None,
):
"""Annotate a list of modules.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See :meth:`report` for other arguments.
"""
self.get_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = AnnotateReporter(self, self.config)
reporter.report(morfs, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None, extra_css=None, title=None):
"""Generate an HTML report.
The HTML is written to `directory`. The file "index.html" is the
overview starting point, with links to more detailed pages for
individual modules.
`extra_css` is a path to a file of other CSS to apply on the page.
It will be copied into the HTML directory.
`title` is a text string (not HTML) to use as the title of the HTML
report.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
"""
self.get_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
html_dir=directory, extra_css=extra_css, html_title=title,
)
reporter = HtmlReporter(self, self.config)
return reporter.report(morfs)
    def xml_report(
        self, morfs=None, outfile=None, ignore_errors=None,
        omit=None, include=None,
    ):
        """Generate an XML report of coverage results.

        The report is compatible with Cobertura reports.

        Each module in `morfs` is included in the report.  `outfile` is the
        path to write the file to, "-" will write to stdout.

        See :meth:`report` for other arguments.

        Returns a float, the total percentage covered.
        """
        self.get_data()
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
        )
        # Track the file we opened ourselves (if any) so that only it gets
        # closed, and whether a failed run should remove a partial output.
        file_to_close = None
        delete_file = False
        if self.config.xml_output:
            if self.config.xml_output == '-':
                # "-" means stdout; never close or delete it.
                outfile = sys.stdout
            else:
                # Ensure that the output directory is created; done here
                # because this report pre-opens the output file.
                # HTMLReport does this using the Report plumbing because
                # its task is more complex, being multiple files.
                output_dir = os.path.dirname(self.config.xml_output)
                if output_dir and not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                open_kwargs = {}
                if env.PY3:
                    # Python 3 opens text files with an explicit encoding;
                    # Python 2 writes encoded bytes directly.
                    open_kwargs['encoding'] = 'utf8'
                outfile = open(self.config.xml_output, "w", **open_kwargs)
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            # Reporting failed: don't leave a half-written XML file behind.
            delete_file = True
            raise
        finally:
            if file_to_close:
                file_to_close.close()
            if delete_file:
                file_be_gone(self.config.xml_output)
    def sys_info(self):
        """Return a list of (key, value) pairs showing internal information."""
        import coverage as covmod
        # Make sure configuration and collector state exist before reading it.
        self._init()
        # Describe each registered file-tracer plugin, flagging disabled ones.
        ft_plugins = []
        for ft in self.plugins.file_tracers:
            ft_name = ft._coverage_plugin_name
            if not ft._coverage_enabled:
                ft_name += " (disabled)"
            ft_plugins.append(ft_name)
        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dirs', self.cover_dirs),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('plugins.file_tracers', ft_plugins),
            ('config_files', self.config.attempted_config_files),
            ('configs_read', self.config.config_files),
            ('data_path', self.data_files.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', platform.python_implementation()),
            ('executable', sys.executable),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            # Only environment variables likely to affect coverage/Python.
            ('environment', sorted(
                ("%s = %s" % (k, v))
                for k, v in iitems(os.environ)
                if k.startswith(("COV", "PY"))
            )),
            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
        ]
        # File matchers only exist once measurement has started; report
        # '-none-' for any that haven't been created yet.
        matcher_names = [
            'source_match', 'source_pkgs_match',
            'include_match', 'omit_match',
            'cover_match', 'pylib_match',
        ]
        for matcher_name in matcher_names:
            matcher = getattr(self, matcher_name)
            if matcher:
                matcher_info = matcher.info()
            else:
                matcher_info = '-none-'
            info.append((matcher_name, matcher_info))
        return info
# FileDisposition "methods": FileDisposition is a pure value object, so it can
# be implemented in either C or Python. Acting on them is done with these
# functions.
def _disposition_init(cls, original_filename):
"""Construct and initialize a new FileDisposition object."""
disp = cls()
disp.original_filename = original_filename
disp.canonical_filename = original_filename
disp.source_filename = None
disp.trace = False
disp.reason = ""
disp.file_tracer = None
disp.has_dynamic_filename = False
return disp
def _disposition_debug_msg(disp):
"""Make a nice debug message of what the FileDisposition is doing."""
if disp.trace:
msg = "Tracing %r" % (disp.original_filename,)
if disp.file_tracer:
msg += ": will be traced by %r" % disp.file_tracer
else:
msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
return msg
def process_startup():
    """Call this at Python start-up to perhaps measure coverage.

    If the environment variable COVERAGE_PROCESS_START is defined, coverage
    measurement is started. The value of the variable is the config file
    to use.

    There are two ways to configure your Python installation to invoke this
    function when Python starts:

    #. Create or append to sitecustomize.py to add these lines::

        import coverage
        coverage.process_startup()

    #. Create a .pth file in your Python installation containing::

        import coverage; coverage.process_startup()

    """
    cps = os.environ.get("COVERAGE_PROCESS_START")
    if not cps:
        # No request for coverage, nothing to do.
        return
    # This function can be called more than once in a process. This happens
    # because some virtualenv configurations make the same directory visible
    # twice in sys.path. This means that the .pth file will be found twice,
    # and executed twice, executing this function twice. We set a global
    # flag (an attribute on this function) to indicate that coverage.py has
    # already been started, so we can avoid doing it twice.
    #
    # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
    # details.
    if hasattr(process_startup, "done"):
        # We've annotated this function before, so we must have already
        # started coverage.py in this process. Nothing to do.
        return
    process_startup.done = True
    # auto_data=True so measurements accumulate across processes.
    cov = Coverage(config_file=cps, auto_data=True)
    cov.start()
    # Child processes may legitimately collect no data or reference
    # unimported sources; don't warn about that at start-up time.
    cov._warn_no_data = False
    cov._warn_unimported_source = False
| apache-2.0 |
prometheanfire/portage | pym/portage/tests/news/test_NewsItem.py | 12 | 3159 | # test_NewsItem.py -- Portage Unit Testing Functionality
# Copyright 2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
from portage.tests import TestCase
from portage.news import NewsItem
from portage.dbapi.virtual import testdbapi
from tempfile import mkstemp
# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
class NewsItemTestCase(TestCase):
    """These tests suck: they use your running config instead of making their own"""

    # Raw news item text.  The Display-If-* headers are commented out; each
    # test uncomments exactly one of them before parsing the item.
    fakeItem = """
Title: YourSQL Upgrades from 4.0 to 4.1
Author: Ciaran McCreesh <ciaranm@gentoo.org>
Content-Type: text/plain
Posted: 01-Nov-2005
Revision: 1
#Display-If-Installed:
#Display-If-Profile:
#Display-If-Arch:
YourSQL databases created using YourSQL version 4.0 are incompatible
with YourSQL version 4.1 or later. There is no reliable way to
automate the database format conversion, so action from the system
administrator is required before an upgrade can take place.
Please see the Gentoo YourSQL Upgrade Guide for instructions:
http://www.gentoo.org/doc/en/yoursql-upgrading.xml
Also see the official YourSQL documentation:
http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
After upgrading, you should also recompile any packages which link
against YourSQL:
revdep-rebuild --library=libyoursqlclient.so.12
The revdep-rebuild tool is provided by app-portage/gentoolkit.
"""

    def setUp(self):
        self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
        self.keywords = "x86"
        # Use fake/test dbapi to avoid slow tests
        self.vardb = testdbapi()
        # self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
        # Consumers only use ARCH, so avoid portage.settings by using a dict
        self.settings = { 'ARCH' : 'x86' }

    def _assertRelevant(self, tmpItem):
        """Parse `tmpItem` and assert it is relevant; always remove the
        temp file afterwards.

        The item is created *before* the try block: previously two of the
        tests created it inside the try, so a parse failure left `item`
        unbound and the finally clause raised NameError, masking the real
        error.
        """
        item = self._processItem(tmpItem)
        try:
            self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
                msg="Expected %s to be relevant, but it was not!" % tmpItem)
        finally:
            os.unlink(item.path)

    def testDisplayIfProfile(self):
        tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
            self.profile)
        self._assertRelevant(tmpItem)

    def testDisplayIfInstalled(self):
        tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
            "sys-apps/portage")
        self._assertRelevant(tmpItem)

    def testDisplayIfKeyword(self):
        tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
            self.keywords)
        self._assertRelevant(tmpItem)

    def _processItem(self, item):
        """Write `item` to a temp file and parse it as a NewsItem.

        The caller is responsible for unlinking item.path on success; on
        parse failure the temp file is removed here before failing.
        """
        fd, filename = mkstemp()
        f = os.fdopen(fd, 'w')
        f.write(item)
        f.close()
        try:
            return NewsItem(filename, 0)
        except TypeError:
            # Don't leak the temp file when parsing fails.
            os.unlink(filename)
            self.fail("Error while processing news item %s" % filename)
| gpl-2.0 |
bitcity/django | django/templatetags/future.py | 129 | 2039 | import warnings
from django.template import Library, defaulttags
from django.utils.deprecation import RemovedInDjango110Warning
register = Library()
@register.tag
def cycle(parser, token):
    """
    Forward to the default ``cycle`` tag, with auto-escaping.

    The ``future`` library once provided an auto-escaping variant of
    ``cycle``.  That deprecation cycle is complete, so this version is no
    different from the built-in tag and loading it from ``future`` is
    itself deprecated.

    By default all strings are escaped.  Escaping can be switched off for
    a whole block::

        {% autoescape off %}
            {% cycle var1 var2 var3 as somecycle %}
        {% autoescape %}

    or disabled per-variable::

        {% cycle var1 var2|safe var3|safe as somecycle %}
    """
    message = (
        "Loading the `cycle` tag from the `future` library is deprecated and "
        "will be removed in Django 1.10. Use the default `cycle` tag instead."
    )
    warnings.warn(message, RemovedInDjango110Warning)
    return defaulttags.cycle(parser, token)
@register.tag
def firstof(parser, token):
    """
    Forward to the default ``firstof`` tag, with auto-escaping.

    The deprecation of the non-escaping variant is complete, so this is
    identical to the built-in tag and loading it from ``future`` is itself
    deprecated.

    Semantically equivalent to::

        {% if var1 %}
            {{ var1 }}
        {% elif var2 %}
            {{ var2 }}
        {% elif var3 %}
            {{ var3 }}
        {% endif %}

    Auto-escaping can be disabled for a whole block::

        {% autoescape off %}
            {% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
        {% autoescape %}

    or per-variable::

        {% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %}
    """
    message = (
        "Loading the `firstof` tag from the `future` library is deprecated and "
        "will be removed in Django 1.10. Use the default `firstof` tag instead."
    )
    warnings.warn(message, RemovedInDjango110Warning)
    return defaulttags.firstof(parser, token)
| bsd-3-clause |
jakesyl/linux-history-full | tools/perf/tests/attr.py | 1266 | 9424 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
    """Raised when a test's expectations are not met."""

    def __init__(self, test, msg):
        self.test = test
        self.msg = msg

    def getMsg(self):
        """Return a printable description: quoted test path plus reason."""
        return "'%s' - %s" % (self.test.path, self.msg)
class Unsup(Exception):
    """Raised when a test cannot run on this system (the perf command
    returned an unexpected exit code)."""

    def __init__(self, test):
        self.test = test

    def getMsg(self):
        """Return the path of the unsupported test, quoted."""
        return "'%s'" % self.test.path
class Event(dict):
    """One perf event: a dict mapping perf_event_attr field name -> value.

    Expected events come from the test description file; result events come
    from the files perf writes under PERF_TEST_ATTR.  Two events match when
    every field in `terms` compares equal (with '|' alternatives and '*'
    wildcards allowed).
    """

    # perf_event_attr fields compared between expectation and result.
    terms = [
        'cpu',
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        """Merge `data`, an iterable of (key, value) pairs, into the event."""
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        log.debug(" Event %s" % name)
        self.name = name
        self.group = ''
        # Parent (base) values first, then the event's own values, so the
        # event's own settings override the inherited ones.
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        """Return True when values `a` and `b` match.

        Either side may hold multiple alternatives separated by '|', and
        '*' matches anything.
        """
        a_list = a.split('|')
        b_list = b.split('|')
        for a_item in a_list:
            for b_item in b_list:
                if a_item == b_item:
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    return True
        return False

    def equal(self, other):
        """Return True when every term matches between self and other."""
        for t in Event.terms:
            # Check membership *before* reading the values: the original
            # logged self[t]/other[t] first, raising KeyError for a
            # missing term instead of returning False.  Also replaces the
            # Python-3-incompatible dict.has_key() with `in`.
            if t not in self or t not in other:
                return False
            log.debug(" [%s] %s %s" % (t, self[t], other[t]))
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        """Log every term whose values differ between self and other."""
        for t in Event.terms:
            if t not in self or t not in other:
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
class Test(object):
    """One test description file: runs the configured perf command and
    matches the events perf recorded against the expected events."""

    def __init__(self, path, options):
        # The description file is INI-style; [config] holds the command,
        # [event...] sections hold expected attr values.
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        log.warning("running '%s'" % path)
        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')
        try:
            self.ret = parser.get('config', 'ret')
        except:
            # NOTE(review): bare except defaults 'ret' to 0 even on
            # unexpected errors; ConfigParser.NoOptionError is what's meant.
            self.ret = 0
        self.expect = {}
        self.result = {}
        log.debug("  loading expected events");
        self.load_events(path, self.expect)

    def is_event(self, name):
        # Section names containing "event" describe events.
        if name.find("event") == -1:
            return False
        else:
            return True

    def load_events(self, path, events):
        """Parse `path` and store one Event per [event...] section in `events`."""
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)
        # The event record section header contains 'event' word,
        # optionaly followed by ':' allowing to load 'parent
        # event' first as a base
        for section in filter(self.is_event, parser_event.sections()):
            parser_items = parser_event.items(section);
            base_items = {}
            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')
            e = Event(section, parser_items, base_items)
            events[section] = e

    def run_cmd(self, tempdir):
        """Run the perf command; raise Unsup when its exit code differs
        from the expected one (test unsupported on this system)."""
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))
        log.info("  '%s' ret %d " % (cmd, ret))
        if ret != int(self.ret):
            raise Unsup(self)

    def compare(self, expect, result):
        """Match every event in `expect` against `result`; raise Fail when
        an expected event or group relationship has no match."""
        match = {}
        log.debug("  compare");
        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug("    matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug("      to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug("      ->OK")
                else:
                    log.debug("      ->FAIL");
            log.debug("    match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not any matching event - fail
            if (not exp_list):
                # NOTE(review): res_event here is the *last* result event
                # from the loop above, and is unbound when `result` is
                # empty -- diff output is best-effort only.
                exp_event.diff(res_event)
                raise Fail(self, 'match failure');
            match[exp_name] = exp_list
        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group
            if (group == ''):
                continue
            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')
                log.debug("    group: [%s] matches group leader %s" %
                         (exp_name, str(match[group])))
        log.debug("  matched")

    def resolve_groups(self, events):
        """Turn numeric group_fd references into event-name group links."""
        for name, event in events.items():
            group_fd = event['group_fd'];
            if group_fd == '-1':
                continue;
            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break;

    def run(self):
        """Execute the test end-to-end, cleaning up the temp dir always."""
        tempdir = tempfile.mkdtemp();
        try:
            # run the test script
            self.run_cmd(tempdir);
            # load events expectation for the test
            log.debug("  loading result events");
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result);
            # resolve group_fd to event names
            self.resolve_groups(self.expect);
            self.resolve_groups(self.result);
            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)
        finally:
            # cleanup
            shutil.rmtree(tempdir)
def run_tests(options):
    """Run every test file matching `options.test` under `options.test_dir`.

    Tests raising Unsup (not supported on this system) are logged and
    skipped; Fail propagates to the caller.
    """
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        # `except E as name` is valid from Python 2.6 on and required by
        # Python 3 (the old `except E, name` form is py2-only).
        except Unsup as obj:
            log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
    """Create the module-level `log`, mapping the -v count to a level:
    0 -> CRITICAL, 1 -> WARNING, 2 -> INFO, 3+ -> DEBUG."""
    global log
    if verbose >= 3:
        level = logging.DEBUG
    else:
        level = {1: logging.WARNING, 2: logging.INFO}.get(verbose, logging.CRITICAL)
    log = logging.getLogger('test')
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(handler)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
    """Parse command-line options, configure logging and run the tests.

    Exits non-zero when no test dir is given or when a test fails.
    """
    parser = optparse.OptionParser(usage=USAGE)
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")
    options, args = parser.parse_args()
    if args:
        # parser.error() prints the message and exits; the old `return -1`
        # after it was unreachable and has been dropped.
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
    setup_log(options.verbose)
    if not options.test_dir:
        # print() is a statement-compatible call on Python 2 with a single
        # argument, and the required form on Python 3.
        print('FAILED no -d option specified')
        sys.exit(-1)
    if not options.test:
        options.test = 'test*'
    try:
        run_tests(options)
    except Fail as obj:
        print("FAILED %s" % obj.getMsg())
        sys.exit(-1)
    sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
nikdavis/text_game_protocol | knight.py | 1 | 2178 | import curses
chessBoard = [ [0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0] ]
cursorLetter = "/"
placeLetter = "O"
def drawBoard(board):
    """Render `board` (a 2-D list; truthy cells are visited) as ASCII art.

    Returns a list of strings: a top border line followed by one line per
    row, where visited squares are "x" and empty squares are "_".

    Changes from the original: the unused column counter is gone, the top
    border is sized from the actual row width instead of being hard-coded
    to 8 columns (identical output for the 8x8 board this program uses),
    and an empty board yields an empty list instead of crashing.
    """
    if not board:
        return []
    # " _ _ ... _ " -- one "_ " cell per column, with a leading space.
    out = [" " + "_ " * len(board[0])]
    for row in board:
        cells = "".join("|x" if col else "|_" for col in row)
        out.append(cells + "|")
    return out
def knightPos(bx, by, kx, ky):
    """Map board cell (kx, ky) to the screen [row, col] of its glyph.

    (bx, by) is the screen position of the board's top-left corner; each
    board column is two characters wide on screen (cell plus separator),
    and the +1 offsets skip the border.
    """
    row = by + ky + 1
    col = bx + 2 * kx + 1
    return [row, col]
def previousMove(kx, ky):
    """Return True when square (kx, ky) on the module-level chessBoard has
    already been visited (its cell is non-zero)."""
    return bool(chessBoard[ky][kx])
def validMove(kx, ky, kx_last, ky_last):
    """Return True when moving from (kx_last, ky_last) to (kx, ky) is a
    legal knight move onto an unvisited square.

    A knight move displaces 2 along one axis and 1 along the other, i.e.
    |dx| + |dy| == 3 with neither delta equal to 3 (which also rules out
    3+0 moves).
    """
    if previousMove(kx, ky):
        # Already visited.  The original fell off the end here, returning
        # an implicit None; an explicit False has the same truthiness and
        # makes the return type consistent.
        return False
    xd = abs(kx - kx_last)
    yd = abs(ky - ky_last)
    return xd + yd == 3 and xd != 3 and yd != 3
# --- curses setup: raw, un-echoed keyboard input and a hidden hardware
# --- cursor; keypad(1) makes arrow keys arrive as KEY_* codes.
myscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
myscr.keypad(1)
# board position
by, bx = [12, 25]
# cursor position
ky = 0
kx = 0
# last move position
ky_last = ky
kx_last = kx
# fill starting space
chessBoard[ky][kx] = 1
# Initial paint: board rows, then the cursor glyph on top.
i = 0
for row in drawBoard(chessBoard):
    myscr.addstr( by + i, bx, row )
    i += 1
[y, x] = knightPos(bx, by, kx, ky)
myscr.addstr( y, x, cursorLetter )
myscr.refresh()
# Main event loop: arrow keys move the cursor inside the 8x8 board,
# 'u' commits a knight move, 'q' quits.
while 1:
    myscr.refresh()
    c = myscr.getch()
    if c == curses.KEY_UP:
        if ky > 0:
            ky -= 1
    if c == curses.KEY_DOWN:
        if ky < 7:
            ky += 1
    if c == curses.KEY_LEFT:
        if kx > 0:
            kx -= 1
    if c == curses.KEY_RIGHT:
        if kx < 7:
            kx += 1
    if c == ord('u'):
        # Commit only a legal knight jump onto an unvisited square.
        if validMove(kx, ky, kx_last, ky_last):
            ky_last = ky
            kx_last = kx
            chessBoard[ky][kx] = 1
    elif c == ord('q'):
        break
    # Full redraw: board, last committed square marker, then the cursor.
    myscr.clear()
    i = 0
    for row in drawBoard(chessBoard):
        myscr.addstr( by + i, bx, row )
        i += 1
    [y, x] = knightPos(bx, by, kx_last, ky_last)
    myscr.addstr( y, x, placeLetter)
    [y, x] = knightPos(bx, by, kx, ky)
    myscr.addstr( y, x, cursorLetter )
# Restore normal terminal state before exit.
curses.nocbreak()
curses.curs_set(1)
myscr.keypad(0)
curses.echo()
curses.endwin()
| mit |
ThomasDq/sparrowBenchmark | deploy/third_party/boto-2.1.1/boto/sdb/db/key.py | 32 | 2040 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Key(object):
    """Minimal datastore-style Key for the SDB db layer.

    Only id-based keys are supported; paths, names, applications and
    parents are unimplemented and raise NotImplementedError.

    All ``raise E, "msg"`` statements (Python-2-only syntax) have been
    rewritten as ``raise E("msg")``, which is valid on both Python 2
    and 3 and raises the identical exception.
    """

    @classmethod
    def from_path(cls, *args, **kwds):
        raise NotImplementedError("Paths are not currently supported")

    def __init__(self, encoded=None, obj=None):
        self.name = None
        if obj:
            # NOTE: these instance attributes shadow the kind()/id()
            # methods below for this instance (original design preserved).
            self.id = obj.id
            self.kind = obj.kind()
        else:
            self.id = None
            self.kind = None

    def app(self):
        raise NotImplementedError("Applications are not currently supported")

    def kind(self):
        return self.kind

    def id(self):
        return self.id

    def name(self):
        raise NotImplementedError("Key Names are not currently supported")

    def id_or_name(self):
        return self.id

    def has_id_or_name(self):
        # `is not None` instead of `!= None`: identity test is the
        # idiomatic (and operator-overload-proof) None check.
        return self.id is not None

    def parent(self):
        raise NotImplementedError("Key parents are not currently supported")

    def __str__(self):
        return self.id_or_name()
| apache-2.0 |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Amazon/IAM/GetGroupPolicy.py | 5 | 4084 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetGroupPolicy
# Retrieves the specified policy document for the specified group.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetGroupPolicy(Choreography):
    # Temboo-generated Choreo wrapper for the AWS IAM GetGroupPolicy action.

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetGroupPolicy Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetGroupPolicy, self).__init__(temboo_session, '/Library/Amazon/IAM/GetGroupPolicy')

    def new_input_set(self):
        # Factory for this Choreo's typed input container.
        return GetGroupPolicyInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return GetGroupPolicyResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return GetGroupPolicyChoreographyExecution(session, exec_id, path)
class GetGroupPolicyInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetGroupPolicy
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter maps one-to-one onto a named Choreo input; the string
    # keys are part of the Temboo API contract and must not change.

    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(GetGroupPolicyInputSet, self)._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(GetGroupPolicyInputSet, self)._set_input('AWSSecretKeyId', value)

    def set_GroupName(self, value):
        """
        Set the value of the GroupName input for this Choreo. ((required, string) The name of the group to return.)
        """
        super(GetGroupPolicyInputSet, self)._set_input('GroupName', value)

    def set_PolicyName(self, value):
        """
        Set the value of the PolicyName input for this Choreo. ((required, string) Name of the policy document to get.)
        """
        super(GetGroupPolicyInputSet, self)._set_input('PolicyName', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        super(GetGroupPolicyInputSet, self)._set_input('ResponseFormat', value)
class GetGroupPolicyResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetGroupPolicy Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; it is
        # kept because this file is Temboo-generated code.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)
class GetGroupPolicyChoreographyExecution(ChoreographyExecution):
    # Execution handle for GetGroupPolicy; wraps raw responses in the
    # Choreo-specific result set.

    def _make_result_set(self, response, path):
        return GetGroupPolicyResultSet(response, path)
| gpl-2.0 |
xNovax/SickRage | lib/enum34/__init__.py | 196 | 29262 | """Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
# Package version tuple; pyver is the running interpreter's major.minor as
# a float (e.g. 2.7, 3.4), used for compatibility branches below.
version = 1, 0, 4
pyver = float('%s.%s' % _sys.version_info[:2])

# --- compatibility shims for old interpreters ---------------------------

# builtin any() is missing before Python 2.5.
try:
    any
except NameError:
    def any(iterable):
        """Fallback for the builtin any() (Python < 2.5)."""
        for element in iterable:
            if element:
                return True
        return False

# OrderedDict (used to keep member definition order) appeared in 2.7/3.1.
try:
    from collections import OrderedDict
except ImportError:
    OrderedDict = None

try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str

try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str
class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.

    This is a descriptor, used to define attributes that act differently when
    accessed through an instance and through a class. Instance access remains
    normal, but access to an attribute through a class will be routed to the
    class's __getattr__ method; this is done by raising AttributeError.
    """
    def __init__(self, fget=None):
        # fget computes the per-instance value (like a read-only property).
        self.fget = fget

    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: raising AttributeError makes attribute
            # lookup fall back to the metaclass's __getattr__.
            raise AttributeError()
        return self.fget(instance)

    def __set__(self, instance, value):
        # Data descriptor: forbid assignment through instances.
        raise AttributeError("can't set attribute")

    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Candidate member names in definition order (meaningful on
        # Python 3, where the class body populates this dict directly).
        self._member_names = []

    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.

        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.

        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        # The order of these checks is significant: sunder names are
        # rejected outright, dunder names pass through untracked, and only
        # non-descriptor values are recorded as members.
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicity checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
__order__ = classdict.get('__order__')
if __order__ is None:
if pyver < 3.0:
try:
__order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
except TypeError:
__order__ = [name for name in sorted(members.keys())]
else:
__order__ = classdict._member_names
else:
del classdict['__order__']
if pyver < 3.0:
__order__ = __order__.replace(',', ' ').split()
aliases = [name for name in members if name not in __order__]
__order__ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names), ))
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in __order__:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
# method resolution and int's are not playing nice
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __call__(cls, value, names=None, module=None, type=None):
    """Either returns an existing member, or creates a new enum class.

    This method is used both when an enum class is given a value to match
    to an enumeration member (i.e. Color(3)) and for the functional API
    (i.e. Color = Enum('Color', names='red green blue')).

    When used for the functional API: `module`, if set, will be stored in
    the new class' __module__ attribute; `type`, if set, will be mixed in
    as the first base class.

    Note: if `module` is not set this routine will attempt to discover the
    calling module by walking the frame stack; if this is unsuccessful
    the resulting class will not be pickleable.
    """
    if names is None:  # simple value lookup, e.g. Color(3)
        return cls.__new__(cls, value)
    # otherwise, functional API: we're creating a new Enum type
    return cls._create_(value, names, module=module, type=type)
def __contains__(cls, member):
    """True only for actual member instances of this enumeration."""
    if not isinstance(member, cls):
        return False
    return member.name in cls._member_map_
def __delattr__(cls, attr):
    """Refuse to delete Enum members; delegate everything else.

    Gives a nicer error message when someone tries to delete a member
    (see issue19025).
    """
    if attr not in cls._member_map_:
        super(EnumMeta, cls).__delattr__(attr)
        return
    raise AttributeError(
            "%s: cannot delete Enum member." % cls.__name__)
def __dir__(self):
    # Restrict dir() on the class to the dunder basics plus the member
    # names so interactive exploration surfaces the enumeration members.
    return (['__class__', '__doc__', '__members__', '__module__'] +
            self._member_names_)
@property
def __members__(cls):
    """Returns a mapping of member name->value.

    This mapping lists all enum members, including aliases.  Note that
    this is a copy of the internal mapping, so mutating it does not
    affect the enumeration itself.
    """
    return cls._member_map_.copy()
def __getattr__(cls, name):
    """Return the enum member matching `name`.

    We use __getattr__ instead of descriptors or inserting into the enum
    class' __dict__ in order to support `name` and `value` being both
    properties for enum members (which live in the class' __dict__) and
    enum members themselves.
    """
    if _is_dunder(name):
        # never treat dunder lookups as member lookups
        raise AttributeError(name)
    try:
        return cls._member_map_[name]
    except KeyError:
        # attribute access is expected to raise AttributeError, not KeyError
        raise AttributeError(name)
def __getitem__(cls, name):
    # Color['red'] -> member lookup by name; raises KeyError if absent.
    return cls._member_map_[name]
def __iter__(cls):
    # Yield members in definition order; aliases are excluded because
    # _member_names_ holds canonical names only.
    return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
    # Yield members in reverse definition order (aliases excluded).
    return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __len__(cls):
    # Number of canonical members (aliases are not counted).
    return len(cls._member_names_)
def __repr__(cls):
    # e.g. <enum 'Color'>
    return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
    """Block attempts to reassign Enum members.

    A simple assignment to the class namespace only changes one of the
    several possible ways to get an Enum member from the Enum class,
    resulting in an inconsistent Enumeration.
    """
    existing_members = cls.__dict__.get('_member_map_', {})
    if name not in existing_members:
        super(EnumMeta, cls).__setattr__(name, value)
    else:
        raise AttributeError('Cannot reassign members.')
def _create_(cls, class_name, names=None, module=None, type=None):
    """Convenience method to create a new Enum class.

    `names` can be:

    * A string containing member names, separated either with spaces or
      commas.  Values are auto-numbered from 1.
    * An iterable of member names.  Values are auto-numbered from 1.
    * An iterable of (member name, value) pairs.
    * A mapping of member name -> value.
    """
    if pyver < 3.0:
        # if class_name is unicode, attempt a conversion to ASCII
        if isinstance(class_name, unicode):
            try:
                class_name = class_name.encode('ascii')
            except UnicodeEncodeError:
                raise TypeError('%r is not representable in ASCII' % class_name)
    metacls = cls.__class__
    # mixing in a data type? put it first so its __new__ is used
    if type is None:
        bases = (cls, )
    else:
        bases = (type, cls)
    classdict = metacls.__prepare__(class_name, bases)
    __order__ = []

    # special processing needed for names?
    if isinstance(names, basestring):
        names = names.replace(',', ' ').split()
    if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
        # bare names: auto-number values starting at 1
        names = [(e, i+1) for (i, e) in enumerate(names)]

    # Here, names is either an iterable of (name, value) or a mapping.
    for item in names:
        if isinstance(item, basestring):
            # mapping: the item is a key, look up its value
            member_name, member_value = item, names[item]
        else:
            member_name, member_value = item
        classdict[member_name] = member_value
        __order__.append(member_name)
    # only set __order__ in classdict if name/value was not from a mapping
    # (mapping iteration order is not meaningful on older Pythons)
    if not isinstance(item, basestring):
        classdict['__order__'] = ' '.join(__order__)
    enum_class = metacls.__new__(metacls, class_name, bases, classdict)

    # TODO: replace the frame hack if a blessed way to know the calling
    # module is ever developed
    if module is None:
        try:
            module = _sys._getframe(2).f_globals['__name__']
        except (AttributeError, ValueError):
            pass
    if module is None:
        # without a module, pickling by reference cannot work
        _make_class_unpicklable(enum_class)
    else:
        enum_class.__module__ = module

    return enum_class
@staticmethod
def _get_mixins_(bases):
    """Returns the type for creating enum members, and the first inherited
    enum class.

    bases: the tuple of bases that was given to __new__
    """
    if not bases or Enum is None:
        # bootstrapping: Enum itself is being created
        return object, Enum

    # double check that we are not subclassing a class with existing
    # enumeration members; while we're at it, see if any other data
    # type has been mixed in so we can use the correct __new__
    member_type = first_enum = None
    for base in bases:
        if (base is not Enum and
                issubclass(base, Enum) and
                base._member_names_):
            raise TypeError("Cannot extend enumerations")
    # base is now the last base in bases
    if not issubclass(base, Enum):
        raise TypeError("new enumerations must be created as "
                "`ClassName([mixin_type,] enum_type)`")

    # get correct mix-in type (either mix-in type of Enum subclass, or
    # first base if last base is Enum)
    if not issubclass(bases[0], Enum):
        member_type = bases[0]     # first data type
        first_enum = bases[-1]  # enum type
    else:
        for base in bases[0].__mro__:
            # most common: (IntEnum, int, Enum, object)
            # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
            #               <class 'int'>, <Enum 'Enum'>,
            #               <class 'object'>)
            if issubclass(base, Enum):
                if first_enum is None:
                    first_enum = base
            else:
                if member_type is None:
                    member_type = base

    return member_type, first_enum
if pyver < 3.0:
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Returns the __new__ to be used for creating the enum members.

        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ will be used by default
        first_enum: enumeration to check for an overriding __new__

        Returns a 3-tuple (__new__, save_new, use_args); Python 2 variant.
        """
        # now find the correct __new__, checking to see of one was defined
        # by the user; also check earlier enum classes in case a __new__ was
        # saved as __member_new__
        __new__ = classdict.get('__new__', None)
        if __new__:
            return None, True, True      # __new__, save_new, use_args

        # sentinels: the "uninteresting" __new__ implementations that mean
        # no custom construction is taking place
        N__new__ = getattr(None, '__new__')
        O__new__ = getattr(object, '__new__')
        if Enum is None:
            E__new__ = N__new__
        else:
            E__new__ = Enum.__dict__['__new__']
        # check all possibles for __member_new__ before falling back to
        # __new__
        for method in ('__member_new__', '__new__'):
            for possible in (member_type, first_enum):
                try:
                    target = possible.__dict__[method]
                except (AttributeError, KeyError):
                    target = getattr(possible, method, None)
                if target not in [
                        None,
                        N__new__,
                        O__new__,
                        E__new__,
                        ]:
                    if method == '__member_new__':
                        classdict['__new__'] = target
                        return None, False, True
                    if isinstance(target, staticmethod):
                        # unwrap so it can be called directly
                        target = target.__get__(member_type)
                    __new__ = target
                    break
            if __new__ is not None:
                break
        else:
            __new__ = object.__new__

        # if a non-object.__new__ is used then whatever value/tuple was
        # assigned to the enum member name will be passed to __new__ and to the
        # new enum member's __init__
        if __new__ is object.__new__:
            use_args = False
        else:
            use_args = True

        return __new__, False, use_args
else:
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Returns the __new__ to be used for creating the enum members.

        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ will be used by default
        first_enum: enumeration to check for an overriding __new__

        Returns a 3-tuple (__new__, save_new, use_args); Python 3 variant.
        """
        # now find the correct __new__, checking to see of one was defined
        # by the user; also check earlier enum classes in case a __new__ was
        # saved as __member_new__
        __new__ = classdict.get('__new__', None)

        # should __new__ be saved as __member_new__ later?
        save_new = __new__ is not None

        if __new__ is None:
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    target = getattr(possible, method, None)
                    # skip the "uninteresting" default implementations
                    if target not in (
                            None,
                            None.__new__,
                            object.__new__,
                            Enum.__new__,
                            ):
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__

        # if a non-object.__new__ is used then whatever value/tuple was
        # assigned to the enum member name will be passed to __new__ and to the
        # new enum member's __init__
        if __new__ is object.__new__:
            use_args = False
        else:
            use_args = True

        return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    # e.g. <Color.red: 1>
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
    # e.g. Color.red
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
    # Collect every public name contributed anywhere along the MRO so
    # that user-added behavior shows up next to the standard attributes.
    added_behavior = []
    for klass in self.__class__.mro():
        for attr_name in klass.__dict__:
            if attr_name[0] != '_':
                added_behavior.append(attr_name)
    return ['__class__', '__doc__', '__module__', ] + added_behavior
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch: format the "Class.member" string
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch: format the underlying value with the mix-in's rules
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
    def __cmp__(self, other):
        """Three-way comparison used by Python < 2.6.

        Members of the same enum class compare equal only to themselves;
        any distinct member sorts as "less" (-1).  Comparisons with
        unrelated types return NotImplemented so Python can fall back to
        its default behavior.
        """
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        # The original code had an unreachable `raise TypeError(...)`
        # after this return; it has been removed as dead code.
        return NotImplemented
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    # Python >= 2.6: plain Enums are deliberately unorderable; each rich
    # comparison raises instead of returning NotImplemented so that the
    # error message names both classes.
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # Members are singletons, so equality is identity within a class.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
    # Mirror of __eq__ (needed explicitly on Python 2).
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
    # Hash by name so the hash is stable even for unhashable values.
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
    # Pickle by value: unpickling calls Class(value) -> existing member.
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.

@_RouteClassAttributeToGetattr
def name(self):
    # read-only view of the member's canonical name
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    # read-only view of the member's value
    return self._value_
temp_enum_dict['value'] = value
del value
# Calling the metaclass directly is the one metaclass-application syntax
# that works on both Python 2 and Python 3.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is any mapping entry whose key differs from the member's
    # canonical name.
    duplicates = [
        (alias, member.name)
        for alias, member in enumeration.__members__.items()
        if alias != member.name
    ]
    if duplicates:
        duplicate_names = ', '.join(
                '%s -> %s' % pair for pair in duplicates
                )
        raise ValueError('duplicate names found in %r: %s' %
                (enumeration, duplicate_names)
                )
    return enumeration
| gpl-3.0 |
kkragenbrink/node-gyp | gyp/test/mac/gyptest-copies.py | 258 | 1437 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that 'copies' with app bundles are handled correctly.
"""
import TestGyp
import os
import sys
import time
# App/framework bundles only exist on macOS, so the whole test is a no-op
# on other platforms.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

    test.run_gyp('framework.gyp', chdir='framework')

    test.build('framework.gyp', 'copy_target', chdir='framework')

    # Check that the copy succeeded.
    test.built_file_must_exist(
            'Test Framework.framework/foo/Dependency Bundle.framework',
            chdir='framework')
    test.built_file_must_exist(
            'Test Framework.framework/foo/Dependency Bundle.framework/Versions/A',
            chdir='framework')
    test.built_file_must_exist(
            'Test Framework.framework/Versions/A/Libraries/empty.c',
            chdir='framework')

    # Check that rebuilding the target a few times works.
    dep_bundle = test.built_file_path('Dependency Bundle.framework',
                                      chdir='framework')
    mtime = os.path.getmtime(dep_bundle)
    atime = os.path.getatime(dep_bundle)
    for i in range(3):
        # Bump the timestamps so each rebuild sees the copy as out of date.
        os.utime(dep_bundle, (atime + i * 1000, mtime + i * 1000))
        test.build('framework.gyp', 'copy_target', chdir='framework')

    # Check that actions ran.
    test.built_file_must_exist('action_file', chdir='framework')

    test.pass_test()
| mit |
JBonsink/GSOC-2013 | core/BaseNode.py | 2 | 4442 | from util import abstract_method
import logging
class BaseNode(object):
    """Abstract base class for all node types.

    All node types should implement the API declared in this abstract
    class first.  Most methods here simply call ``abstract_method()``,
    which is expected to raise, forcing subclasses to override them.

    NOTE(review): methods below read ``self.sockets`` (a mapping of
    socket -> metadata dict with at least a 'type' key); presumably each
    concrete subclass creates it -- confirm against subclasses.
    """
    # logger name; subclasses may override
    name = 'base_node'

    def __init__(self):
        self._set_logger()

    def _set_logger(self):
        # Configure a per-node logger named after the class' `name`.
        logging.basicConfig()
        self.logger = logging.getLogger(self.name)
        # self.logger.setLevel(logging.DEBUG)
        self.logger.setLevel(logging.WARNING)

    def start(self):
        """Start the node."""
        abstract_method()

    #################################
    ###  Some Utility Function    ###
    #################################
    @property
    def now(self):
        """Get current time, either simulation time or real time."""
        abstract_method()

    def sleep(self, t, call_back=None):
        """Sleep the node for **t** seconds; if **call_back** is specified,
        it will be called after the node wakes from sleep."""
        abstract_method()

    def create_timer(self, t, call_back): abstract_method()

    #################################
    ###     Network Related       ###
    #################################
    def set_master_sock(self, sock):
        """Set **sock** to be the master sock.  If a sock is a master sock,
        then the node will listen for commands from the master socks."""
        self.sockets[sock]['type'] = 'master'

    # @property
    # def client_socks(self): abstract_method()

    @property
    def client_socks(self):
        """All sockets whose type is 'client'."""
        return [sock for sock, v in self.sockets.iteritems() if v['type'] == 'client']

    #### Socket API  ####
    def create_sock(self, desc):
        """Create a sock with parameters specified by **desc**; **desc** can be
        the address and port it tries to connect to and the type of the
        connection."""
        abstract_method()

    def bind(self, sock, port):
        """Bind **sock** to **port**."""
        abstract_method()

    def listen(self, sock, backlog):
        """Listen on the **sock**.  **backlog** is the max number of
        connections it can maintain."""
        abstract_method()

    def accept(self, sock):
        """Accept from the **sock**; will return the accepted socket."""
        abstract_method()

    def recv(self, sock, bufsize, dispatcher, threaded=False):
        """Receive some data from **sock**.

        - **bufsize** is the max size for the buffer.
        - **dispatcher** is a function handler that will manipulate the data.
        - **threaded**: if true, a new thread will be created to avoid
          blocking the main process.  Only real nodes support this; in sim
          Node and netns3 node, threading is not allowed.
        """
        abstract_method()

    def send(self, sock, data):
        """Send **data** from sock."""
        abstract_method()

    def connect(self, sock, addr_port):
        """Connect the **sock** to the server with address and port specified
        by **addr_port**."""
        abstract_method()

    def sendto(self, sock, data, addr, port):
        """Send **data** from **sock** to **addr** and **port**; uses the UDP
        protocol."""
        abstract_method()

    def close(self, sock):
        """Close the **sock**."""
        abstract_method()

    def close_socks(self, socks_seq_set, _type='client'):
        """**socks_seq_set** is the sequence of socks in this type of sock.
        For example, if _type = 'client' and socks_seq_set = [0], then the
        first client sock needs to be closed.

        NOTE(review): indices are always resolved via ``client_socks`` even
        when ``_type`` is not 'client' -- confirm this is intentional.
        """
        socks_set = [self.client_socks[i] for i in socks_seq_set]
        for sock in socks_set:
            if self.sockets[sock]['type'] == _type:
                del self.sockets[sock]

    #################################
    ###   Application Layer   #######
    #################################
    def ping(self, sock, data, threaded=False):
        """Send ping traffic using **sock**; the server information is
        stored in **data**.  A new thread will be created if **threaded**
        is true.
        """
        abstract_method()

    def ftp(self, sock, data, threaded=False):
        abstract_method()

    def icmp(self, sock, data, threaded=False):
        abstract_method()

    def http(self, sock, data, threaded=False):
        abstract_method()

    def stop_app(self, sock, app_name):
        """Stop the app running on **sock** with **app_name**."""
        abstract_method()
| gpl-3.0 |
canavandl/bokeh | examples/embed/spectrogram/spectrogram.py | 3 | 5939 |
import json
from threading import Thread, RLock
import flask
import pyaudio
import numpy as np
import scipy as sp
from scipy.integrate import simps
from bokeh.embed import components
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.resources import Resources
from bokeh.templates import RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models.widgets import HBox, Paragraph, Slider, VBox
app = flask.Flask(__name__)

NUM_SAMPLES = 1024                 # audio frames read per chunk
SAMPLING_RATE = 44100              # Hz
MAX_FREQ = SAMPLING_RATE / 2       # Nyquist frequency
FREQ_SAMPLES = NUM_SAMPLES / 8
NGRAMS = 800
SPECTROGRAM_LENGTH = 512
TILE_WIDTH = 500
TIMESLICE = 40  # ms

# Shared state between the audio-capture thread and the web handlers:
# `data` holds the latest (signal, spectrum, bins) tuple, guarded by `mutex`.
mutex = RLock()
data = None
stream = None
@app.route("/")
def root():
    """ Returns the spectrogram of audio data served from /data """
    spectrogram = make_spectrogram()

    # Inline all Bokeh JS/CSS so the page is self-contained.
    resources = Resources("inline")
    plot_resources = RESOURCES.render(
        js_raw = resources.js_raw,
        css_raw = resources.css_raw,
        js_files = resources.js_files,
        css_files = resources.css_files,
    )

    plot_script, plot_div = components(
        spectrogram, resources
    )

    html = flask.render_template(
        "spectrogram.html",
        plot_resources = plot_resources,
        plot_script = plot_script,
        plot_div = plot_div,
    )
    return encode_utf8(html)
@app.route("/params")
def params():
    """Serve the constants the client-side JS needs, as a JSON object."""
    config = {
        "FREQ_SAMPLES": FREQ_SAMPLES,
        "MAX_FREQ": MAX_FREQ,
        "NGRAMS": NGRAMS,
        "NUM_SAMPLES": NUM_SAMPLES,
        "SAMPLING_RATE": SAMPLING_RATE,
        "SPECTROGRAM_LENGTH": SPECTROGRAM_LENGTH,
        "TILE_WIDTH": TILE_WIDTH,
        "TIMESLICE": TIMESLICE,
        "EQ_CLAMP": 20,
        "FRAMES_PER_SECOND": 20,
    }
    return json.dumps(config)
@app.route("/data")
def data():
    """ Return the current audio data sample as a JSON dict of three arrays
    of floating-point values: (fft values, audio sample values, frequency bins)
    """
    # NOTE(review): this function shares its name with the module-level
    # `data` variable it consumes.  The `global data` below means the module
    # attribute gets rebound (clobbering the function object), but Flask
    # already holds its own reference from @app.route, so the endpoint keeps
    # working.  Renaming one of the two would be clearer -- confirm no
    # url_for('data') callers before renaming.
    global data
    have_data = False

    with mutex:
        if not data:
            return json.dumps({})
        else:
            have_data = True
            # consume-once semantics: hand the sample out and clear the slot
            signal, spectrum, bins = data
            data = None

    if have_data:
        return json.dumps({
            "signal"   : signal,
            "spectrum" : spectrum,
            "bins"     : bins,
        })
def main():
    """ Start the sound server, which retains the audio data inside
    its process space, and forks out workers when web connections are
    made.
    """
    # Capture audio on a daemon thread so it never blocks interpreter
    # shutdown.  (The original set `t.daemon = True` and then also called
    # the deprecated alias `t.setDaemon(True)`; one assignment suffices.)
    t = Thread(target=get_audio_data, args=())
    t.daemon = True
    t.start()

    app.run(debug=True)
def make_spectrogram():
    """Build and return the Bokeh layout for the spectrogram dashboard.

    The layout contains four linked figures -- the scrolling spectrogram
    image, the power spectrum, the raw signal trace, and a radial
    equalizer -- plus two vertical sliders (frequency range and gain).
    The client-side JS locates each glyph by its `name` attribute.
    """
    # shared figure options: bare plots with no toolbars or borders
    plot_kw = dict(
        tools="", min_border=1, h_symmetry=False, v_symmetry=False, toolbar_location=None
    )

    freq = VBox(
        children=[
            Paragraph(text="Freq Range"),
            Slider(orientation="vertical", start=1, end=MAX_FREQ, value=MAX_FREQ, step=1, name="freq")
        ]
    )

    gain = VBox(
        children=[
            Paragraph(text="Gain"),
            Slider(orientation="vertical", start=1, end=20, value=1, step=1, name="gain")
        ]
    )

    # scrolling spectrogram image (filled in client-side)
    spec_source = ColumnDataSource(data=dict(image=[], x=[]))
    spec = figure(
        title=None, plot_width=800, plot_height=300,
        x_range=[0, NGRAMS], y_range=[0, MAX_FREQ], **plot_kw)
    spec.image_rgba(
        x='x', y=0, image='image', dw=TILE_WIDTH, dh=MAX_FREQ,
        cols=TILE_WIDTH, rows=SPECTROGRAM_LENGTH,
        source=spec_source, dilate=True, name="spectrogram")
    spec.grid.grid_line_color = None

    # log-scale power spectrum
    spectrum_source = ColumnDataSource(data=dict(x=[], y=[]))
    spectrum = figure(
        title="Power Spectrum", plot_width=800, plot_height=250,
        y_range=[10**(-4), 10**3], x_range=[0, MAX_FREQ],
        y_axis_type="log", **plot_kw)
    spectrum.line(
        x="x", y="y", line_color="darkblue",
        source=spectrum_source, name="spectrum")
    spectrum.xgrid.grid_line_dash=[2, 2]

    # raw time-domain signal trace
    signal_source = ColumnDataSource(data=dict(x=[], y=[]))
    signal = figure(
        title="Signal", plot_width=800, plot_height=250,
        x_range=[0, TIMESLICE*1.01], y_range=[-0.1, 0.1], **plot_kw)
    signal.line(
        x="x", y="y", line_color="darkblue",
        source=signal_source, name="signal")
    signal.xgrid.grid_line_dash=[2, 2]

    # radial equalizer built from annular wedges
    radial_source = ColumnDataSource(data=dict(
        inner_radius=[], outer_radius=[], start_angle=[], end_angle=[], fill_alpha=[],
    ))
    eq = figure(
        title=None, plot_width=500, plot_height=520,
        x_range=[-20, 20], y_range=[-20, 20], **plot_kw)
    eq.annular_wedge(
        x=0, y=0, fill_color="#688AB9", fill_alpha="fill_alpha", line_color=None,
        inner_radius="inner_radius", outer_radius="outer_radius",
        start_angle="start_angle", end_angle="end_angle",
        source=radial_source, name="eq")
    eq.grid.grid_line_color=None

    lines = VBox(
        children=[spectrum, signal]
    )

    layout = VBox(
        children = [
            HBox(children=[freq, gain, spec]),
            HBox(children=[lines, eq])
        ]
    )

    return layout
def get_audio_data():
    """Continuously read microphone audio and publish the latest analysis.

    Runs forever (intended for a daemon thread).  Each iteration reads
    NUM_SAMPLES int16 frames, normalizes them to [-1, 1), computes the FFT
    magnitude spectrum and 16 banded power integrals, and stores
    (signal, spectrum, bins) in the module-level `data` slot under `mutex`.
    If processing a chunk fails (e.g. input overflow), the slot is cleared
    instead so stale data is never served.
    """
    global data, stream
    if stream is None:
        # lazily open the microphone on first use
        pa = pyaudio.PyAudio()
        stream = pa.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=SAMPLING_RATE,
            input=True,
            frames_per_buffer=NUM_SAMPLES
        )

    while True:
        try:
            # NOTE: np.fromstring is deprecated in newer NumPy in favor of
            # np.frombuffer; kept here for compatibility with this codebase.
            raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)
            signal = raw_data / 32768.0
            fft = sp.fft(signal)
            spectrum = abs(fft)[:NUM_SAMPLES/2]
            power = spectrum**2
            bins = [simps(a) for a in np.split(power, 16)]
            with mutex:
                data = signal.tolist(), spectrum.tolist(), bins
        except Exception:
            # Best-effort loop: drop this chunk on any per-chunk failure,
            # but do not swallow SystemExit/KeyboardInterrupt like the
            # original bare `except:` did.
            with mutex:
                data = None
# Run the dev server only when executed directly (not when imported).
if __name__ == "__main__":
    main()
| bsd-3-clause |
jokajak/itweb | data/env/lib/python2.6/site-packages/Pygments-1.1.1-py2.6.egg/pygments/formatters/other.py | 5 | 3857 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        # The encoding choice is fixed for the lifetime of this call, so
        # decide once instead of testing inside the token loop.
        enc = self.encoding
        if enc:
            for ttype, value in tokensource:
                outfile.write(value.encode(enc))
        else:
            for ttype, value in tokensource:
                outfile.write(value)
class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    `lexer list <lexers.txt>`_.

    Only two options are accepted:

    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color.  If
        set but with no value, defaults to ``'red'``.
        *New in Pygments 0.11.*

    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']

    # output is raw bytes, never unicode
    unicodeoutput = False

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # the user-visible `encoding` option is not supported because the
        # token repr is always ASCII-safe
        if self.encoding:
            raise OptionError('the raw formatter does not support the '
                              'encoding option')
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            # bare flag: default to red
            self.error_color = 'red'
        if self.error_color is not None:
            try:
                # validate the color name early, at construction time
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)

    def format(self, tokensource, outfile):
        # probe the output file: writing b'' fails on text-mode files
        try:
            outfile.write(b(''))
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        # Select write/flush closures once, depending on compression mode,
        # so the token loop below is compression-agnostic.
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def write(text):
                outfile.write(compressor.compress(text.encode()))
            def flush():
                # bz2 buffers internally; emit the tail before flushing
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush

        # NOTE(review): lasttype/lastval appear vestigial -- they are never
        # read or updated below.
        lasttype = None
        lastval = u''
        if self.error_color:
            for ttype, value in tokensource:
                line = "%s\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    write(colorize(self.error_color, line))
                else:
                    write(line)
        else:
            for ttype, value in tokensource:
                write("%s\t%r\n" % (ttype, value))
        flush()
| gpl-3.0 |
erasmux/pyramid-gb-kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# allow overriding the readelf binary via the environment
readelf = os.getenv("READELF", "readelf")

# matches `readelf -u` region headers: <func>: [0xSTART-0xEND]
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# matches unwind records carrying a region length, e.g. "... rlen=12"
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify the per-function invariant: total instruction slots must equal
    # the sum of the unwind region lengths.  Counts errors via the global
    # `num_errors`; uses the globals `start`/`end` for a fallback label when
    # the function name is unknown.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Stream `readelf -u` output: each new function header finalizes the
# previous function's check, then the per-region rlen values accumulate.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64: 3 instruction slots per 16-byte bundle
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# don't forget the last function in the file
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
derrowap/DNC-TensorFlow | src/tasks/train.py | 1 | 11555 | """A script to train the DNC on implemented tasks.
You can start training the DNC model on any implemented task by executing:
> python -m src.tasks.train --task=<task_name>
TO SUPPORT NEW TASKS:
1) Import necessary code for task (follow necessary requirements listed below).
2) Create new section in flags and define any valid flags for the task.
3) In the "get_task" method, append a command-line name for the task to the end
of the list "valid_tasks".
4) Append a lambda function to the end of the "instantiate_task" list that
returns an instantiated object of the task using all FLAGS defined in step 2.
REQUIREMENTS FOR ALL TASKS:
* The task's class must be a sub-class of snt.AbstractModule implementing
methods `_build(self)`, `cost(output, task_state)`,
`to_string(output, task_state)`, and `process_output(output, task_state)`.
* The `_build(self)` method must return a collections.namedtuple,
`task_state`, containing at least fields 'input'. Other fields are
allowed to be used internally in the other methods. For example, the
'target' field would likely be needed for supervised learning tasks to
calculate the cost.
* The `cost(output, task_state)` method must return the losses for the
model to be used in `tf.gradients(losses, trainable_variables)`.
* The `to_string(output, task_state, model_state)` method must return a
string. This string will be logged to the console every time a report
comes up during training time. Preferrably, this string provides an
example input/output to show what the DNC model is doing.
* The `process_output(output, task_state, model_state)` method returns
the output back if no processing is needed. This method processes the
output passed to `to_string(output, task_state)`, but not to
`cost(output, task_state)`. If the output needs to be processed in
`cost(output, task_output)`, then that method needs to call it itself.
This provides ability to transform the data before
`to_string(output, task_state)` converts it to a human readable
representation. For example, if the model outputs logits, but you need
probabilitites (repeat copy task), then do that here.
* The task's class has public property `output_size`. This property must be
an integer representing the size of the output expected from the DNC model
for each iteration of this task.
"""
from .. dnc.dnc import DNC
from . dna_sequencing.dna_sequencing import DNASequencing
from . repeat_copy.repeat_copy import RepeatCopy
import sonnet as snt
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# Command-line configuration.  All knobs are exposed as tf.flags so that
# any task/model combination can be driven from the shell (see module
# docstring for usage).

# DNC parameters
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 16, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_read_heads", 1,
                        "The number of memory read heads.")
tf.flags.DEFINE_integer("hidden_size", 64,
                        "The size of LSTM hidden layer in the controller.")
tf.flags.DEFINE_string("controller", "lstm", "The type of controller to use "
                       "(options: [lstm, ff]).")

# Task parameters
tf.flags.DEFINE_integer("batch_size", 16, "The batch size used in training.")
tf.flags.DEFINE_string("task", "repeat_copy", "The task to train the DNC on.")

# RepeatCopy task parameters (used only if using the RepeatCopy task)
tf.flags.DEFINE_integer("num_bits", 4,
                        "Dimensionality of each vector to copy.")
tf.flags.DEFINE_integer("min_length", 1,
                        "Lower limit on number of vectors in the observation "
                        "pattern to copy.")
tf.flags.DEFINE_integer("max_length", 2,
                        "Upper limit on number of vectors in the observation "
                        "pattern to copy.")
tf.flags.DEFINE_integer("min_repeats", 1,
                        "Lower limit on number of copy repeats.")
tf.flags.DEFINE_integer("max_repeats", 2,
                        "Upper limit on number of copy repeats.")

# Training parameters
tf.flags.DEFINE_integer("num_training_iterations", 1000,
                        "Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 100,
                        "Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_string("checkpoint_dir", "~/tmp/dnc", "Checkpoint directory.")
tf.flags.DEFINE_string("checkpoint_basename", "model.ckpt",
                       "Base name for the checkpoint files")
tf.flags.DEFINE_integer("checkpoint_interval", -1,
                        "Checkpointing step interval (-1 means never).")
tf.flags.DEFINE_float("gpu_usage", 0.2,
                      "The percent of gpu memory to use for each process.")
tf.flags.DEFINE_boolean("test", False,
                        "Whether this is testing the model or not.")

# Optimizer parameters
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10,
                      "Epsilon used for RMSProp optimizer.")
def get_task(task_name):
  """Instantiate a task with all valid flags that provides training data.

  Args:
    task_name: one of "repeat_copy" or "dna_sequencing".

  Returns:
    A task instance whose constructor arguments were taken from FLAGS.

  Raises:
    ValueError: if ``task_name`` is not a known task.
  """
  # Dict dispatch instead of the former parallel list + list.index(), which
  # raised an opaque "x is not in list" ValueError for unknown task names.
  # Lambdas defer construction until the task is actually selected.
  task_factories = {
      "repeat_copy": lambda: RepeatCopy(
          num_bits=FLAGS.num_bits,
          batch_size=FLAGS.batch_size,
          min_length=FLAGS.min_length,
          max_length=FLAGS.max_length,
          min_repeats=FLAGS.min_repeats,
          max_repeats=FLAGS.max_repeats),
      "dna_sequencing": lambda: DNASequencing(
          batch_size=FLAGS.batch_size),
  }
  if task_name not in task_factories:
    raise ValueError(
        "Unknown task %r (valid tasks: %s)"
        % (task_name, sorted(task_factories)))
  return task_factories[task_name]()
def run_model(input, output_size):
  """Run the model on the given input and returns size output_size.

  NOTE: ``input`` shadows the builtin; the name is kept for interface
  compatibility.  Assumed to be a time-major tensor (time, batch, features)
  since dynamic_rnn is called with time_major=True -- TODO confirm.
  """
  dnc_cell = DNC(output_size,
                 memory_size=FLAGS.memory_size,
                 word_size=FLAGS.word_size,
                 num_read_heads=FLAGS.num_read_heads,
                 hidden_size=FLAGS.hidden_size)
  # At test time on repeat_copy a single example is fed (batch size 1).
  if FLAGS.test and FLAGS.task == "repeat_copy":
    prev_state = dnc_cell.initial_state(1, dtype=input.dtype)
  else:
    prev_state = dnc_cell.initial_state(FLAGS.batch_size,
                                        dtype=input.dtype)
  if FLAGS.test and FLAGS.task == "repeat_copy":
    # Unroll the network one timestep at a time so the memory-access signals
    # (read/write weights, free and allocation gates) can be recorded per
    # step and later visualized by task.to_string().
    model_state = {
        'rw': prev_state.tape_head.read_weights,
        'ww': prev_state.tape_head.write_weights,
        'fg': prev_state.tape_head.free_gate,
        'ag': prev_state.tape_head.alloc_gate,
    }
    output = None
    model_state_t = prev_state
    # 13 manual steps -- presumably the fixed test sequence length for
    # repeat_copy; confirm against the task's generated sequence length.
    for time_index in range(13):
      output_t, model_state_t = tf.nn.dynamic_rnn(
          cell=dnc_cell,
          inputs=tf.expand_dims(input[time_index, :, :], 0),
          time_major=True,
          initial_state=model_state_t)
      if output is None:
        output = output_t
      else:
        output = tf.concat([output, output_t], 0)
        model_state['rw'] = tf.concat(
            [model_state['rw'], model_state_t.tape_head.read_weights], 0)
        model_state['ww'] = tf.concat(
            [model_state['ww'], model_state_t.tape_head.write_weights], 0)
        model_state['fg'] = tf.concat(
            [model_state['fg'], model_state_t.tape_head.free_gate], 0)
        model_state['ag'] = tf.concat(
            [model_state['ag'], model_state_t.tape_head.alloc_gate], 0)
  else:
    # Normal path: let dynamic_rnn unroll the whole sequence in one call.
    output, model_state = tf.nn.dynamic_rnn(
        cell=dnc_cell,
        inputs=input,
        time_major=True,
        initial_state=prev_state)
  return output, model_state
def run_lstm_baseline(input, output_size):
  """Run a basic LSTM basline model on given input."""
  # Plain sonnet LSTM with the same hidden size as the requested output,
  # used as a point of comparison against the DNC.
  lstm_cell = snt.LSTM(hidden_size=output_size)
  zero_state = lstm_cell.initial_state(FLAGS.batch_size, dtype=input.dtype)
  outputs, final_state = tf.nn.dynamic_rnn(
      cell=lstm_cell,
      inputs=input,
      time_major=True,
      initial_state=zero_state)
  return outputs, final_state
def get_config():
  """Return configuration for a tf.Session using a fraction of GPU memory.

  Returns:
    A tf.ConfigProto with per_process_gpu_memory_fraction set from FLAGS.
  """
  config = tf.ConfigProto()
  config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_usage
  # BUG FIX: the return statement was missing, so this function always
  # returned None and callers (e.g. SingularMonitoredSession(config=...))
  # silently fell back to the default session configuration.
  return config
def train():
  """Train the DNC and periodically report the loss.

  Builds the graph (task data, model, loss, clipped-gradient RMSProp step),
  then runs the training loop inside a SingularMonitoredSession, logging the
  average loss every FLAGS.report_interval iterations.  When FLAGS.test is
  set, no optimizer step is run -- only the loss is evaluated.

  Returns:
    The task instance that was trained (or evaluated).
  """
  task = get_task(FLAGS.task)
  task_state = task()
  output, model_state = run_model(task_state.input, task.output_size)
  output_processed = task.process_output(output, task_state, model_state)
  # responsibility of task.cost to process output if desired
  train_loss = task.cost(output, task_state)
  # Clip global gradient norm to stabilize training (standard for DNC/LSTM).
  trainable_variables = tf.trainable_variables()
  grads, _ = tf.clip_by_global_norm(
      tf.gradients(train_loss, trainable_variables), FLAGS.max_grad_norm)
  global_step = tf.Variable(0, trainable=False, name='global_step')
  optimizer = tf.train.RMSPropOptimizer(
      FLAGS.learning_rate, epsilon=FLAGS.optimizer_epsilon)
  train_step = optimizer.apply_gradients(
      zip(grads, trainable_variables), global_step=global_step)
  saver = tf.train.Saver()
  # Periodic checkpointing is opt-in (-1 disables it, per the flag's help).
  if FLAGS.checkpoint_interval > 0:
    hooks = [
        tf.train.CheckpointSaverHook(
            checkpoint_dir=FLAGS.checkpoint_dir,
            checkpoint_basename=FLAGS.checkpoint_basename,
            save_steps=FLAGS.checkpoint_interval,
            saver=saver)
    ]
  else:
    hooks = []
  # Training time
  with tf.train.SingularMonitoredSession(
      hooks=hooks, config=get_config(), checkpoint_dir=FLAGS.checkpoint_dir,
  ) as sess:
    # Resume from the restored global step if a checkpoint was loaded.
    start_iteration = sess.run(global_step)
    total_loss = 0
    for train_iteration in range(start_iteration,
                                 FLAGS.num_training_iterations):
      if FLAGS.test:
        # Evaluation only: never run the optimizer update.
        loss = sess.run(train_loss)
      else:
        _, loss = sess.run([train_step, train_loss])
      total_loss += loss
      # report periodically
      if (train_iteration + 1) % FLAGS.report_interval == 0:
        task_state_eval, output_eval, model_state_eval = sess.run(
            [task_state, output_processed, model_state])
        report_string = task.to_string(
            output_eval, task_state_eval, model_state_eval,
            verbose=FLAGS.test)
        if not FLAGS.test:
          tf.logging.info(
              "Train Iteration %d: Avg training loss: %f.\n",
              train_iteration, total_loss / FLAGS.report_interval)
          # reset total_loss to report the interval's loss only
          total_loss = 0
        if report_string != "":
          tf.logging.info(report_string)
  return task
def main(unused):
  """Main method for this app.

  ``unused`` receives argv from tf.app.run() and is intentionally ignored;
  all configuration comes from the flags defined above.
  """
  tf.logging.set_verbosity(3)  # Print INFO log messages.
  train()
# Parse flags and dispatch to main() when run as a script.
if __name__ == "__main__":
  tf.app.run()
| mit |
dturner-tw/pants | src/python/pants/source/payload_fields.py | 1 | 5877 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from hashlib import sha1
from pants.base.build_environment import get_buildroot
from pants.base.payload_field import PayloadField
from pants.base.validation import assert_list
from pants.source.source_root import SourceRootConfig
from pants.source.wrapped_globs import FilesetWithSpec, matches_filespec
class SourcesField(PayloadField):
  """A PayloadField encapsulating specified sources."""

  def __init__(self, sources_rel_path, sources, ref_address=None, filespec=None):
    """
    :param sources_rel_path: path that sources parameter may be relative to
    :param sources: list of strings representing relative file paths
    :param ref_address: optional address spec of target that provides these sources
    :param filespec: glob and exclude data that generated this set of sources
    """
    self._rel_path = sources_rel_path
    self._source_paths = self._validate_source_paths(sources)
    self._ref_address = ref_address
    self._filespec = filespec

  @property
  def source_root(self):
    """:returns: the source root for these sources, or None if they're not under a source root."""
    # TODO: It's a shame that we have to access the singleton directly here, instead of getting
    # the SourceRoots instance from context, as tasks do. In the new engine we could inject
    # this into the target, rather than have it reach out for global singletons.
    return SourceRootConfig.global_instance().get_source_roots().find_by_path(self.rel_path)

  @property
  def filespec(self):
    """Glob/exclude data that generated this set of sources (may be None)."""
    return self._filespec

  def matches(self, path):
    """Return True if ``path`` matches this field's filespec."""
    return matches_filespec(path, self.filespec)

  @property
  def rel_path(self):
    return self._rel_path

  @property
  def source_paths(self):
    return self._source_paths

  @property
  def address(self):
    """Returns the address this sources field refers to (used by some derived classses)"""
    return self._ref_address

  @property
  def num_chunking_units(self):
    """For tasks that require chunking, this is the number of chunk units this field represents.

    By default, this is just the number of sources. Other heuristics might consider the number
    of bytes or lines in the combined source files.
    """
    if self._source_paths:
      return len(self._source_paths)
    return 1

  def has_sources(self, extension=None):
    """Return True if there are any sources, optionally filtered by file extension.

    BUG FIX: calling this with the default ``extension=None`` used to raise
    ``TypeError`` from ``str.endswith(None)``; a None extension now means
    "any extension".
    """
    if not self._source_paths:
      return False
    if extension is None:
      return True
    return any(source.endswith(extension) for source in self._source_paths)

  def relative_to_buildroot(self):
    """All sources joined with ``self.rel_path``."""
    return [os.path.join(self.rel_path, source) for source in self.source_paths]

  def _compute_fingerprint(self):
    # Fingerprint covers the rel_path plus every source's path and content,
    # in sorted order so the hash is stable across dict/list ordering.
    hasher = sha1()
    hasher.update(self._rel_path)
    for source in sorted(self.relative_to_buildroot()):
      hasher.update(source)
      with open(os.path.join(get_buildroot(), source), 'rb') as f:
        hasher.update(f.read())
    return hasher.hexdigest()

  def _validate_source_paths(self, sources):
    # A FilesetWithSpec is already validated lazily; anything else must be a
    # plain list of path strings.
    if isinstance(sources, FilesetWithSpec):
      return sources
    else:
      return assert_list(sources, key_arg='sources')
class DeferredSourcesField(SourcesField):
  """A SourcesField that isn't populated immediately when the graph is constructed.

  You must subclass this and provide a fingerprint implementation. Requires a task
  to call populate() to provide its contents later during processing. For example,
  if sources are in an archive, you might use the fingerprint of the archive. If they
  are from an external artifact, you might take a fingerprint of the name and version of
  the artifact.
  """

  class AlreadyPopulatedError(Exception):
    """Raised when a DeferredSourcesField has already been populated."""
    pass

  class NotPopulatedError(Exception):
    """ Raised when the PayloadField has not been populated yet."""

    def __init__(self):
      # BUG FIX: this previously called super(Exception, self).__init__(...),
      # which names the wrong class in the MRO (it skips Exception's own
      # __init__).  Name the exception class itself, as super() requires.
      super(DeferredSourcesField.NotPopulatedError, self).__init__(
        "Field requires a call to populate() before this method can be called.")

  def __init__(self, ref_address):
    self._populated = False
    super(DeferredSourcesField, self).__init__(sources_rel_path=None, sources=[],
                                               ref_address=ref_address)

  def populate(self, sources, rel_path=None):
    """Call this method to set the list of files represented by the target.

    Intended to be invoked by the DeferredSourcesMapper task.
    :param list sources: strings representing absolute paths of files to be included in the source set
    :param string rel_path: common prefix for files.
    """
    if self._populated:
      raise self.AlreadyPopulatedError("Called with rel_path={rel_path} sources={sources}"
                                       .format(rel_path=rel_path, sources=sources))
    self._rel_path = rel_path
    self._source_paths = self._validate_source_paths(sources)
    self._populated = True

  def _validate_populated(self):
    # Guard used by every accessor below: reading before populate() is a bug.
    if not self._populated:
      raise self.NotPopulatedError()

  @property
  def rel_path(self):
    self._validate_populated()
    return self._rel_path

  @property
  def source_paths(self):
    self._validate_populated()
    return self._source_paths

  def matches(self, path):
    if not self._populated:
      raise self.NotPopulatedError()
    return matches_filespec(path, self.filespec)

  def _compute_fingerprint(self):
    """A subclass must provide an implementation of _compute_fingerprint that can return a valid
    fingerprint even if the sources aren't unpacked yet.
    """
    if not self._populated:
      raise self.NotPopulatedError()
    return super(DeferredSourcesField, self)._compute_fingerprint()
| apache-2.0 |
TeslaProject/external_chromium_org | tools/telemetry/third_party/pyserial/serial/serialutil.py | 143 | 20191 | #! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# compatibility for older Python < 2.6
# Probe for the builtin names; on interpreters that lack them, install
# str-based substitutes so the rest of the module can use bytes/bytearray
# uniformly.
try:
    bytes
    bytearray
except (NameError, AttributeError):
    # Python older than 2.6 do not have these types. Like for Python 2.6 they
    # should behave like str. For Python older than 3.0 we want to work with
    # strings anyway, only later versions have a true bytes type.
    bytes = str
    # bytearray is a mutable type that is easily turned into an instance of
    # bytes
    class bytearray(list):
        # for bytes(bytearray()) usage
        def __str__(self): return ''.join(self)
        def __repr__(self): return 'bytearray(%r)' % ''.join(self)
        # append automatically converts integers to characters
        def append(self, item):
            if isinstance(item, str):
                list.append(self, item)
            else:
                list.append(self, chr(item))
        # += : append each byte of the other sequence in turn
        def __iadd__(self, other):
            for byte in other:
                self.append(byte)
            return self
        # slicing/indexing mirror the real bytearray: slices stay bytearray,
        # single items come back as integer ordinals
        def __getslice__(self, i, j):
            return bytearray(list.__getslice__(self, i, j))
        def __getitem__(self, item):
            if isinstance(item, slice):
                return bytearray(list.__getitem__(self, item))
            else:
                return ord(list.__getitem__(self, item))
        def __eq__(self, other):
            # allow comparison against plain strings by coercing them first
            if isinstance(other, basestring):
                other = bytearray(other)
            return list.__eq__(self, other)
# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
# isn't returning the contents (very unfortunate). Therefore we need special
# cases and test for it. Ensure that there is a ``memoryview`` object for older
# Python versions. This is easier than making every test dependent on its
# existence.
try:
    memoryview
except (NameError, AttributeError):
    # implementation does not matter as we do not really use it.
    # it just must not inherit from something else we might care for.
    class memoryview:
        pass
# all Python versions prior 3.x convert ``str([17])`` to '[17]' instead of '\x11'
# so a simple ``bytes(sequence)`` doesn't work for all versions
def to_bytes(seq):
    """convert a sequence to a bytes type"""
    # Fast paths: already bytes-like objects need at most a cheap conversion.
    if isinstance(seq, bytes):
        return seq
    if isinstance(seq, bytearray):
        return bytes(seq)
    if isinstance(seq, memoryview):
        return seq.tobytes()
    # Generic path: accumulate element by element; bytearray.append accepts
    # ints (and, via the emulation class above, str on ancient Pythons).
    buf = bytearray()
    for element in seq:
        buf.append(element)
    return bytes(buf)
# create control bytes
XON  = to_bytes([17])   # DC1, resume transmission (software flow control)
XOFF = to_bytes([19])   # DC3, pause transmission
CR = to_bytes([13])
LF = to_bytes([10])

# Port-configuration constants used throughout the module.
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)

# Human-readable names for the parity constants (used in __repr__/UIs).
PARITY_NAMES = {
    PARITY_NONE:  'None',
    PARITY_EVEN:  'Even',
    PARITY_ODD:   'Odd',
    PARITY_MARK:  'Mark',
    PARITY_SPACE: 'Space',
}
class SerialException(IOError):
    """Base class for serial port related exceptions."""


class SerialTimeoutException(SerialException):
    """Write timeouts give an exception"""


# Shared singleton instances raised by the platform-specific implementations.
writeTimeoutError = SerialTimeoutException('Write timeout')
portNotOpenError = SerialException('Attempting to use a port that is not open')
class FileLike(object):
    """An abstract file like class.

    This class implements readline and readlines based on read and
    writelines based on write.
    This class is used to provide the above functions for to Serial
    port objects.

    Note that when the serial port was opened with _NO_ timeout that
    readline blocks until it sees a newline (or the specified size is
    reached) and that readlines would never return and therefore
    refuses to work (it raises an exception in this case)!
    """

    def __init__(self):
        self.closed = True

    def close(self):
        self.closed = True

    # so that ports are closed when objects are discarded
    def __del__(self):
        """Destructor. Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail. Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    def writelines(self, sequence):
        """Write each item of ``sequence`` via self.write (no newlines added)."""
        for line in sequence:
            self.write(line)

    def flush(self):
        """flush of file like objects"""
        pass

    # iterator for e.g. "for line in Serial(0): ..." usage
    def next(self):
        # Python 2 iterator protocol; stops on the first empty/timeout read.
        line = self.readline()
        if not line: raise StopIteration
        return line

    def __iter__(self):
        return self

    def readline(self, size=None, eol=LF):
        """read a line which is terminated with end-of-line (eol) character
        ('\n' by default) or until timeout."""
        # Byte-at-a-time read so a per-character timeout ends the line early.
        leneol = len(eol)
        line = bytearray()
        while True:
            c = self.read(1)
            if c:
                line += c
                if line[-leneol:] == eol:
                    break
                if size is not None and len(line) >= size:
                    break
            else:
                # empty read => timeout expired
                break
        return bytes(line)

    def readlines(self, sizehint=None, eol=LF):
        """read a list of lines, until timeout.
        sizehint is ignored."""
        if self.timeout is None:
            raise ValueError("Serial port MUST have enabled timeout for this function!")
        leneol = len(eol)
        lines = []
        while True:
            line = self.readline(eol=eol)
            if line:
                lines.append(line)
                if line[-leneol:] != eol:    # was the line received with a timeout?
                    break
            else:
                break
        return lines

    def xreadlines(self, sizehint=None):
        """Read lines, implemented as generator. It will raise StopIteration on
        timeout (empty read). sizehint is ignored."""
        while True:
            line = self.readline()
            if not line: break
            yield line

    # other functions of file-likes - not used by pySerial
    #~ readinto(b)
    def seek(self, pos, whence=0):
        raise IOError("file is not seekable")

    def tell(self):
        raise IOError("file is not seekable")

    def truncate(self, n=None):
        raise IOError("file is not seekable")

    def isatty(self):
        return False
class SerialBase(object):
    """Serial port base class. Provides __init__ function and properties to
    get/set port settings.

    Setters re-apply the configuration to an open port via _reconfigurePort()
    (implemented by platform-specific subclasses); on a closed port the value
    is only validated and stored, and errors surface when the port is opened.
    """

    # default values, may be overridden in subclasses that do not support all values
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
                 576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
                 3000000, 3500000, 4000000)
    BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
    PARITIES  = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
    STOPBITS  = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)

    def __init__(self,
                 port = None,           # number of device, numbering starts at
                                        # zero. if everything fails, the user
                                        # can specify a device string, note
                                        # that this isn't portable anymore
                                        # port will be opened if one is specified
                 baudrate=9600,         # baud rate
                 bytesize=EIGHTBITS,    # number of data bits
                 parity=PARITY_NONE,    # enable parity checking
                 stopbits=STOPBITS_ONE, # number of stop bits
                 timeout=None,          # set a timeout value, None to wait forever
                 xonxoff=False,         # enable software flow control
                 rtscts=False,          # enable RTS/CTS flow control
                 writeTimeout=None,     # set a timeout for writes
                 dsrdtr=False,          # None: use rtscts setting, dsrdtr override if True or False
                 interCharTimeout=None  # Inter-character timeout, None to disable
                 ):
        """Initialize comm port object. If a port is given, then the port will be
        opened immediately. Otherwise a Serial port object in closed state
        is returned."""

        self._isOpen   = False
        self._port     = None           # correct value is assigned below through properties
        self._baudrate = None           # correct value is assigned below through properties
        self._bytesize = None           # correct value is assigned below through properties
        self._parity   = None           # correct value is assigned below through properties
        self._stopbits = None           # correct value is assigned below through properties
        self._timeout  = None           # correct value is assigned below through properties
        self._writeTimeout = None       # correct value is assigned below through properties
        self._xonxoff  = None           # correct value is assigned below through properties
        self._rtscts   = None           # correct value is assigned below through properties
        self._dsrdtr   = None           # correct value is assigned below through properties
        self._interCharTimeout = None   # correct value is assigned below through properties

        # assign values using get/set methods using the properties feature
        # (this validates every argument via the setters)
        self.port     = port
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.parity   = parity
        self.stopbits = stopbits
        self.timeout  = timeout
        self.writeTimeout = writeTimeout
        self.xonxoff  = xonxoff
        self.rtscts   = rtscts
        self.dsrdtr   = dsrdtr
        self.interCharTimeout = interCharTimeout

        if port is not None:
            self.open()

    def isOpen(self):
        """Check if the port is opened."""
        return self._isOpen

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
    # TODO: these are not really needed as the is the BAUDRATES etc. attribute...
    # maybe i remove them before the final release...

    def getSupportedBaudrates(self):
        return [(str(b), b) for b in self.BAUDRATES]

    def getSupportedByteSizes(self):
        return [(str(b), b) for b in self.BYTESIZES]

    def getSupportedStopbits(self):
        return [(str(b), b) for b in self.STOPBITS]

    def getSupportedParities(self):
        return [(PARITY_NAMES[b], b) for b in self.PARITIES]

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -

    def setPort(self, port):
        """Change the port. The attribute portstr is set to a string that
        contains the name of the port."""

        was_open = self._isOpen
        if was_open: self.close()
        if port is not None:
            # ints are device numbers, strings are used verbatim
            if isinstance(port, basestring):
                self.portstr = port
            else:
                self.portstr = self.makeDeviceName(port)
        else:
            self.portstr = None
        self._port = port
        self.name = self.portstr
        if was_open: self.open()

    def getPort(self):
        """Get the current port setting. The value that was passed on init or using
        setPort() is passed back. See also the attribute portstr which contains
        the name of the port as a string."""
        return self._port

    port = property(getPort, setPort, doc="Port setting")

    def setBaudrate(self, baudrate):
        """Change baud rate. It raises a ValueError if the port is open and the
        baud rate is not possible. If the port is closed, then the value is
        accepted and the exception is raised when the port is opened."""
        try:
            b = int(baudrate)
        except TypeError:
            raise ValueError("Not a valid baudrate: %r" % (baudrate,))
        else:
            if b <= 0:
                raise ValueError("Not a valid baudrate: %r" % (baudrate,))
            self._baudrate = b
            if self._isOpen:  self._reconfigurePort()

    def getBaudrate(self):
        """Get the current baud rate setting."""
        return self._baudrate

    baudrate = property(getBaudrate, setBaudrate, doc="Baud rate setting")

    def setByteSize(self, bytesize):
        """Change byte size."""
        if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
        self._bytesize = bytesize
        if self._isOpen: self._reconfigurePort()

    def getByteSize(self):
        """Get the current byte size setting."""
        return self._bytesize

    bytesize = property(getByteSize, setByteSize, doc="Byte size setting")

    def setParity(self, parity):
        """Change parity setting."""
        if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,))
        self._parity = parity
        if self._isOpen: self._reconfigurePort()

    def getParity(self):
        """Get the current parity setting."""
        return self._parity

    parity = property(getParity, setParity, doc="Parity setting")

    def setStopbits(self, stopbits):
        """Change stop bits size."""
        if stopbits not in self.STOPBITS: raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
        self._stopbits = stopbits
        if self._isOpen: self._reconfigurePort()

    def getStopbits(self):
        """Get the current stop bits setting."""
        return self._stopbits

    stopbits = property(getStopbits, setStopbits, doc="Stop bits setting")

    def setTimeout(self, timeout):
        """Change timeout setting."""
        if timeout is not None:
            try:
                timeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % (timeout,))
            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
        self._timeout = timeout
        if self._isOpen: self._reconfigurePort()

    def getTimeout(self):
        """Get the current timeout setting."""
        return self._timeout

    timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")

    def setWriteTimeout(self, timeout):
        """Change timeout setting."""
        if timeout is not None:
            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
            try:
                timeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % timeout)

        self._writeTimeout = timeout
        if self._isOpen: self._reconfigurePort()

    def getWriteTimeout(self):
        """Get the current timeout setting."""
        return self._writeTimeout

    writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")

    def setXonXoff(self, xonxoff):
        """Change XON/XOFF setting."""
        self._xonxoff = xonxoff
        if self._isOpen: self._reconfigurePort()

    def getXonXoff(self):
        """Get the current XON/XOFF setting."""
        return self._xonxoff

    xonxoff = property(getXonXoff, setXonXoff, doc="XON/XOFF setting")

    def setRtsCts(self, rtscts):
        """Change RTS/CTS flow control setting."""
        self._rtscts = rtscts
        if self._isOpen: self._reconfigurePort()

    def getRtsCts(self):
        """Get the current RTS/CTS flow control setting."""
        return self._rtscts

    rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")

    def setDsrDtr(self, dsrdtr=None):
        """Change DsrDtr flow control setting."""
        if dsrdtr is None:
            # if not set, keep backwards compatibility and follow rtscts setting
            self._dsrdtr = self._rtscts
        else:
            # if defined independently, follow its value
            self._dsrdtr = dsrdtr
        if self._isOpen: self._reconfigurePort()

    def getDsrDtr(self):
        """Get the current DSR/DTR flow control setting."""
        return self._dsrdtr

    dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")

    def setInterCharTimeout(self, interCharTimeout):
        """Change inter-character timeout setting."""
        if interCharTimeout is not None:
            if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout)
            try:
                interCharTimeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % interCharTimeout)

        self._interCharTimeout = interCharTimeout
        if self._isOpen: self._reconfigurePort()

    def getInterCharTimeout(self):
        """Get the current inter-character timeout setting."""
        return self._interCharTimeout

    interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -

    # names of all settings handled by getSettingsDict/applySettingsDict
    _SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
                 'dsrdtr', 'rtscts', 'timeout', 'writeTimeout', 'interCharTimeout')

    def getSettingsDict(self):
        """Get current port settings as a dictionary. For use with
        applySettingsDict"""
        return dict([(key, getattr(self, '_'+key)) for key in self._SETTINGS])

    def applySettingsDict(self, d):
        """apply stored settings from a dictionary returned from
        getSettingsDict. it's allowed to delete keys from the dictionary. these
        values will simply left unchanged."""
        for key in self._SETTINGS:
            if d[key] != getattr(self, '_'+key):    # check against internal "_" value
                setattr(self, key, d[key])          # set non "_" value to use properties write function

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -

    def __repr__(self):
        """String representation of the current port settings and its state."""
        return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
            self.__class__.__name__,
            id(self),
            self._isOpen,
            self.portstr,
            self.baudrate,
            self.bytesize,
            self.parity,
            self.stopbits,
            self.timeout,
            self.xonxoff,
            self.rtscts,
            self.dsrdtr,
        )

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
    # compatibility with io library

    def readable(self): return True
    def writable(self): return True
    def seekable(self): return False
    def readinto(self, b):
        # Fill the caller-supplied buffer; fall back to array('b') assignment
        # for buffer types that reject bytes slice assignment (Python 2).
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError, err:
            import array
            if not isinstance(b, array.array):
                raise err
            b[:n] = array.array('b', data)
        return n
# Self-test: instantiate a closed SerialBase and print its default settings.
if __name__ == '__main__':
    import sys
    s = SerialBase()
    sys.stdout.write('port name: %s\n' % s.portstr)
    sys.stdout.write('baud rates: %s\n' % s.getSupportedBaudrates())
    sys.stdout.write('byte sizes: %s\n' % s.getSupportedByteSizes())
    sys.stdout.write('parities: %s\n' % s.getSupportedParities())
    sys.stdout.write('stop bits: %s\n' % s.getSupportedStopbits())
    sys.stdout.write('%s\n' % s)
| bsd-3-clause |
open-synergy/donation | donation/donation_campaign.py | 3 | 1834 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Donation module for Odoo
# Copyright (C) 2014-2015 Barroux Abbey (www.barroux.org)
# Copyright (C) 2014-2015 Akretion France (www.akretion.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class DonationCampaign(models.Model):
    """Donation campaign: a short code plus a name, displayed as '[CODE] Name'."""
    _name = 'donation.campaign'
    _description = 'Code attributed for a Donation Campaign'
    _order = 'code'
    _rec_name = 'display_name'          # records are labelled by the computed field below

    # Old Odoo API: @api.one runs this per record; stored and recomputed
    # whenever 'code' or 'name' changes.
    @api.one
    @api.depends('code', 'name')
    def _compute_display_name(self):
        # "[CODE] Name" when a code is set, otherwise just the name.
        name = self.name
        if self.code:
            name = u'[%s] %s' % (self.code, name)
        self.display_name = name

    code = fields.Char(string='Code', size=10)
    name = fields.Char(string='Name', required=True)
    display_name = fields.Char(
        string='Display Name', compute='_compute_display_name',
        readonly=True, store=True)
    start_date = fields.Date(
        string='Start Date', default=fields.Date.context_today)
    nota = fields.Text(string='Notes')
| agpl-3.0 |
Arundhatii/erpnext | erpnext/stock/report/item_prices/item_prices.py | 41 | 4292 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt
def execute(filters=None):
    """Report entry point: return (columns, rows) for the Item Prices report.

    One row per item, combining last purchase rate, valuation rate,
    selling/buying price lists and BOM rate.
    """
    if not filters: filters = {}

    columns = get_columns(filters)
    item_map = get_item_details()
    pl = get_price_list()
    last_purchase_rate = get_last_purchase_rate()
    bom_rate = get_item_bom_rate()
    val_rate_map = get_valuation_rate()

    # Deferred import avoids a circular dependency at module load time --
    # presumably; confirm before moving it to the top of the file.
    from erpnext.accounts.utils import get_currency_precision
    precision = get_currency_precision() or 2

    data = []
    # Sorted by item code so the report order is deterministic.
    for item in sorted(item_map):
        data.append([item, item_map[item]["item_name"], item_map[item]["item_group"],
            item_map[item]["description"], item_map[item]["stock_uom"],
            flt(last_purchase_rate.get(item, 0), precision),
            flt(val_rate_map.get(item, 0), precision),
            pl.get(item, {}).get("Selling"),
            pl.get(item, {}).get("Buying"),
            flt(bom_rate.get(item, 0), precision)
        ])

    return columns, data
def get_columns(filters):
    """return columns based on filters"""
    # Each entry is "<label>:<fieldtype>/<options>:<width>" as expected by
    # the frappe query-report framework; labels go through translation.
    return [
        _("Item") + ":Link/Item:100",
        _("Item Name") + "::150",
        _("Item Group") + ":Link/Item Group:125",
        _("Description") + "::150",
        _("UOM") + ":Link/UOM:80",
        _("Last Purchase Rate") + ":Currency:90",
        _("Valuation Rate") + ":Currency:80",
        _("Sales Price List") + "::180",
        _("Purchase Price List") + "::180",
        _("BOM Rate") + ":Currency:90",
    ]
def get_item_details():
    """returns all items details"""
    details = {}
    rows = frappe.db.sql("select name, item_group, item_name, description, \
        stock_uom from tabItem \
        order by item_code, item_group", as_dict=1)
    # setdefault keeps the first row seen per item name (names are unique keys
    # in tabItem, so this is effectively a plain assignment).
    for row in rows:
        details.setdefault(row.name, row)
    return details
def get_price_list():
    """Get selling & buying price list of every item.

    Returns {item_code: {"Selling": "cur rate - list, ...",
                         "Buying": "..."}} with the prices of every enabled
    price list joined into one display string per side.
    """
    rate = {}

    price_list = frappe.db.sql("""select ip.item_code, ip.buying, ip.selling,
        concat(ifnull(cu.symbol,ip.currency), " ", round(ip.price_list_rate,2), " - ", ip.price_list) as price
        from `tabItem Price` ip, `tabPrice List` pl, `tabCurrency` cu
        where ip.price_list=pl.name and pl.currency=cu.name and pl.enabled=1""", as_dict=1)

    # Group formatted price strings per item, split by buying/selling flag.
    for j in price_list:
        if j.price:
            rate.setdefault(j.item_code, {}).setdefault("Buying" if j.buying else "Selling", []).append(j.price)

    item_rate_map = {}
    # Collapse each list of price strings into a single comma-separated string.
    for item in rate:
        for buying_or_selling in rate[item]:
            item_rate_map.setdefault(item, {}).setdefault(buying_or_selling,
                ", ".join(rate[item].get(buying_or_selling, [])))

    return item_rate_map
def get_last_purchase_rate():
    """Return {item_code: base_rate} of the most recent purchase of each item.

    Unions Purchase Orders and Purchase Receipts (submitted only), orders by
    posting date descending, then relies on GROUP BY picking the first row
    per item -- i.e. the latest transaction.
    """
    item_last_purchase_rate_map = {}

    query = """select * from (select
        result.item_code,
        result.base_rate
        from (
            (select
                po_item.item_code,
                po_item.item_name,
                po.transaction_date as posting_date,
                po_item.base_price_list_rate,
                po_item.discount_percentage,
                po_item.base_rate
            from `tabPurchase Order` po, `tabPurchase Order Item` po_item
            where po.name = po_item.parent and po.docstatus = 1)
            union
            (select
                pr_item.item_code,
                pr_item.item_name,
                pr.posting_date,
                pr_item.base_price_list_rate,
                pr_item.discount_percentage,
                pr_item.base_rate
            from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
            where pr.name = pr_item.parent and pr.docstatus = 1)
        ) result
        order by result.item_code asc, result.posting_date desc) result_wrapper
        group by item_code"""

    for d in frappe.db.sql(query, as_dict=1):
        item_last_purchase_rate_map.setdefault(d.item_code, d.base_rate)

    return item_last_purchase_rate_map
def get_item_bom_rate():
    """Get BOM rate of an item from BOM"""
    bom_rates = {}
    # Per-unit cost from each item's active default BOM.
    query_result = frappe.db.sql("""select item, (total_cost/quantity) as bom_rate
        from `tabBOM` where is_active=1 and is_default=1""", as_dict=1)
    for row in query_result:
        bom_rates.setdefault(row.item, flt(row.bom_rate))
    return bom_rates
def get_valuation_rate():
    """Get an average valuation rate of an item from all warehouses"""
    item_val_rate_map = {}
    rows = frappe.db.sql("""select item_code,
        sum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate
        from tabBin where actual_qty > 0 group by item_code""", as_dict=1)
    for row in rows:
        # First row per item wins (items are unique thanks to GROUP BY).
        if row.item_code not in item_val_rate_map:
            item_val_rate_map[row.item_code] = row.val_rate
    return item_val_rate_map
| gpl-3.0 |
mgarski/stellr | tests/pool_test.py | 1 | 6568 | # Copyright 2011-2012 Michael Garski (mgarski@mac.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
import unittest
import gevent.queue
from gevent_zeromq import zmq
import stellr
ADDRESS = 'tcp://1.2.3.4:69'
class PoolTest(unittest.TestCase):
    """Perform tests on the pool module.

    All collaborators (zmq context, sockets, gevent queues) are mocked, so
    these tests exercise PoolManager / zmq_socket_pool logic only.
    """

    def pool_manager_creation_test(self):
        """Test the creation of the PoolManager."""
        context = Mock()
        p = stellr.pool.PoolManager(context, 42)
        self.assertEqual(p.context, context)
        self.assertEqual(p.size, 42)
        self.assertEqual(p.pools, {})

    @patch('stellr.pool.PoolManager')
    def pool_creation_test(self, pool_mgr):
        """Test the creation of the pool through the context."""
        mgr = Mock()
        pool_mgr.return_value = mgr
        context = Mock()
        stellr.pool.zmq_socket_pool.create(context, 69)
        self.assertEqual(stellr.pool.zmq_socket_pool.pool, mgr)
        pool_mgr.assert_called_once_with(context, 69)

    def create_socket_test(self):
        """Test the _create_socket method."""
        context = Mock()
        p = stellr.pool.PoolManager(context)
        socket = Mock()
        context.socket.return_value = socket
        s = p._create_socket(ADDRESS)
        self.assertEqual(s, socket)
        context.socket.assert_called_once_with(zmq.REQ)
        socket.connect.assert_called_once_with(ADDRESS)

    def destroy_socket_test(self):
        """Test the destroy_socket method."""
        p = stellr.pool.PoolManager(Mock())
        socket = Mock()
        p.destroy_socket(socket)
        socket.setsockopt.assert_called_once_with(zmq.LINGER, 0)

    @patch('gevent.queue.Queue')
    def get_socket_empty_queue_test(self, q):
        """Test getting a socket with an empty pool."""
        queue = Mock()
        queue.empty.return_value = True
        q.return_value = queue
        context = Mock()
        p = stellr.pool.PoolManager(context)
        # Replace the private factory so the test controls the socket created.
        socket = Mock()
        socket.return_value = socket
        p._create_socket = socket
        s = p.get_socket(ADDRESS)
        self.assertEqual(s, socket)
        self.assertEqual(1, queue.empty.call_count)
        self.assertEqual(1, len(p.pools))

    @patch('gevent.queue.Queue')
    def get_socket_empty_queue_empty_error_test(self, q):
        """Test getting a socket from a pool that raises gevent Empty."""
        queue = Mock()
        queue.empty.return_value = False
        queue.get_nowait.side_effect = gevent.queue.Empty
        q.return_value = queue
        context = Mock()
        p = stellr.pool.PoolManager(context)
        socket = Mock()
        socket.return_value = socket
        p._create_socket = socket
        s = p.get_socket(ADDRESS)
        self.assertEqual(s, socket)
        self.assertEqual(1, queue.empty.call_count)
        self.assertEqual(1, len(p.pools))

    @patch('gevent.queue.Queue')
    def get_socket_non_empty_queue_test(self, q):
        """Test getting a socket with a non-empty pool."""
        socket = Mock()
        queue = Mock()
        queue.empty.return_value = False
        queue.get_nowait.return_value = socket
        q.return_value = queue
        context = Mock()
        p = stellr.pool.PoolManager(context)
        p.pools[ADDRESS] = queue
        s = p.get_socket(ADDRESS)
        self.assertEqual(s, socket)
        self.assertEqual(1, queue.empty.call_count)
        self.assertEqual(1, len(p.pools))

    def replace_socket_success_test(self):
        """Test successfully replacing a socket."""
        p = stellr.pool.PoolManager(Mock())
        q = Mock()
        q.full.return_value = False
        p.pools[ADDRESS] = q
        s = Mock()
        p.replace_socket(ADDRESS, s)
        self.assertEqual(1, q.full.call_count)
        q.put_nowait.assert_called_once_with(s)

    def replace_socket_no_queue_test(self):
        """Test replacing a socket with no pool to put it in."""
        p = stellr.pool.PoolManager(Mock())
        destroy = Mock()
        p.destroy_socket = destroy
        socket = Mock()
        p.replace_socket(ADDRESS, socket)
        destroy.assert_called_once_with(socket)

    def replace_socket_full_queue_test(self):
        """Test replacing a socket into a full pool."""
        q = Mock()
        q.full.return_value = True
        socket = Mock()
        p = stellr.pool.PoolManager(Mock())
        p.pools[ADDRESS] = q
        destroy = Mock()
        p.destroy_socket = destroy
        p.replace_socket(ADDRESS, socket)
        destroy.assert_called_once_with(socket)

    def replace_socket_full_queue_full_error_test(self):
        """Test replacing a socket into a pool that raises gevent Full."""
        q = Mock()
        q.full.return_value = False
        q.put_nowait.side_effect = gevent.queue.Full
        socket = Mock()
        p = stellr.pool.PoolManager(Mock())
        p.pools[ADDRESS] = q
        destroy = Mock()
        p.destroy_socket = destroy
        p.replace_socket(ADDRESS, socket)
        destroy.assert_called_once_with(socket)

    def enter_context_test(self):
        """Test entering the context."""
        z = stellr.pool.zmq_socket_pool(ADDRESS)
        socket = Mock()
        pool = Mock()
        pool.get_socket.return_value = socket
        stellr.pool.zmq_socket_pool.pool = pool
        with z as f:
            self.assertEqual(f, socket)
        pool.get_socket.assert_called_once_with(ADDRESS)
        pool.replace_socket.assert_called_once_with(ADDRESS, socket)

    def enter_context_error_test(self):
        """Test that an error raised inside the context destroys the socket."""
        z = stellr.pool.zmq_socket_pool(ADDRESS)
        socket = Mock()
        pool = Mock()
        pool.get_socket.return_value = socket
        stellr.pool.zmq_socket_pool.pool = pool
        try:
            with z as f:
                self.assertEqual(f, socket)
                raise Exception()
        except Exception:
            pass
        pool.get_socket.assert_called_once_with(ADDRESS)
        pool.destroy_socket.assert_called_once_with(socket)
imply/chuu | third_party/pexpect/ANSI.py | 171 | 12646 | """This implements an ANSI (VT100) terminal emulator as a subclass of screen.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# references:
# http://en.wikipedia.org/wiki/ANSI_escape_code
# http://www.retards.org/terminals/vt102.html
# http://vt100.net/docs/vt102-ug/contents.html
# http://vt100.net/docs/vt220-rm/
# http://www.termsys.demon.co.uk/vtansi.htm
import screen
import FSM
import copy
import string
#
# The 'Do.*' functions are helper functions for the ANSI class.
#
def DoEmit(fsm):
    """Write the current input character to the screen (fsm.memory[0])."""
    fsm.memory[0].write_ch(fsm.input_symbol)
def DoStartNumber(fsm):
    """Begin collecting a numeric parameter: push the first digit."""
    fsm.memory.append(fsm.input_symbol)
def DoBuildNumber(fsm):
    """Append the current digit to the number string being collected."""
    fsm.memory.append(fsm.memory.pop() + fsm.input_symbol)
def DoBackOne(fsm):
    """Move the cursor back one column."""
    fsm.memory[0].cursor_back()
def DoBack(fsm):
    """Move the cursor back by the collected count."""
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_back(n)
def DoDownOne(fsm):
    """Move the cursor down one row."""
    fsm.memory[0].cursor_down()
def DoDown(fsm):
    """Move the cursor down by the collected count."""
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_down(n)
def DoForwardOne(fsm):
    """Move the cursor forward one column."""
    fsm.memory[0].cursor_forward()
def DoForward(fsm):
    """Move the cursor forward by the collected count."""
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_forward(n)
def DoUpReverse(fsm):
    """Move the cursor up one row, scrolling at the top margin."""
    fsm.memory[0].cursor_up_reverse()
def DoUpOne(fsm):
    """Move the cursor up one row."""
    fsm.memory[0].cursor_up()
def DoUp(fsm):
    """Move the cursor up by the collected count."""
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_up(n)
def DoHome(fsm):
    """Position the cursor at the collected (row, column) parameters."""
    # Column was pushed last, so it pops first.
    col = int(fsm.memory.pop())
    row = int(fsm.memory.pop())
    fsm.memory[0].cursor_home(row, col)
def DoHomeOrigin(fsm):
    """Move the cursor to the top-left corner (row 1, column 1)."""
    fsm.memory[0].cursor_home(1, 1)
def DoEraseDown(fsm):
    """Erase from the cursor to the bottom of the screen."""
    fsm.memory[0].erase_down()
def DoErase(fsm):
    """Dispatch ESC[<n>J: erase below (0), above (1), or whole screen (2)."""
    selector = int(fsm.memory.pop())
    scr = fsm.memory[0]
    if selector == 0:
        scr.erase_down()
    elif selector == 1:
        scr.erase_up()
    elif selector == 2:
        scr.erase_screen()
    # Any other selector is silently ignored, as in a real terminal.
def DoEraseEndOfLine(fsm):
    """Erase from the cursor to the end of the current line."""
    fsm.memory[0].erase_end_of_line()
def DoEraseLine(fsm):
    """Dispatch ESC[<n>K: erase right of (0), left of (1), or whole line (2)."""
    selector = int(fsm.memory.pop())
    scr = fsm.memory[0]
    if selector == 0:
        scr.erase_end_of_line()
    elif selector == 1:
        scr.erase_start_of_line()
    elif selector == 2:
        scr.erase_line()
def DoEnableScroll(fsm):
    """Enable scrolling for the full screen."""
    fsm.memory[0].scroll_screen()
def DoCursorSave(fsm):
    """Save the cursor position and attributes."""
    fsm.memory[0].cursor_save_attrs()
def DoCursorRestore(fsm):
    """Restore the previously saved cursor position and attributes."""
    fsm.memory[0].cursor_restore_attrs()
def DoScrollRegion(fsm):
    """Set the scrolling region to the collected top/bottom row pair."""
    scr = fsm.memory[0]
    bottom = int(fsm.memory.pop())  # pushed last, pops first
    top = int(fsm.memory.pop())
    scr.scroll_screen_rows(top, bottom)
def DoMode(fsm):
    """Consume the mode argument (expected to be 4); mode switching
    itself is not implemented (see the original setReplaceMode stub)."""
    scr = fsm.memory[0]
    fsm.memory.pop()  # the mode number; discarded
    # scr.setReplaceMode ()
def DoLog(fsm):
    """Log an unhandled input/state pair to the local 'log' file and
    reset the FSM memory so only the screen reference remains."""
    scr = fsm.memory[0]
    fsm.memory = [scr]
    with open('log', 'a') as fout:
        fout.write(fsm.input_symbol + ',' + fsm.current_state + '\n')
class term (screen.screen):

    """This class is an abstract, generic terminal.
    This does nothing. This is a placeholder that
    provides a common base class for other terminals
    such as an ANSI terminal. """

    def __init__ (self, r=24, c=80):
        # r, c: screen dimensions in rows and columns.
        # Explicit base-class call (not super) keeps old-style-class
        # compatibility with the screen module.
        screen.screen.__init__(self, r,c)
class ANSI (term):

    """This class implements an ANSI (VT100) terminal.
    It is a stream filter that recognizes ANSI terminal
    escape sequences and maintains the state of a screen object. """

    def __init__ (self, r=24,c=80):
        # The parser is a finite state machine whose shared memory is a
        # list: slot 0 always holds this terminal (a screen subclass), and
        # collected numeric escape parameters are pushed after it.
        term.__init__(self,r,c)

        #self.screen = screen (24,80)
        self.state = FSM.FSM ('INIT',[self])
        # Any unrecognized input is logged (DoLog) and the parser resets.
        self.state.set_default_transition (DoLog, 'INIT')
        # Ordinary characters in INIT go straight to the screen.
        self.state.add_transition_any ('INIT', DoEmit, 'INIT')
        self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
        self.state.add_transition_any ('ESC', DoLog, 'INIT')
        # Character-set selection (ESC ( / ESC )) is recognized but ignored.
        self.state.add_transition ('(', 'ESC', None, 'G0SCS')
        self.state.add_transition (')', 'ESC', None, 'G1SCS')
        self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
        self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
        self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
        self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
        self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
        self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
        self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
        self.state.add_transition ('[', 'ESC', None, 'ELB')
        # ELB means Escape Left Bracket. That is ^[[
        # CSI sequences that take no numeric parameter:
        self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
        self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
        self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
        self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
        self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
        self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
        self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
        self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
        self.state.add_transition ('m', 'ELB', None, 'INIT')
        self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
        # First numeric parameter accumulates in NUMBER_1.
        self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
        self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
        self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
        self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
        self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
        self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
        self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
        self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
        self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
        ### It gets worse... the 'm' code can have infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_1', None, 'INIT')
        ### LED control. Same implementation problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_1', None, 'INIT')

        # \E[?47h switch to alternate screen
        # \E[?47l restores to normal screen from alternate screen.
        self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
        self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
        self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT')
        self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT')

        #RM   Reset Mode                Esc [ Ps l                   none
        # Second numeric parameter (after ';') accumulates in NUMBER_2.
        self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
        self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
        self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
        self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
        self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
        self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
        ### It gets worse... the 'm' code can have infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_2', None, 'INIT')
        ### LED control. Same problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_2', None, 'INIT')
        self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')

        # Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
        self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
        self.state.add_transition_list (string.digits, 'SEMICOLON_X', None, 'NUMBER_X')
        self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
        self.state.add_transition ('m', 'NUMBER_X', None, 'INIT')
        self.state.add_transition ('q', 'NUMBER_X', None, 'INIT')
        # NOTE(review): the next line duplicates the (';', 'NUMBER_2')
        # transition registered a few lines above; it appears redundant.
        self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')

    def process (self, c):
        # Feed a single character through the escape-sequence parser.
        self.state.process(c)

    def process_list (self, l):
        self.write(l)

    def write (self, s):
        # Feed a whole string, one character at a time.
        for c in s:
            self.process(c)

    def flush (self):
        pass

    def write_ch (self, ch):
        """This puts a character at the current cursor position. The cursor
        position is moved forward with wrap-around, but no scrolling is done if
        the cursor hits the lower-right corner of the screen. """

        #\r and \n both produce a call to cr() and lf(), respectively.
        ch = ch[0]

        if ch == '\r':
            self.cr()
            return
        if ch == '\n':
            self.crlf()
            return
        if ch == chr(screen.BS):
            self.cursor_back()
            return
        # Non-printable characters are logged and dropped rather than drawn.
        if ch not in string.printable:
            fout = open ('log', 'a')
            fout.write ('Nonprint: ' + str(ord(ch)) + '\n')
            fout.close()
            return
        self.put_abs(self.cur_r, self.cur_c, ch)
        old_r = self.cur_r
        old_c = self.cur_c
        self.cursor_forward()
        # If the column did not advance we hit the right edge: wrap to the
        # next line, scrolling (and clearing the new line) at the bottom.
        if old_c == self.cur_c:
            self.cursor_down()
            if old_r != self.cur_r:
                self.cursor_home (self.cur_r, 1)
            else:
                self.scroll_up ()
                self.cursor_home (self.cur_r, 1)
                self.erase_line()
# def test (self):
#
# import sys
# write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)\n' + \
# 'I can see a bare-bottomed mandril.\n' + \
# '(Slyly eyeing his other nostril.)\n' + \
# 'If it jumps inside there too I really don\'t know what to do\n' + \
# 'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \
# '(A nasal zoo.)\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(And what is worst of all it constantly explodes.)\n' + \
# '"Ferrets don\'t explode," you say\n' + \
# 'But it happened nine times yesterday\n' + \
# 'And I should know for each time I was standing in the way.\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)'
# self.fill('.')
# self.cursor_home()
# for c in write_text:
# self.write_ch (c)
# print str(self)
#
#if __name__ == '__main__':
# t = ANSI(6,65)
# t.test()
| bsd-3-clause |
wangjun/wakatime | wakatime/packages/pygments_py3/pygments/lexers/_scilab_builtins.py | 29 | 52384 | # -*- coding: utf-8 -*-
"""
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated
# Scilab reserved words and console commands, highlighted as keywords by
# the ScilabLexer (see the module docstring above).
commands_kw = (
    'abort',
    'apropos',
    'break',
    'case',
    'catch',
    'continue',
    'do',
    'else',
    'elseif',
    'end',
    'endfunction',
    'for',
    'function',
    'help',
    'if',
    'pause',
    'quit',
    'select',
    'then',
    'try',
    'while',
)
functions_kw = (
'!!_invoke_',
'%H5Object_e',
'%H5Object_fieldnames',
'%H5Object_p',
'%XMLAttr_6',
'%XMLAttr_e',
'%XMLAttr_i_XMLElem',
'%XMLAttr_length',
'%XMLAttr_p',
'%XMLAttr_size',
'%XMLDoc_6',
'%XMLDoc_e',
'%XMLDoc_i_XMLList',
'%XMLDoc_p',
'%XMLElem_6',
'%XMLElem_e',
'%XMLElem_i_XMLDoc',
'%XMLElem_i_XMLElem',
'%XMLElem_i_XMLList',
'%XMLElem_p',
'%XMLList_6',
'%XMLList_e',
'%XMLList_i_XMLElem',
'%XMLList_i_XMLList',
'%XMLList_length',
'%XMLList_p',
'%XMLList_size',
'%XMLNs_6',
'%XMLNs_e',
'%XMLNs_i_XMLElem',
'%XMLNs_p',
'%XMLSet_6',
'%XMLSet_e',
'%XMLSet_length',
'%XMLSet_p',
'%XMLSet_size',
'%XMLValid_p',
'%_EClass_6',
'%_EClass_e',
'%_EClass_p',
'%_EObj_0',
'%_EObj_1__EObj',
'%_EObj_1_b',
'%_EObj_1_c',
'%_EObj_1_i',
'%_EObj_1_s',
'%_EObj_2__EObj',
'%_EObj_2_b',
'%_EObj_2_c',
'%_EObj_2_i',
'%_EObj_2_s',
'%_EObj_3__EObj',
'%_EObj_3_b',
'%_EObj_3_c',
'%_EObj_3_i',
'%_EObj_3_s',
'%_EObj_4__EObj',
'%_EObj_4_b',
'%_EObj_4_c',
'%_EObj_4_i',
'%_EObj_4_s',
'%_EObj_5',
'%_EObj_6',
'%_EObj_a__EObj',
'%_EObj_a_b',
'%_EObj_a_c',
'%_EObj_a_i',
'%_EObj_a_s',
'%_EObj_d__EObj',
'%_EObj_d_b',
'%_EObj_d_c',
'%_EObj_d_i',
'%_EObj_d_s',
'%_EObj_disp',
'%_EObj_e',
'%_EObj_g__EObj',
'%_EObj_g_b',
'%_EObj_g_c',
'%_EObj_g_i',
'%_EObj_g_s',
'%_EObj_h__EObj',
'%_EObj_h_b',
'%_EObj_h_c',
'%_EObj_h_i',
'%_EObj_h_s',
'%_EObj_i__EObj',
'%_EObj_j__EObj',
'%_EObj_j_b',
'%_EObj_j_c',
'%_EObj_j_i',
'%_EObj_j_s',
'%_EObj_k__EObj',
'%_EObj_k_b',
'%_EObj_k_c',
'%_EObj_k_i',
'%_EObj_k_s',
'%_EObj_l__EObj',
'%_EObj_l_b',
'%_EObj_l_c',
'%_EObj_l_i',
'%_EObj_l_s',
'%_EObj_m__EObj',
'%_EObj_m_b',
'%_EObj_m_c',
'%_EObj_m_i',
'%_EObj_m_s',
'%_EObj_n__EObj',
'%_EObj_n_b',
'%_EObj_n_c',
'%_EObj_n_i',
'%_EObj_n_s',
'%_EObj_o__EObj',
'%_EObj_o_b',
'%_EObj_o_c',
'%_EObj_o_i',
'%_EObj_o_s',
'%_EObj_p',
'%_EObj_p__EObj',
'%_EObj_p_b',
'%_EObj_p_c',
'%_EObj_p_i',
'%_EObj_p_s',
'%_EObj_q__EObj',
'%_EObj_q_b',
'%_EObj_q_c',
'%_EObj_q_i',
'%_EObj_q_s',
'%_EObj_r__EObj',
'%_EObj_r_b',
'%_EObj_r_c',
'%_EObj_r_i',
'%_EObj_r_s',
'%_EObj_s__EObj',
'%_EObj_s_b',
'%_EObj_s_c',
'%_EObj_s_i',
'%_EObj_s_s',
'%_EObj_t',
'%_EObj_x__EObj',
'%_EObj_x_b',
'%_EObj_x_c',
'%_EObj_x_i',
'%_EObj_x_s',
'%_EObj_y__EObj',
'%_EObj_y_b',
'%_EObj_y_c',
'%_EObj_y_i',
'%_EObj_y_s',
'%_EObj_z__EObj',
'%_EObj_z_b',
'%_EObj_z_c',
'%_EObj_z_i',
'%_EObj_z_s',
'%_eigs',
'%_load',
'%b_1__EObj',
'%b_2__EObj',
'%b_3__EObj',
'%b_4__EObj',
'%b_a__EObj',
'%b_d__EObj',
'%b_g__EObj',
'%b_h__EObj',
'%b_i_XMLList',
'%b_i__EObj',
'%b_j__EObj',
'%b_k__EObj',
'%b_l__EObj',
'%b_m__EObj',
'%b_n__EObj',
'%b_o__EObj',
'%b_p__EObj',
'%b_q__EObj',
'%b_r__EObj',
'%b_s__EObj',
'%b_x__EObj',
'%b_y__EObj',
'%b_z__EObj',
'%c_1__EObj',
'%c_2__EObj',
'%c_3__EObj',
'%c_4__EObj',
'%c_a__EObj',
'%c_d__EObj',
'%c_g__EObj',
'%c_h__EObj',
'%c_i_XMLAttr',
'%c_i_XMLDoc',
'%c_i_XMLElem',
'%c_i_XMLList',
'%c_i__EObj',
'%c_j__EObj',
'%c_k__EObj',
'%c_l__EObj',
'%c_m__EObj',
'%c_n__EObj',
'%c_o__EObj',
'%c_p__EObj',
'%c_q__EObj',
'%c_r__EObj',
'%c_s__EObj',
'%c_x__EObj',
'%c_y__EObj',
'%c_z__EObj',
'%ce_i_XMLList',
'%fptr_i_XMLList',
'%h_i_XMLList',
'%hm_i_XMLList',
'%i_1__EObj',
'%i_2__EObj',
'%i_3__EObj',
'%i_4__EObj',
'%i_a__EObj',
'%i_abs',
'%i_cumprod',
'%i_cumsum',
'%i_d__EObj',
'%i_diag',
'%i_g__EObj',
'%i_h__EObj',
'%i_i_XMLList',
'%i_i__EObj',
'%i_j__EObj',
'%i_k__EObj',
'%i_l__EObj',
'%i_m__EObj',
'%i_matrix',
'%i_max',
'%i_maxi',
'%i_min',
'%i_mini',
'%i_mput',
'%i_n__EObj',
'%i_o__EObj',
'%i_p',
'%i_p__EObj',
'%i_prod',
'%i_q__EObj',
'%i_r__EObj',
'%i_s__EObj',
'%i_sum',
'%i_tril',
'%i_triu',
'%i_x__EObj',
'%i_y__EObj',
'%i_z__EObj',
'%ip_i_XMLList',
'%l_i_XMLList',
'%l_i__EObj',
'%lss_i_XMLList',
'%mc_i_XMLList',
'%msp_full',
'%msp_i_XMLList',
'%msp_spget',
'%p_i_XMLList',
'%ptr_i_XMLList',
'%r_i_XMLList',
'%s_1__EObj',
'%s_2__EObj',
'%s_3__EObj',
'%s_4__EObj',
'%s_a__EObj',
'%s_d__EObj',
'%s_g__EObj',
'%s_h__EObj',
'%s_i_XMLList',
'%s_i__EObj',
'%s_j__EObj',
'%s_k__EObj',
'%s_l__EObj',
'%s_m__EObj',
'%s_n__EObj',
'%s_o__EObj',
'%s_p__EObj',
'%s_q__EObj',
'%s_r__EObj',
'%s_s__EObj',
'%s_x__EObj',
'%s_y__EObj',
'%s_z__EObj',
'%sp_i_XMLList',
'%spb_i_XMLList',
'%st_i_XMLList',
'Calendar',
'ClipBoard',
'Matplot',
'Matplot1',
'PlaySound',
'TCL_DeleteInterp',
'TCL_DoOneEvent',
'TCL_EvalFile',
'TCL_EvalStr',
'TCL_ExistArray',
'TCL_ExistInterp',
'TCL_ExistVar',
'TCL_GetVar',
'TCL_GetVersion',
'TCL_SetVar',
'TCL_UnsetVar',
'TCL_UpVar',
'_',
'_code2str',
'_d',
'_str2code',
'about',
'abs',
'acos',
'addModulePreferences',
'addcolor',
'addf',
'addhistory',
'addinter',
'addlocalizationdomain',
'amell',
'and',
'argn',
'arl2_ius',
'ascii',
'asin',
'atan',
'backslash',
'balanc',
'banner',
'base2dec',
'basename',
'bdiag',
'beep',
'besselh',
'besseli',
'besselj',
'besselk',
'bessely',
'beta',
'bezout',
'bfinit',
'blkfc1i',
'blkslvi',
'bool2s',
'browsehistory',
'browsevar',
'bsplin3val',
'buildDoc',
'buildouttb',
'bvode',
'c_link',
'call',
'callblk',
'captions',
'cd',
'cdfbet',
'cdfbin',
'cdfchi',
'cdfchn',
'cdff',
'cdffnc',
'cdfgam',
'cdfnbn',
'cdfnor',
'cdfpoi',
'cdft',
'ceil',
'champ',
'champ1',
'chdir',
'chol',
'clc',
'clean',
'clear',
'clearfun',
'clearglobal',
'closeEditor',
'closeEditvar',
'closeXcos',
'code2str',
'coeff',
'color',
'comp',
'completion',
'conj',
'contour2di',
'contr',
'conv2',
'convstr',
'copy',
'copyfile',
'corr',
'cos',
'coserror',
'createdir',
'cshep2d',
'csvDefault',
'csvIsnum',
'csvRead',
'csvStringToDouble',
'csvTextScan',
'csvWrite',
'ctree2',
'ctree3',
'ctree4',
'cumprod',
'cumsum',
'curblock',
'curblockc',
'daskr',
'dasrt',
'dassl',
'data2sig',
'datatipCreate',
'datatipManagerMode',
'datatipMove',
'datatipRemove',
'datatipSetDisplay',
'datatipSetInterp',
'datatipSetOrientation',
'datatipSetStyle',
'datatipToggle',
'dawson',
'dct',
'debug',
'dec2base',
'deff',
'definedfields',
'degree',
'delbpt',
'delete',
'deletefile',
'delip',
'delmenu',
'det',
'dgettext',
'dhinf',
'diag',
'diary',
'diffobjs',
'disp',
'dispbpt',
'displayhistory',
'disposefftwlibrary',
'dlgamma',
'dnaupd',
'dneupd',
'double',
'drawaxis',
'drawlater',
'drawnow',
'driver',
'dsaupd',
'dsearch',
'dseupd',
'dst',
'duplicate',
'editvar',
'emptystr',
'end_scicosim',
'ereduc',
'erf',
'erfc',
'erfcx',
'erfi',
'errcatch',
'errclear',
'error',
'eval_cshep2d',
'exec',
'execstr',
'exists',
'exit',
'exp',
'expm',
'exportUI',
'export_to_hdf5',
'eye',
'fadj2sp',
'fec',
'feval',
'fft',
'fftw',
'fftw_flags',
'fftw_forget_wisdom',
'fftwlibraryisloaded',
'figure',
'file',
'filebrowser',
'fileext',
'fileinfo',
'fileparts',
'filesep',
'find',
'findBD',
'findfiles',
'fire_closing_finished',
'floor',
'format',
'fort',
'fprintfMat',
'freq',
'frexp',
'fromc',
'fromjava',
'fscanfMat',
'fsolve',
'fstair',
'full',
'fullpath',
'funcprot',
'funptr',
'gamma',
'gammaln',
'geom3d',
'get',
'getURL',
'get_absolute_file_path',
'get_fftw_wisdom',
'getblocklabel',
'getcallbackobject',
'getdate',
'getdebuginfo',
'getdefaultlanguage',
'getdrives',
'getdynlibext',
'getenv',
'getfield',
'gethistory',
'gethistoryfile',
'getinstalledlookandfeels',
'getio',
'getlanguage',
'getlongpathname',
'getlookandfeel',
'getmd5',
'getmemory',
'getmodules',
'getos',
'getpid',
'getrelativefilename',
'getscicosvars',
'getscilabmode',
'getshortpathname',
'gettext',
'getvariablesonstack',
'getversion',
'glist',
'global',
'glue',
'grand',
'graphicfunction',
'grayplot',
'grep',
'gsort',
'gstacksize',
'h5attr',
'h5close',
'h5cp',
'h5dataset',
'h5dump',
'h5exists',
'h5flush',
'h5get',
'h5group',
'h5isArray',
'h5isAttr',
'h5isCompound',
'h5isFile',
'h5isGroup',
'h5isList',
'h5isRef',
'h5isSet',
'h5isSpace',
'h5isType',
'h5isVlen',
'h5label',
'h5ln',
'h5ls',
'h5mount',
'h5mv',
'h5open',
'h5read',
'h5readattr',
'h5rm',
'h5umount',
'h5write',
'h5writeattr',
'havewindow',
'helpbrowser',
'hess',
'hinf',
'historymanager',
'historysize',
'host',
'htmlDump',
'htmlRead',
'htmlReadStr',
'htmlWrite',
'iconvert',
'ieee',
'ilib_verbose',
'imag',
'impl',
'import_from_hdf5',
'imult',
'inpnvi',
'int',
'int16',
'int2d',
'int32',
'int3d',
'int8',
'interp',
'interp2d',
'interp3d',
'intg',
'intppty',
'inttype',
'inv',
'invoke_lu',
'is_handle_valid',
'is_hdf5_file',
'isalphanum',
'isascii',
'isdef',
'isdigit',
'isdir',
'isequal',
'isequalbitwise',
'iserror',
'isfile',
'isglobal',
'isletter',
'isnum',
'isreal',
'iswaitingforinput',
'jallowClassReloading',
'jarray',
'jautoTranspose',
'jautoUnwrap',
'javaclasspath',
'javalibrarypath',
'jcast',
'jcompile',
'jconvMatrixMethod',
'jcreatejar',
'jdeff',
'jdisableTrace',
'jenableTrace',
'jexists',
'jgetclassname',
'jgetfield',
'jgetfields',
'jgetinfo',
'jgetmethods',
'jimport',
'jinvoke',
'jinvoke_db',
'jnewInstance',
'jremove',
'jsetfield',
'junwrap',
'junwraprem',
'jwrap',
'jwrapinfloat',
'kron',
'lasterror',
'ldiv',
'ldivf',
'legendre',
'length',
'lib',
'librarieslist',
'libraryinfo',
'light',
'linear_interpn',
'lines',
'link',
'linmeq',
'list',
'listvar_in_hdf5',
'load',
'loadGui',
'loadScicos',
'loadXcos',
'loadfftwlibrary',
'loadhistory',
'log',
'log1p',
'lsq',
'lsq_splin',
'lsqrsolve',
'lsslist',
'lstcat',
'lstsize',
'ltitr',
'lu',
'ludel',
'lufact',
'luget',
'lusolve',
'macr2lst',
'macr2tree',
'matfile_close',
'matfile_listvar',
'matfile_open',
'matfile_varreadnext',
'matfile_varwrite',
'matrix',
'max',
'maxfiles',
'mclearerr',
'mclose',
'meof',
'merror',
'messagebox',
'mfprintf',
'mfscanf',
'mget',
'mgeti',
'mgetl',
'mgetstr',
'min',
'mlist',
'mode',
'model2blk',
'mopen',
'move',
'movefile',
'mprintf',
'mput',
'mputl',
'mputstr',
'mscanf',
'mseek',
'msprintf',
'msscanf',
'mtell',
'mtlb_mode',
'mtlb_sparse',
'mucomp',
'mulf',
'name2rgb',
'nearfloat',
'newaxes',
'newest',
'newfun',
'nnz',
'norm',
'notify',
'number_properties',
'ode',
'odedc',
'ones',
'openged',
'opentk',
'optim',
'or',
'ordmmd',
'parallel_concurrency',
'parallel_run',
'param3d',
'param3d1',
'part',
'pathconvert',
'pathsep',
'phase_simulation',
'plot2d',
'plot2d1',
'plot2d2',
'plot2d3',
'plot2d4',
'plot3d',
'plot3d1',
'plotbrowser',
'pointer_xproperty',
'poly',
'ppol',
'pppdiv',
'predef',
'preferences',
'print',
'printf',
'printfigure',
'printsetupbox',
'prod',
'progressionbar',
'prompt',
'pwd',
'qld',
'qp_solve',
'qr',
'raise_window',
'rand',
'rankqr',
'rat',
'rcond',
'rdivf',
'read',
'read4b',
'read_csv',
'readb',
'readgateway',
'readmps',
'real',
'realtime',
'realtimeinit',
'regexp',
'relocate_handle',
'remez',
'removeModulePreferences',
'removedir',
'removelinehistory',
'res_with_prec',
'resethistory',
'residu',
'resume',
'return',
'ricc',
'rlist',
'roots',
'rotate_axes',
'round',
'rpem',
'rtitr',
'rubberbox',
'save',
'saveGui',
'saveafterncommands',
'saveconsecutivecommands',
'savehistory',
'schur',
'sci_haltscicos',
'sci_tree2',
'sci_tree3',
'sci_tree4',
'sciargs',
'scicos_debug',
'scicos_debug_count',
'scicos_time',
'scicosim',
'scinotes',
'sctree',
'semidef',
'set',
'set_blockerror',
'set_fftw_wisdom',
'set_xproperty',
'setbpt',
'setdefaultlanguage',
'setenv',
'setfield',
'sethistoryfile',
'setlanguage',
'setlookandfeel',
'setmenu',
'sfact',
'sfinit',
'show_window',
'sident',
'sig2data',
'sign',
'simp',
'simp_mode',
'sin',
'size',
'slash',
'sleep',
'sorder',
'sparse',
'spchol',
'spcompack',
'spec',
'spget',
'splin',
'splin2d',
'splin3d',
'splitURL',
'spones',
'sprintf',
'sqrt',
'stacksize',
'str2code',
'strcat',
'strchr',
'strcmp',
'strcspn',
'strindex',
'string',
'stringbox',
'stripblanks',
'strncpy',
'strrchr',
'strrev',
'strsplit',
'strspn',
'strstr',
'strsubst',
'strtod',
'strtok',
'subf',
'sum',
'svd',
'swap_handles',
'symfcti',
'syredi',
'system_getproperty',
'system_setproperty',
'ta2lpd',
'tan',
'taucs_chdel',
'taucs_chfact',
'taucs_chget',
'taucs_chinfo',
'taucs_chsolve',
'tempname',
'testmatrix',
'timer',
'tlist',
'tohome',
'tokens',
'toolbar',
'toprint',
'tr_zer',
'tril',
'triu',
'type',
'typename',
'uiDisplayTree',
'uicontextmenu',
'uicontrol',
'uigetcolor',
'uigetdir',
'uigetfile',
'uigetfont',
'uimenu',
'uint16',
'uint32',
'uint8',
'uipopup',
'uiputfile',
'uiwait',
'ulink',
'umf_ludel',
'umf_lufact',
'umf_luget',
'umf_luinfo',
'umf_lusolve',
'umfpack',
'unglue',
'unix',
'unsetmenu',
'unzoom',
'updatebrowsevar',
'usecanvas',
'useeditor',
'user',
'var2vec',
'varn',
'vec2var',
'waitbar',
'warnBlockByUID',
'warning',
'what',
'where',
'whereis',
'who',
'winsid',
'with_module',
'writb',
'write',
'write4b',
'write_csv',
'x_choose',
'x_choose_modeless',
'x_dialog',
'x_mdialog',
'xarc',
'xarcs',
'xarrows',
'xchange',
'xchoicesi',
'xclick',
'xcos',
'xcosAddToolsMenu',
'xcosConfigureXmlFile',
'xcosDiagramToScilab',
'xcosPalCategoryAdd',
'xcosPalDelete',
'xcosPalDisable',
'xcosPalEnable',
'xcosPalGenerateIcon',
'xcosPalGet',
'xcosPalLoad',
'xcosPalMove',
'xcosSimulationStarted',
'xcosUpdateBlock',
'xdel',
'xend',
'xfarc',
'xfarcs',
'xfpoly',
'xfpolys',
'xfrect',
'xget',
'xgetmouse',
'xgraduate',
'xgrid',
'xinit',
'xlfont',
'xls_open',
'xls_read',
'xmlAddNs',
'xmlAppend',
'xmlAsNumber',
'xmlAsText',
'xmlDTD',
'xmlDelete',
'xmlDocument',
'xmlDump',
'xmlElement',
'xmlFormat',
'xmlGetNsByHref',
'xmlGetNsByPrefix',
'xmlGetOpenDocs',
'xmlIsValidObject',
'xmlName',
'xmlNs',
'xmlRead',
'xmlReadStr',
'xmlRelaxNG',
'xmlRemove',
'xmlSchema',
'xmlSetAttributes',
'xmlValidate',
'xmlWrite',
'xmlXPath',
'xname',
'xpause',
'xpoly',
'xpolys',
'xrect',
'xrects',
'xs2bmp',
'xs2emf',
'xs2eps',
'xs2gif',
'xs2jpg',
'xs2pdf',
'xs2png',
'xs2ppm',
'xs2ps',
'xs2svg',
'xsegs',
'xset',
'xstring',
'xstringb',
'xtitle',
'zeros',
'znaupd',
'zneupd',
'zoom_rect',
)
macros_kw = (
'!_deff_wrapper',
'%0_i_st',
'%3d_i_h',
'%Block_xcosUpdateBlock',
'%TNELDER_p',
'%TNELDER_string',
'%TNMPLOT_p',
'%TNMPLOT_string',
'%TOPTIM_p',
'%TOPTIM_string',
'%TSIMPLEX_p',
'%TSIMPLEX_string',
'%_EVoid_p',
'%_gsort',
'%_listvarinfile',
'%_rlist',
'%_save',
'%_sodload',
'%_strsplit',
'%_unwrap',
'%ar_p',
'%asn',
'%b_a_b',
'%b_a_s',
'%b_c_s',
'%b_c_spb',
'%b_cumprod',
'%b_cumsum',
'%b_d_s',
'%b_diag',
'%b_e',
'%b_f_s',
'%b_f_spb',
'%b_g_s',
'%b_g_spb',
'%b_grand',
'%b_h_s',
'%b_h_spb',
'%b_i_b',
'%b_i_ce',
'%b_i_h',
'%b_i_hm',
'%b_i_s',
'%b_i_sp',
'%b_i_spb',
'%b_i_st',
'%b_iconvert',
'%b_l_b',
'%b_l_s',
'%b_m_b',
'%b_m_s',
'%b_matrix',
'%b_n_hm',
'%b_o_hm',
'%b_p_s',
'%b_prod',
'%b_r_b',
'%b_r_s',
'%b_s_b',
'%b_s_s',
'%b_string',
'%b_sum',
'%b_tril',
'%b_triu',
'%b_x_b',
'%b_x_s',
'%bicg',
'%bicgstab',
'%c_a_c',
'%c_b_c',
'%c_b_s',
'%c_diag',
'%c_dsearch',
'%c_e',
'%c_eye',
'%c_f_s',
'%c_grand',
'%c_i_c',
'%c_i_ce',
'%c_i_h',
'%c_i_hm',
'%c_i_lss',
'%c_i_r',
'%c_i_s',
'%c_i_st',
'%c_matrix',
'%c_n_l',
'%c_n_st',
'%c_o_l',
'%c_o_st',
'%c_ones',
'%c_rand',
'%c_tril',
'%c_triu',
'%cblock_c_cblock',
'%cblock_c_s',
'%cblock_e',
'%cblock_f_cblock',
'%cblock_p',
'%cblock_size',
'%ce_6',
'%ce_c_ce',
'%ce_e',
'%ce_f_ce',
'%ce_i_ce',
'%ce_i_s',
'%ce_i_st',
'%ce_matrix',
'%ce_p',
'%ce_size',
'%ce_string',
'%ce_t',
'%cgs',
'%champdat_i_h',
'%choose',
'%diagram_xcos',
'%dir_p',
'%fptr_i_st',
'%grand_perm',
'%grayplot_i_h',
'%h_i_st',
'%hmS_k_hmS_generic',
'%hm_1_hm',
'%hm_1_s',
'%hm_2_hm',
'%hm_2_s',
'%hm_3_hm',
'%hm_3_s',
'%hm_4_hm',
'%hm_4_s',
'%hm_5',
'%hm_a_hm',
'%hm_a_r',
'%hm_a_s',
'%hm_abs',
'%hm_and',
'%hm_bool2s',
'%hm_c_hm',
'%hm_ceil',
'%hm_conj',
'%hm_cos',
'%hm_cumprod',
'%hm_cumsum',
'%hm_d_hm',
'%hm_d_s',
'%hm_degree',
'%hm_dsearch',
'%hm_e',
'%hm_exp',
'%hm_eye',
'%hm_f_hm',
'%hm_find',
'%hm_floor',
'%hm_g_hm',
'%hm_grand',
'%hm_gsort',
'%hm_h_hm',
'%hm_i_b',
'%hm_i_ce',
'%hm_i_h',
'%hm_i_hm',
'%hm_i_i',
'%hm_i_p',
'%hm_i_r',
'%hm_i_s',
'%hm_i_st',
'%hm_iconvert',
'%hm_imag',
'%hm_int',
'%hm_isnan',
'%hm_isreal',
'%hm_j_hm',
'%hm_j_s',
'%hm_k_hm',
'%hm_k_s',
'%hm_log',
'%hm_m_p',
'%hm_m_r',
'%hm_m_s',
'%hm_matrix',
'%hm_max',
'%hm_mean',
'%hm_median',
'%hm_min',
'%hm_n_b',
'%hm_n_c',
'%hm_n_hm',
'%hm_n_i',
'%hm_n_p',
'%hm_n_s',
'%hm_o_b',
'%hm_o_c',
'%hm_o_hm',
'%hm_o_i',
'%hm_o_p',
'%hm_o_s',
'%hm_ones',
'%hm_or',
'%hm_p',
'%hm_prod',
'%hm_q_hm',
'%hm_r_s',
'%hm_rand',
'%hm_real',
'%hm_round',
'%hm_s',
'%hm_s_hm',
'%hm_s_r',
'%hm_s_s',
'%hm_sign',
'%hm_sin',
'%hm_size',
'%hm_sqrt',
'%hm_stdev',
'%hm_string',
'%hm_sum',
'%hm_x_hm',
'%hm_x_p',
'%hm_x_s',
'%hm_zeros',
'%i_1_s',
'%i_2_s',
'%i_3_s',
'%i_4_s',
'%i_Matplot',
'%i_a_i',
'%i_a_s',
'%i_and',
'%i_ascii',
'%i_b_s',
'%i_bezout',
'%i_champ',
'%i_champ1',
'%i_contour',
'%i_contour2d',
'%i_d_i',
'%i_d_s',
'%i_dsearch',
'%i_e',
'%i_fft',
'%i_g_i',
'%i_gcd',
'%i_grand',
'%i_h_i',
'%i_i_ce',
'%i_i_h',
'%i_i_hm',
'%i_i_i',
'%i_i_s',
'%i_i_st',
'%i_j_i',
'%i_j_s',
'%i_l_s',
'%i_lcm',
'%i_length',
'%i_m_i',
'%i_m_s',
'%i_mfprintf',
'%i_mprintf',
'%i_msprintf',
'%i_n_s',
'%i_o_s',
'%i_or',
'%i_p_i',
'%i_p_s',
'%i_plot2d',
'%i_plot2d1',
'%i_plot2d2',
'%i_q_s',
'%i_r_i',
'%i_r_s',
'%i_round',
'%i_s_i',
'%i_s_s',
'%i_sign',
'%i_string',
'%i_x_i',
'%i_x_s',
'%ip_a_s',
'%ip_i_st',
'%ip_m_s',
'%ip_n_ip',
'%ip_o_ip',
'%ip_p',
'%ip_part',
'%ip_s_s',
'%ip_string',
'%k',
'%l_i_h',
'%l_i_s',
'%l_i_st',
'%l_isequal',
'%l_n_c',
'%l_n_l',
'%l_n_m',
'%l_n_p',
'%l_n_s',
'%l_n_st',
'%l_o_c',
'%l_o_l',
'%l_o_m',
'%l_o_p',
'%l_o_s',
'%l_o_st',
'%lss_a_lss',
'%lss_a_p',
'%lss_a_r',
'%lss_a_s',
'%lss_c_lss',
'%lss_c_p',
'%lss_c_r',
'%lss_c_s',
'%lss_e',
'%lss_eye',
'%lss_f_lss',
'%lss_f_p',
'%lss_f_r',
'%lss_f_s',
'%lss_i_ce',
'%lss_i_lss',
'%lss_i_p',
'%lss_i_r',
'%lss_i_s',
'%lss_i_st',
'%lss_inv',
'%lss_l_lss',
'%lss_l_p',
'%lss_l_r',
'%lss_l_s',
'%lss_m_lss',
'%lss_m_p',
'%lss_m_r',
'%lss_m_s',
'%lss_n_lss',
'%lss_n_p',
'%lss_n_r',
'%lss_n_s',
'%lss_norm',
'%lss_o_lss',
'%lss_o_p',
'%lss_o_r',
'%lss_o_s',
'%lss_ones',
'%lss_r_lss',
'%lss_r_p',
'%lss_r_r',
'%lss_r_s',
'%lss_rand',
'%lss_s',
'%lss_s_lss',
'%lss_s_p',
'%lss_s_r',
'%lss_s_s',
'%lss_size',
'%lss_t',
'%lss_v_lss',
'%lss_v_p',
'%lss_v_r',
'%lss_v_s',
'%lt_i_s',
'%m_n_l',
'%m_o_l',
'%mc_i_h',
'%mc_i_s',
'%mc_i_st',
'%mc_n_st',
'%mc_o_st',
'%mc_string',
'%mps_p',
'%mps_string',
'%msp_a_s',
'%msp_abs',
'%msp_e',
'%msp_find',
'%msp_i_s',
'%msp_i_st',
'%msp_length',
'%msp_m_s',
'%msp_maxi',
'%msp_n_msp',
'%msp_nnz',
'%msp_o_msp',
'%msp_p',
'%msp_sparse',
'%msp_spones',
'%msp_t',
'%p_a_lss',
'%p_a_r',
'%p_c_lss',
'%p_c_r',
'%p_cumprod',
'%p_cumsum',
'%p_d_p',
'%p_d_r',
'%p_d_s',
'%p_det',
'%p_e',
'%p_f_lss',
'%p_f_r',
'%p_grand',
'%p_i_ce',
'%p_i_h',
'%p_i_hm',
'%p_i_lss',
'%p_i_p',
'%p_i_r',
'%p_i_s',
'%p_i_st',
'%p_inv',
'%p_j_s',
'%p_k_p',
'%p_k_r',
'%p_k_s',
'%p_l_lss',
'%p_l_p',
'%p_l_r',
'%p_l_s',
'%p_m_hm',
'%p_m_lss',
'%p_m_r',
'%p_matrix',
'%p_n_l',
'%p_n_lss',
'%p_n_r',
'%p_o_l',
'%p_o_lss',
'%p_o_r',
'%p_o_sp',
'%p_p_s',
'%p_part',
'%p_prod',
'%p_q_p',
'%p_q_r',
'%p_q_s',
'%p_r_lss',
'%p_r_p',
'%p_r_r',
'%p_r_s',
'%p_s_lss',
'%p_s_r',
'%p_simp',
'%p_string',
'%p_sum',
'%p_v_lss',
'%p_v_p',
'%p_v_r',
'%p_v_s',
'%p_x_hm',
'%p_x_r',
'%p_y_p',
'%p_y_r',
'%p_y_s',
'%p_z_p',
'%p_z_r',
'%p_z_s',
'%pcg',
'%plist_p',
'%plist_string',
'%r_0',
'%r_a_hm',
'%r_a_lss',
'%r_a_p',
'%r_a_r',
'%r_a_s',
'%r_c_lss',
'%r_c_p',
'%r_c_r',
'%r_c_s',
'%r_clean',
'%r_cumprod',
'%r_cumsum',
'%r_d_p',
'%r_d_r',
'%r_d_s',
'%r_det',
'%r_diag',
'%r_e',
'%r_eye',
'%r_f_lss',
'%r_f_p',
'%r_f_r',
'%r_f_s',
'%r_i_ce',
'%r_i_hm',
'%r_i_lss',
'%r_i_p',
'%r_i_r',
'%r_i_s',
'%r_i_st',
'%r_inv',
'%r_j_s',
'%r_k_p',
'%r_k_r',
'%r_k_s',
'%r_l_lss',
'%r_l_p',
'%r_l_r',
'%r_l_s',
'%r_m_hm',
'%r_m_lss',
'%r_m_p',
'%r_m_r',
'%r_m_s',
'%r_matrix',
'%r_n_lss',
'%r_n_p',
'%r_n_r',
'%r_n_s',
'%r_norm',
'%r_o_lss',
'%r_o_p',
'%r_o_r',
'%r_o_s',
'%r_ones',
'%r_p',
'%r_p_s',
'%r_prod',
'%r_q_p',
'%r_q_r',
'%r_q_s',
'%r_r_lss',
'%r_r_p',
'%r_r_r',
'%r_r_s',
'%r_rand',
'%r_s',
'%r_s_hm',
'%r_s_lss',
'%r_s_p',
'%r_s_r',
'%r_s_s',
'%r_simp',
'%r_size',
'%r_string',
'%r_sum',
'%r_t',
'%r_tril',
'%r_triu',
'%r_v_lss',
'%r_v_p',
'%r_v_r',
'%r_v_s',
'%r_varn',
'%r_x_p',
'%r_x_r',
'%r_x_s',
'%r_y_p',
'%r_y_r',
'%r_y_s',
'%r_z_p',
'%r_z_r',
'%r_z_s',
'%s_1_hm',
'%s_1_i',
'%s_2_hm',
'%s_2_i',
'%s_3_hm',
'%s_3_i',
'%s_4_hm',
'%s_4_i',
'%s_5',
'%s_a_b',
'%s_a_hm',
'%s_a_i',
'%s_a_ip',
'%s_a_lss',
'%s_a_msp',
'%s_a_r',
'%s_a_sp',
'%s_and',
'%s_b_i',
'%s_b_s',
'%s_bezout',
'%s_c_b',
'%s_c_cblock',
'%s_c_lss',
'%s_c_r',
'%s_c_sp',
'%s_d_b',
'%s_d_i',
'%s_d_p',
'%s_d_r',
'%s_d_sp',
'%s_e',
'%s_f_b',
'%s_f_cblock',
'%s_f_lss',
'%s_f_r',
'%s_f_sp',
'%s_g_b',
'%s_g_s',
'%s_gcd',
'%s_grand',
'%s_h_b',
'%s_h_s',
'%s_i_b',
'%s_i_c',
'%s_i_ce',
'%s_i_h',
'%s_i_hm',
'%s_i_i',
'%s_i_lss',
'%s_i_p',
'%s_i_r',
'%s_i_s',
'%s_i_sp',
'%s_i_spb',
'%s_i_st',
'%s_j_i',
'%s_k_hm',
'%s_k_p',
'%s_k_r',
'%s_k_sp',
'%s_l_b',
'%s_l_hm',
'%s_l_i',
'%s_l_lss',
'%s_l_p',
'%s_l_r',
'%s_l_s',
'%s_l_sp',
'%s_lcm',
'%s_m_b',
'%s_m_hm',
'%s_m_i',
'%s_m_ip',
'%s_m_lss',
'%s_m_msp',
'%s_m_r',
'%s_matrix',
'%s_n_hm',
'%s_n_i',
'%s_n_l',
'%s_n_lss',
'%s_n_r',
'%s_n_st',
'%s_o_hm',
'%s_o_i',
'%s_o_l',
'%s_o_lss',
'%s_o_r',
'%s_o_st',
'%s_or',
'%s_p_b',
'%s_p_i',
'%s_pow',
'%s_q_hm',
'%s_q_i',
'%s_q_p',
'%s_q_r',
'%s_q_sp',
'%s_r_b',
'%s_r_i',
'%s_r_lss',
'%s_r_p',
'%s_r_r',
'%s_r_s',
'%s_r_sp',
'%s_s_b',
'%s_s_hm',
'%s_s_i',
'%s_s_ip',
'%s_s_lss',
'%s_s_r',
'%s_s_sp',
'%s_simp',
'%s_v_lss',
'%s_v_p',
'%s_v_r',
'%s_v_s',
'%s_x_b',
'%s_x_hm',
'%s_x_i',
'%s_x_r',
'%s_y_p',
'%s_y_r',
'%s_y_sp',
'%s_z_p',
'%s_z_r',
'%s_z_sp',
'%sn',
'%sp_a_s',
'%sp_a_sp',
'%sp_and',
'%sp_c_s',
'%sp_ceil',
'%sp_conj',
'%sp_cos',
'%sp_cumprod',
'%sp_cumsum',
'%sp_d_s',
'%sp_d_sp',
'%sp_det',
'%sp_diag',
'%sp_e',
'%sp_exp',
'%sp_f_s',
'%sp_floor',
'%sp_grand',
'%sp_gsort',
'%sp_i_ce',
'%sp_i_h',
'%sp_i_s',
'%sp_i_sp',
'%sp_i_st',
'%sp_int',
'%sp_inv',
'%sp_k_s',
'%sp_k_sp',
'%sp_l_s',
'%sp_l_sp',
'%sp_length',
'%sp_max',
'%sp_min',
'%sp_norm',
'%sp_or',
'%sp_p_s',
'%sp_prod',
'%sp_q_s',
'%sp_q_sp',
'%sp_r_s',
'%sp_r_sp',
'%sp_round',
'%sp_s_s',
'%sp_s_sp',
'%sp_sin',
'%sp_sqrt',
'%sp_string',
'%sp_sum',
'%sp_tril',
'%sp_triu',
'%sp_y_s',
'%sp_y_sp',
'%sp_z_s',
'%sp_z_sp',
'%spb_and',
'%spb_c_b',
'%spb_cumprod',
'%spb_cumsum',
'%spb_diag',
'%spb_e',
'%spb_f_b',
'%spb_g_b',
'%spb_g_spb',
'%spb_h_b',
'%spb_h_spb',
'%spb_i_b',
'%spb_i_ce',
'%spb_i_h',
'%spb_i_st',
'%spb_or',
'%spb_prod',
'%spb_sum',
'%spb_tril',
'%spb_triu',
'%st_6',
'%st_c_st',
'%st_e',
'%st_f_st',
'%st_i_b',
'%st_i_c',
'%st_i_fptr',
'%st_i_h',
'%st_i_i',
'%st_i_ip',
'%st_i_lss',
'%st_i_msp',
'%st_i_p',
'%st_i_r',
'%st_i_s',
'%st_i_sp',
'%st_i_spb',
'%st_i_st',
'%st_matrix',
'%st_n_c',
'%st_n_l',
'%st_n_mc',
'%st_n_p',
'%st_n_s',
'%st_o_c',
'%st_o_l',
'%st_o_mc',
'%st_o_p',
'%st_o_s',
'%st_o_tl',
'%st_p',
'%st_size',
'%st_string',
'%st_t',
'%ticks_i_h',
'%xls_e',
'%xls_p',
'%xlssheet_e',
'%xlssheet_p',
'%xlssheet_size',
'%xlssheet_string',
'DominationRank',
'G_make',
'IsAScalar',
'NDcost',
'OS_Version',
'PlotSparse',
'ReadHBSparse',
'TCL_CreateSlave',
'abcd',
'abinv',
'accept_func_default',
'accept_func_vfsa',
'acf',
'acosd',
'acosh',
'acoshm',
'acosm',
'acot',
'acotd',
'acoth',
'acsc',
'acscd',
'acsch',
'add_demo',
'add_help_chapter',
'add_module_help_chapter',
'add_param',
'add_profiling',
'adj2sp',
'aff2ab',
'ana_style',
'analpf',
'analyze',
'aplat',
'arhnk',
'arl2',
'arma2p',
'arma2ss',
'armac',
'armax',
'armax1',
'arobasestring2strings',
'arsimul',
'ascii2string',
'asciimat',
'asec',
'asecd',
'asech',
'asind',
'asinh',
'asinhm',
'asinm',
'assert_checkalmostequal',
'assert_checkequal',
'assert_checkerror',
'assert_checkfalse',
'assert_checkfilesequal',
'assert_checktrue',
'assert_comparecomplex',
'assert_computedigits',
'assert_cond2reltol',
'assert_cond2reqdigits',
'assert_generror',
'atand',
'atanh',
'atanhm',
'atanm',
'atomsAutoload',
'atomsAutoloadAdd',
'atomsAutoloadDel',
'atomsAutoloadList',
'atomsCategoryList',
'atomsCheckModule',
'atomsDepTreeShow',
'atomsGetConfig',
'atomsGetInstalled',
'atomsGetInstalledPath',
'atomsGetLoaded',
'atomsGetLoadedPath',
'atomsInstall',
'atomsIsInstalled',
'atomsIsLoaded',
'atomsList',
'atomsLoad',
'atomsQuit',
'atomsRemove',
'atomsRepositoryAdd',
'atomsRepositoryDel',
'atomsRepositoryList',
'atomsRestoreConfig',
'atomsSaveConfig',
'atomsSearch',
'atomsSetConfig',
'atomsShow',
'atomsSystemInit',
'atomsSystemUpdate',
'atomsTest',
'atomsUpdate',
'atomsVersion',
'augment',
'auread',
'auwrite',
'balreal',
'bench_run',
'bilin',
'bilt',
'bin2dec',
'binomial',
'bitand',
'bitcmp',
'bitget',
'bitor',
'bitset',
'bitxor',
'black',
'blanks',
'bloc2exp',
'bloc2ss',
'block_parameter_error',
'bode',
'bode_asymp',
'bstap',
'buttmag',
'bvodeS',
'bytecode',
'bytecodewalk',
'cainv',
'calendar',
'calerf',
'calfrq',
'canon',
'casc',
'cat',
'cat_code',
'cb_m2sci_gui',
'ccontrg',
'cell',
'cell2mat',
'cellstr',
'center',
'cepstrum',
'cfspec',
'char',
'chart',
'cheb1mag',
'cheb2mag',
'check_gateways',
'check_modules_xml',
'check_versions',
'chepol',
'chfact',
'chsolve',
'classmarkov',
'clean_help',
'clock',
'cls2dls',
'cmb_lin',
'cmndred',
'cmoment',
'coding_ga_binary',
'coding_ga_identity',
'coff',
'coffg',
'colcomp',
'colcompr',
'colinout',
'colregul',
'companion',
'complex',
'compute_initial_temp',
'cond',
'cond2sp',
'condestsp',
'configure_msifort',
'configure_msvc',
'conjgrad',
'cont_frm',
'cont_mat',
'contrss',
'conv',
'convert_to_float',
'convertindex',
'convol',
'convol2d',
'copfac',
'correl',
'cosd',
'cosh',
'coshm',
'cosm',
'cotd',
'cotg',
'coth',
'cothm',
'cov',
'covar',
'createXConfiguration',
'createfun',
'createstruct',
'cross',
'crossover_ga_binary',
'crossover_ga_default',
'csc',
'cscd',
'csch',
'csgn',
'csim',
'cspect',
'ctr_gram',
'czt',
'dae',
'daeoptions',
'damp',
'datafit',
'date',
'datenum',
'datevec',
'dbphi',
'dcf',
'ddp',
'dec2bin',
'dec2hex',
'dec2oct',
'del_help_chapter',
'del_module_help_chapter',
'demo_begin',
'demo_choose',
'demo_compiler',
'demo_end',
'demo_file_choice',
'demo_folder_choice',
'demo_function_choice',
'demo_gui',
'demo_run',
'demo_viewCode',
'denom',
'derivat',
'derivative',
'des2ss',
'des2tf',
'detectmsifort64tools',
'detectmsvc64tools',
'determ',
'detr',
'detrend',
'devtools_run_builder',
'dhnorm',
'diff',
'diophant',
'dir',
'dirname',
'dispfiles',
'dllinfo',
'dscr',
'dsimul',
'dt_ility',
'dtsi',
'edit',
'edit_error',
'editor',
'eigenmarkov',
'eigs',
'ell1mag',
'enlarge_shape',
'entropy',
'eomday',
'epred',
'eqfir',
'eqiir',
'equil',
'equil1',
'erfinv',
'etime',
'eval',
'evans',
'evstr',
'example_run',
'expression2code',
'extract_help_examples',
'factor',
'factorial',
'factors',
'faurre',
'ffilt',
'fft2',
'fftshift',
'fieldnames',
'filt_sinc',
'filter',
'findABCD',
'findAC',
'findBDK',
'findR',
'find_freq',
'find_links',
'find_scicos_version',
'findm',
'findmsifortcompiler',
'findmsvccompiler',
'findx0BD',
'firstnonsingleton',
'fix',
'fixedpointgcd',
'flipdim',
'flts',
'fminsearch',
'formatBlackTip',
'formatBodeMagTip',
'formatBodePhaseTip',
'formatGainplotTip',
'formatHallModuleTip',
'formatHallPhaseTip',
'formatNicholsGainTip',
'formatNicholsPhaseTip',
'formatNyquistTip',
'formatPhaseplotTip',
'formatSgridDampingTip',
'formatSgridFreqTip',
'formatZgridDampingTip',
'formatZgridFreqTip',
'format_txt',
'fourplan',
'frep2tf',
'freson',
'frfit',
'frmag',
'fseek_origin',
'fsfirlin',
'fspec',
'fspecg',
'fstabst',
'ftest',
'ftuneq',
'fullfile',
'fullrf',
'fullrfk',
'fun2string',
'g_margin',
'gainplot',
'gamitg',
'gcare',
'gcd',
'gencompilationflags_unix',
'generateBlockImage',
'generateBlockImages',
'generic_i_ce',
'generic_i_h',
'generic_i_hm',
'generic_i_s',
'generic_i_st',
'genlib',
'genmarkov',
'geomean',
'getDiagramVersion',
'getModelicaPath',
'getPreferencesValue',
'get_file_path',
'get_function_path',
'get_param',
'get_profile',
'get_scicos_version',
'getd',
'getscilabkeywords',
'getshell',
'gettklib',
'gfare',
'gfrancis',
'givens',
'glever',
'gmres',
'group',
'gschur',
'gspec',
'gtild',
'h2norm',
'h_cl',
'h_inf',
'h_inf_st',
'h_norm',
'hallchart',
'halt',
'hank',
'hankelsv',
'harmean',
'haveacompiler',
'head_comments',
'help_from_sci',
'help_skeleton',
'hermit',
'hex2dec',
'hilb',
'hilbert',
'histc',
'horner',
'householder',
'hrmt',
'htrianr',
'hypermat',
'idct',
'idst',
'ifft',
'ifftshift',
'iir',
'iirgroup',
'iirlp',
'iirmod',
'ilib_build',
'ilib_build_jar',
'ilib_compile',
'ilib_for_link',
'ilib_gen_Make',
'ilib_gen_Make_unix',
'ilib_gen_cleaner',
'ilib_gen_gateway',
'ilib_gen_loader',
'ilib_include_flag',
'ilib_mex_build',
'im_inv',
'importScicosDiagram',
'importScicosPal',
'importXcosDiagram',
'imrep2ss',
'ind2sub',
'inistate',
'init_ga_default',
'init_param',
'initial_scicos_tables',
'input',
'instruction2code',
'intc',
'intdec',
'integrate',
'interp1',
'interpln',
'intersect',
'intl',
'intsplin',
'inttrap',
'inv_coeff',
'invr',
'invrs',
'invsyslin',
'iqr',
'isLeapYear',
'is_absolute_path',
'is_param',
'iscell',
'iscellstr',
'iscolumn',
'isempty',
'isfield',
'isinf',
'ismatrix',
'isnan',
'isrow',
'isscalar',
'issparse',
'issquare',
'isstruct',
'isvector',
'jmat',
'justify',
'kalm',
'karmarkar',
'kernel',
'kpure',
'krac2',
'kroneck',
'lattn',
'lattp',
'launchtest',
'lcf',
'lcm',
'lcmdiag',
'leastsq',
'leqe',
'leqr',
'lev',
'levin',
'lex_sort',
'lft',
'lin',
'lin2mu',
'lincos',
'lindquist',
'linf',
'linfn',
'linsolve',
'linspace',
'list2vec',
'list_param',
'listfiles',
'listfunctions',
'listvarinfile',
'lmisolver',
'lmitool',
'loadXcosLibs',
'loadmatfile',
'loadwave',
'log10',
'log2',
'logm',
'logspace',
'lqe',
'lqg',
'lqg2stan',
'lqg_ltr',
'lqr',
'ls',
'lyap',
'm2sci_gui',
'm_circle',
'macglov',
'macrovar',
'mad',
'makecell',
'manedit',
'mapsound',
'markp2ss',
'matfile2sci',
'mdelete',
'mean',
'meanf',
'median',
'members',
'mese',
'meshgrid',
'mfft',
'mfile2sci',
'minreal',
'minss',
'mkdir',
'modulo',
'moment',
'mrfit',
'msd',
'mstr2sci',
'mtlb',
'mtlb_0',
'mtlb_a',
'mtlb_all',
'mtlb_any',
'mtlb_axes',
'mtlb_axis',
'mtlb_beta',
'mtlb_box',
'mtlb_choices',
'mtlb_close',
'mtlb_colordef',
'mtlb_cond',
'mtlb_cov',
'mtlb_cumprod',
'mtlb_cumsum',
'mtlb_dec2hex',
'mtlb_delete',
'mtlb_diag',
'mtlb_diff',
'mtlb_dir',
'mtlb_double',
'mtlb_e',
'mtlb_echo',
'mtlb_error',
'mtlb_eval',
'mtlb_exist',
'mtlb_eye',
'mtlb_false',
'mtlb_fft',
'mtlb_fftshift',
'mtlb_filter',
'mtlb_find',
'mtlb_findstr',
'mtlb_fliplr',
'mtlb_fopen',
'mtlb_format',
'mtlb_fprintf',
'mtlb_fread',
'mtlb_fscanf',
'mtlb_full',
'mtlb_fwrite',
'mtlb_get',
'mtlb_grid',
'mtlb_hold',
'mtlb_i',
'mtlb_ifft',
'mtlb_image',
'mtlb_imp',
'mtlb_int16',
'mtlb_int32',
'mtlb_int8',
'mtlb_is',
'mtlb_isa',
'mtlb_isfield',
'mtlb_isletter',
'mtlb_isspace',
'mtlb_l',
'mtlb_legendre',
'mtlb_linspace',
'mtlb_logic',
'mtlb_logical',
'mtlb_loglog',
'mtlb_lower',
'mtlb_max',
'mtlb_mean',
'mtlb_median',
'mtlb_mesh',
'mtlb_meshdom',
'mtlb_min',
'mtlb_more',
'mtlb_num2str',
'mtlb_ones',
'mtlb_pcolor',
'mtlb_plot',
'mtlb_prod',
'mtlb_qr',
'mtlb_qz',
'mtlb_rand',
'mtlb_randn',
'mtlb_rcond',
'mtlb_realmax',
'mtlb_realmin',
'mtlb_s',
'mtlb_semilogx',
'mtlb_semilogy',
'mtlb_setstr',
'mtlb_size',
'mtlb_sort',
'mtlb_sortrows',
'mtlb_sprintf',
'mtlb_sscanf',
'mtlb_std',
'mtlb_strcmp',
'mtlb_strcmpi',
'mtlb_strfind',
'mtlb_strrep',
'mtlb_subplot',
'mtlb_sum',
'mtlb_t',
'mtlb_toeplitz',
'mtlb_tril',
'mtlb_triu',
'mtlb_true',
'mtlb_type',
'mtlb_uint16',
'mtlb_uint32',
'mtlb_uint8',
'mtlb_upper',
'mtlb_var',
'mtlb_zeros',
'mu2lin',
'mutation_ga_binary',
'mutation_ga_default',
'mvcorrel',
'mvvacov',
'nancumsum',
'nand2mean',
'nanmax',
'nanmean',
'nanmeanf',
'nanmedian',
'nanmin',
'nanreglin',
'nanstdev',
'nansum',
'narsimul',
'ndgrid',
'ndims',
'nehari',
'neigh_func_csa',
'neigh_func_default',
'neigh_func_fsa',
'neigh_func_vfsa',
'neldermead_cget',
'neldermead_configure',
'neldermead_costf',
'neldermead_defaultoutput',
'neldermead_destroy',
'neldermead_function',
'neldermead_get',
'neldermead_log',
'neldermead_new',
'neldermead_restart',
'neldermead_search',
'neldermead_updatesimp',
'nextpow2',
'nfreq',
'nicholschart',
'nlev',
'nmplot_cget',
'nmplot_configure',
'nmplot_contour',
'nmplot_destroy',
'nmplot_function',
'nmplot_get',
'nmplot_historyplot',
'nmplot_log',
'nmplot_new',
'nmplot_outputcmd',
'nmplot_restart',
'nmplot_search',
'nmplot_simplexhistory',
'noisegen',
'nonreg_test_run',
'now',
'nthroot',
'null',
'num2cell',
'numderivative',
'numdiff',
'numer',
'nyquist',
'nyquistfrequencybounds',
'obs_gram',
'obscont',
'observer',
'obsv_mat',
'obsvss',
'oct2dec',
'odeoptions',
'optim_ga',
'optim_moga',
'optim_nsga',
'optim_nsga2',
'optim_sa',
'optimbase_cget',
'optimbase_checkbounds',
'optimbase_checkcostfun',
'optimbase_checkx0',
'optimbase_configure',
'optimbase_destroy',
'optimbase_function',
'optimbase_get',
'optimbase_hasbounds',
'optimbase_hasconstraints',
'optimbase_hasnlcons',
'optimbase_histget',
'optimbase_histset',
'optimbase_incriter',
'optimbase_isfeasible',
'optimbase_isinbounds',
'optimbase_isinnonlincons',
'optimbase_log',
'optimbase_logshutdown',
'optimbase_logstartup',
'optimbase_new',
'optimbase_outputcmd',
'optimbase_outstruct',
'optimbase_proj2bnds',
'optimbase_set',
'optimbase_stoplog',
'optimbase_terminate',
'optimget',
'optimplotfunccount',
'optimplotfval',
'optimplotx',
'optimset',
'optimsimplex_center',
'optimsimplex_check',
'optimsimplex_compsomefv',
'optimsimplex_computefv',
'optimsimplex_deltafv',
'optimsimplex_deltafvmax',
'optimsimplex_destroy',
'optimsimplex_dirmat',
'optimsimplex_fvmean',
'optimsimplex_fvstdev',
'optimsimplex_fvvariance',
'optimsimplex_getall',
'optimsimplex_getallfv',
'optimsimplex_getallx',
'optimsimplex_getfv',
'optimsimplex_getn',
'optimsimplex_getnbve',
'optimsimplex_getve',
'optimsimplex_getx',
'optimsimplex_gradientfv',
'optimsimplex_log',
'optimsimplex_new',
'optimsimplex_reflect',
'optimsimplex_setall',
'optimsimplex_setallfv',
'optimsimplex_setallx',
'optimsimplex_setfv',
'optimsimplex_setn',
'optimsimplex_setnbve',
'optimsimplex_setve',
'optimsimplex_setx',
'optimsimplex_shrink',
'optimsimplex_size',
'optimsimplex_sort',
'optimsimplex_xbar',
'orth',
'output_ga_default',
'output_moga_default',
'output_nsga2_default',
'output_nsga_default',
'p_margin',
'pack',
'pareto_filter',
'parrot',
'pbig',
'pca',
'pcg',
'pdiv',
'pen2ea',
'pencan',
'pencost',
'penlaur',
'perctl',
'perl',
'perms',
'permute',
'pertrans',
'pfactors',
'pfss',
'phasemag',
'phaseplot',
'phc',
'pinv',
'playsnd',
'plotprofile',
'plzr',
'pmodulo',
'pol2des',
'pol2str',
'polar',
'polfact',
'prbs_a',
'prettyprint',
'primes',
'princomp',
'profile',
'proj',
'projsl',
'projspec',
'psmall',
'pspect',
'qmr',
'qpsolve',
'quart',
'quaskro',
'rafiter',
'randpencil',
'range',
'rank',
'readxls',
'recompilefunction',
'recons',
'reglin',
'regress',
'remezb',
'remove_param',
'remove_profiling',
'repfreq',
'replace_Ix_by_Fx',
'repmat',
'reset_profiling',
'resize_matrix',
'returntoscilab',
'rhs2code',
'ric_desc',
'riccati',
'rmdir',
'routh_t',
'rowcomp',
'rowcompr',
'rowinout',
'rowregul',
'rowshuff',
'rref',
'sample',
'samplef',
'samwr',
'savematfile',
'savewave',
'scanf',
'sci2exp',
'sciGUI_init',
'sci_sparse',
'scicos_getvalue',
'scicos_simulate',
'scicos_workspace_init',
'scisptdemo',
'scitest',
'sdiff',
'sec',
'secd',
'sech',
'selection_ga_elitist',
'selection_ga_random',
'sensi',
'setPreferencesValue',
'set_param',
'setdiff',
'sgrid',
'show_margins',
'show_pca',
'showprofile',
'signm',
'sinc',
'sincd',
'sind',
'sinh',
'sinhm',
'sinm',
'sm2des',
'sm2ss',
'smga',
'smooth',
'solve',
'sound',
'soundsec',
'sp2adj',
'spaninter',
'spanplus',
'spantwo',
'specfact',
'speye',
'sprand',
'spzeros',
'sqroot',
'sqrtm',
'squarewave',
'squeeze',
'srfaur',
'srkf',
'ss2des',
'ss2ss',
'ss2tf',
'sskf',
'ssprint',
'ssrand',
'st_deviation',
'st_i_generic',
'st_ility',
'stabil',
'statgain',
'stdev',
'stdevf',
'steadycos',
'strange',
'strcmpi',
'struct',
'sub2ind',
'sva',
'svplot',
'sylm',
'sylv',
'sysconv',
'sysdiag',
'sysfact',
'syslin',
'syssize',
'system',
'systmat',
'tabul',
'tand',
'tanh',
'tanhm',
'tanm',
'tbx_build_blocks',
'tbx_build_cleaner',
'tbx_build_gateway',
'tbx_build_gateway_clean',
'tbx_build_gateway_loader',
'tbx_build_help',
'tbx_build_help_loader',
'tbx_build_loader',
'tbx_build_localization',
'tbx_build_macros',
'tbx_build_pal_loader',
'tbx_build_src',
'tbx_builder',
'tbx_builder_gateway',
'tbx_builder_gateway_lang',
'tbx_builder_help',
'tbx_builder_help_lang',
'tbx_builder_macros',
'tbx_builder_src',
'tbx_builder_src_lang',
'tbx_generate_pofile',
'temp_law_csa',
'temp_law_default',
'temp_law_fsa',
'temp_law_huang',
'temp_law_vfsa',
'test_clean',
'test_on_columns',
'test_run',
'test_run_level',
'testexamples',
'tf2des',
'tf2ss',
'thrownan',
'tic',
'time_id',
'toc',
'toeplitz',
'tokenpos',
'toolboxes',
'trace',
'trans',
'translatepaths',
'tree2code',
'trfmod',
'trianfml',
'trimmean',
'trisolve',
'trzeros',
'typeof',
'ui_observer',
'union',
'unique',
'unit_test_run',
'unix_g',
'unix_s',
'unix_w',
'unix_x',
'unobs',
'unpack',
'unwrap',
'variance',
'variancef',
'vec2list',
'vectorfind',
'ver',
'warnobsolete',
'wavread',
'wavwrite',
'wcenter',
'weekday',
'wfir',
'wfir_gui',
'whereami',
'who_user',
'whos',
'wiener',
'wigner',
'window',
'winlist',
'with_javasci',
'with_macros_source',
'with_modelica_compiler',
'with_tk',
'xcorr',
'xcosBlockEval',
'xcosBlockInterface',
'xcosCodeGeneration',
'xcosConfigureModelica',
'xcosPal',
'xcosPalAdd',
'xcosPalAddBlock',
'xcosPalExport',
'xcosPalGenerateAllIcons',
'xcosShowBlockWarning',
'xcosValidateBlockSet',
'xcosValidateCompareBlock',
'xcos_compile',
'xcos_debug_gui',
'xcos_run',
'xcos_simulate',
'xcov',
'xmltochm',
'xmltoformat',
'xmltohtml',
'xmltojar',
'xmltopdf',
'xmltops',
'xmltoweb',
'yulewalk',
'zeropen',
'zgrid',
'zpbutt',
'zpch1',
'zpch2',
'zpell',
)
variables_kw = (
'$',
'%F',
'%T',
'%e',
'%eps',
'%f',
'%fftw',
'%gui',
'%i',
'%inf',
'%io',
'%modalWarning',
'%nan',
'%pi',
'%s',
'%t',
'%tk',
'%toolboxes',
'%toolboxes_dir',
'%z',
'PWD',
'SCI',
'SCIHOME',
'TMPDIR',
'arnoldilib',
'assertlib',
'atomslib',
'cacsdlib',
'compatibility_functilib',
'corelib',
'data_structureslib',
'demo_toolslib',
'development_toolslib',
'differential_equationlib',
'dynamic_linklib',
'elementary_functionslib',
'enull',
'evoid',
'external_objectslib',
'fd',
'fileiolib',
'functionslib',
'genetic_algorithmslib',
'helptoolslib',
'home',
'integerlib',
'interpolationlib',
'iolib',
'jnull',
'jvoid',
'linear_algebralib',
'm2scilib',
'matiolib',
'modules_managerlib',
'neldermeadlib',
'optimbaselib',
'optimizationlib',
'optimsimplexlib',
'output_streamlib',
'overloadinglib',
'parameterslib',
'polynomialslib',
'preferenceslib',
'randliblib',
'scicos_autolib',
'scicos_utilslib',
'scinoteslib',
'signal_processinglib',
'simulated_annealinglib',
'soundlib',
'sparselib',
'special_functionslib',
'spreadsheetlib',
'statisticslib',
'stringlib',
'tclscilib',
'timelib',
'umfpacklib',
'xcoslib',
)
# Maintenance entry point: regenerate the autogenerated keyword tuples above
# by querying a locally installed Scilab for its completion lists, then
# rewriting this very file in place.  (Python-2-era code: str pipes to
# communicate(), dict.iteritems().)
if __name__ == '__main__':
    import subprocess
    from pygments.util import format_lines, duplicates_removed
    # Completion-type -> token-type mapping; not referenced in this block --
    # presumably consumed by the lexer elsewhere or vestigial.  TODO confirm.
    mapping = {'variables': 'builtin'}
    def extract_completion(var_type):
        # Run a headless Scilab ('-nwni': no window, non-interactive) and feed
        # it a tiny script that prints the completion list for *var_type* to
        # stderr as one '||'-separated string.
        s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = s.communicate('''\
fd = mopen("/dev/stderr", "wt");
mputl(strcat(completion("", "%s"), "||"), fd);
mclose(fd)\n''' % var_type)
        # output is (stdout, stderr); a missing '||' in stderr means the
        # Scilab script failed, and stdout carries the diagnostics.
        if '||' not in output[1]:
            raise Exception(output[0])
        # Invalid DISPLAY causes this to be output:
        text = output[1].strip()
        if text.startswith('Error: unable to open display \n'):
            text = text[len('Error: unable to open display \n'):]
        return text.split('||')
    new_data = {}
    seen = set() # only keep first type for a given word
    for t in ('functions', 'commands', 'macros', 'variables'):
        # duplicates_removed drops words already claimed by an earlier type,
        # so each keyword ends up in exactly one *_kw tuple.
        new_data[t] = duplicates_removed(extract_completion(t), seen)
        seen.update(set(new_data[t]))
    # Self-rewrite: keep everything before the '# Autogenerated' marker and
    # from this __main__ guard onwards; regenerate the data in between.
    with open(__file__) as f:
        content = f.read()
    header = content[:content.find('# Autogenerated')]
    footer = content[content.find("if __name__ == '__main__':"):]
    with open(__file__, 'w') as f:
        f.write(header)
        f.write('# Autogenerated\n\n')
        for k, v in sorted(new_data.iteritems()):
            f.write(format_lines(k + '_kw', v) + '\n\n')
        f.write(footer)
| bsd-3-clause |
sidartaoliveira/ansible | lib/ansible/__init__.py | 301 | 1224 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
| gpl-3.0 |
petecummings/django-cms | cms/test_utils/cli.py | 14 | 13263 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import dj_database_url
import django
from django.utils import six
from cms.utils.compat import DJANGO_1_6, DJANGO_1_7
def gettext(s):
    """Identity translation marker.

    Settings modules conventionally alias ``gettext`` to a no-op so that
    translatable strings are found by message extraction without pulling in
    Django's translation machinery at settings-import time.  Defined with
    ``def`` rather than a lambda assignment (PEP 8, E731); callers are
    unaffected.
    """
    return s


# Minimal URLconf for the test project; intentionally empty here.
urlpatterns = []
def _detect_migration_layout(apps):
SOUTH_MODULES = {}
DJANGO_MODULES = {}
for module in apps:
try:
__import__('%s.migrations_django' % module)
DJANGO_MODULES[module] = '%s.migrations_django' % module
SOUTH_MODULES[module] = '%s.migrations' % module
except Exception:
pass
return DJANGO_MODULES, SOUTH_MODULES
def configure(db_url, **extra):
from django.conf import settings
if six.PY3:
siteid = 1
else:
siteid = long(1) # nopyflakes
os.environ['DJANGO_SETTINGS_MODULE'] = 'cms.test_utils.cli'
if not 'DATABASES' in extra:
DB = dj_database_url.parse(db_url)
else:
DB = {}
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
defaults = dict(
PROJECT_PATH=PROJECT_PATH,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
},
CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True,
DEBUG=True,
TEMPLATE_DEBUG=True,
DATABASE_SUPPORTS_TRANSACTIONS=True,
DATABASES={
'default': DB
},
SITE_ID=siteid,
USE_I18N=True,
MEDIA_ROOT='/media/',
STATIC_ROOT='/static/',
CMS_MEDIA_ROOT='/cms-media/',
CMS_MEDIA_URL='/cms-media/',
MEDIA_URL='/media/',
STATIC_URL='/static/',
ADMIN_MEDIA_PREFIX='/static/admin/',
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
SECRET_KEY='key',
MIDDLEWARE_CLASSES=[
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
],
INSTALLED_APPS=[
'debug_toolbar',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'djangocms_admin_style',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.messages',
'treebeard',
'cms',
'menus',
'djangocms_text_ckeditor',
'djangocms_column',
'djangocms_picture',
'djangocms_file',
'djangocms_flash',
'djangocms_googlemap',
'djangocms_teaser',
'djangocms_video',
'djangocms_inherit',
'djangocms_style',
'djangocms_link',
'cms.test_utils.project.sampleapp',
'cms.test_utils.project.placeholderapp',
'cms.test_utils.project.pluginapp.plugins.manytomany_rel',
'cms.test_utils.project.pluginapp.plugins.extra_context',
'cms.test_utils.project.pluginapp.plugins.meta',
'cms.test_utils.project.pluginapp.plugins.one_thing',
'cms.test_utils.project.fakemlng',
'cms.test_utils.project.fileapp',
'cms.test_utils.project.objectpermissionsapp',
'cms.test_utils.project.bunch_of_plugins',
'cms.test_utils.project.extensionapp',
'cms.test_utils.project.mti_pluginapp',
'reversion',
'sekizai',
'hvad',
],
DEBUG_TOOLBAR_PATCH_SETTINGS = False,
INTERNAL_IPS = ['127.0.0.1'],
AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',
'cms.test_utils.project.objectpermissionsapp.backends.ObjectPermissionBackend',
),
LANGUAGE_CODE="en",
LANGUAGES=(
('en', gettext('English')),
('fr', gettext('French')),
('de', gettext('German')),
('pt-br', gettext('Brazilian Portuguese')),
('nl', gettext("Dutch")),
('es-mx', u'Español'),
),
CMS_LANGUAGES={
1: [
{
'code': 'en',
'name': gettext('English'),
'fallbacks': ['fr', 'de'],
'public': True,
},
{
'code': 'de',
'name': gettext('German'),
'fallbacks': ['fr', 'en'],
'public': True,
},
{
'code': 'fr',
'name': gettext('French'),
'public': True,
},
{
'code': 'pt-br',
'name': gettext('Brazilian Portuguese'),
'public': False,
},
{
'code': 'es-mx',
'name': u'Español',
'public': True,
},
],
2: [
{
'code': 'de',
'name': gettext('German'),
'fallbacks': ['fr'],
'public': True,
},
{
'code': 'fr',
'name': gettext('French'),
'public': True,
},
],
3: [
{
'code': 'nl',
'name': gettext('Dutch'),
'fallbacks': ['de'],
'public': True,
},
{
'code': 'de',
'name': gettext('German'),
'fallbacks': ['nl'],
'public': False,
},
],
'default': {
'hide_untranslated': False,
},
},
CMS_TEMPLATES=(
('col_two.html', gettext('two columns')),
('col_three.html', gettext('three columns')),
('nav_playground.html', gettext('navigation examples')),
('simple.html', 'simple'),
('static.html', 'static placeholders'),
),
CMS_PLACEHOLDER_CONF={
'col_sidebar': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'MultiColumnPlugin', 'SnippetPlugin'),
'name': gettext("sidebar column")
},
'col_left': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin',
'MultiColumnPlugin', 'StylePlugin', 'EmptyPlugin'),
'name': gettext("left column"),
'plugin_modules': {
'LinkPlugin': 'Different Grouper'
},
'plugin_labels': {
'LinkPlugin': gettext('Add a link')
},
},
'col_right': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin', 'MultiColumnPlugin',
'StylePlugin'),
'name': gettext("right column")
},
'extra_context': {
"plugins": ('TextPlugin',),
"extra_context": {"extra_width": 250},
"name": "extra context"
},
},
CMS_PERMISSION=True,
CMS_PUBLIC_FOR='all',
CMS_CACHE_DURATIONS={
'menus': 0,
'content': 0,
'permissions': 0,
},
CMS_APPHOOKS=[],
CMS_PLUGIN_PROCESSORS=tuple(),
CMS_PLUGIN_CONTEXT_PROCESSORS=tuple(),
CMS_SITE_CHOICES_CACHE_KEY='CMS:site_choices',
CMS_PAGE_CHOICES_CACHE_KEY='CMS:page_choices',
SOUTH_TESTS_MIGRATE=False,
CMS_NAVIGATION_EXTENDERS=(
('cms.test_utils.project.sampleapp.menu_extender.get_nodes', 'SampleApp Menu'),
),
TEST_RUNNER='cms.test_utils.runners.NormalTestRunner',
JUNIT_OUTPUT_DIR='.',
TIME_TESTS=False,
ROOT_URLCONF='cms.test_utils.cli',
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
ALLOWED_HOSTS=['localhost'],
)
from django.utils.functional import empty
settings._wrapped = empty
defaults.update(extra)
if DJANGO_1_7:
defaults.update(dict(
TEMPLATE_CONTEXT_PROCESSORS=[
"django.contrib.auth.context_processors.auth",
'django.contrib.messages.context_processors.messages',
"django.core.context_processors.i18n",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media",
'django.core.context_processors.csrf',
"cms.context_processors.cms_settings",
"sekizai.context_processors.sekizai",
"django.core.context_processors.static",
],
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
),
TEMPLATE_DIRS=[
os.path.abspath(os.path.join(PROJECT_PATH, 'project', 'templates'))
],
))
else:
defaults['TEMPLATES'] = [
{
'NAME': 'django',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.abspath(os.path.join(PROJECT_PATH, 'project', 'templates'))],
'OPTIONS': {
'context_processors': [
"django.contrib.auth.context_processors.auth",
'django.contrib.messages.context_processors.messages',
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
'django.template.context_processors.csrf',
"cms.context_processors.cms_settings",
"sekizai.context_processors.sekizai",
"django.template.context_processors.static",
],
}
}
]
plugins = ('djangocms_column', 'djangocms_file', 'djangocms_flash', 'djangocms_googlemap',
'djangocms_inherit', 'djangocms_link', 'djangocms_picture', 'djangocms_style',
'djangocms_teaser', 'djangocms_video')
DJANGO_MIGRATION_MODULES, SOUTH_MIGRATION_MODULES = _detect_migration_layout(plugins)
if DJANGO_1_6:
defaults['INSTALLED_APPS'].append('south')
defaults['SOUTH_MIGRATION_MODULES'] = SOUTH_MIGRATION_MODULES
else:
defaults['MIGRATION_MODULES'] = DJANGO_MIGRATION_MODULES
if not defaults.get('TESTS_MIGRATE', False):
# Disable migrations for Django 1.7+
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return "notmigrations"
defaults['MIGRATION_MODULES'] = DisableMigrations()
if 'AUTH_USER_MODEL' in extra:
custom_user_app = 'cms.test_utils.project.' + extra['AUTH_USER_MODEL'].split('.')[0]
defaults['INSTALLED_APPS'].insert(defaults['INSTALLED_APPS'].index('cms'), custom_user_app)
# add data from env
extra_settings = os.environ.get("DJANGO_EXTRA_SETTINGS", None)
if extra_settings:
from json import load, loads
if os.path.exists(extra_settings):
with open(extra_settings) as fobj:
defaults.update(load(fobj))
else:
defaults.update(loads(extra_settings))
settings.configure(**defaults)
if DJANGO_1_6:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
from django.contrib import admin
admin.autodiscover()
else:
django.setup()
| bsd-3-clause |
Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/prompt_toolkit/application.py | 15 | 8747 | from __future__ import unicode_literals
from .buffer import Buffer, AcceptAction
from .buffer_mapping import BufferMapping
from .clipboard import Clipboard, InMemoryClipboard
from .enums import DEFAULT_BUFFER, EditingMode
from .filters import CLIFilter, to_cli_filter
from .key_binding.bindings.basic import load_basic_bindings
from .key_binding.bindings.emacs import load_emacs_bindings
from .key_binding.bindings.vi import load_vi_bindings
from .key_binding.registry import BaseRegistry
from .key_binding.defaults import load_key_bindings
from .layout import Window
from .layout.containers import Container
from .layout.controls import BufferControl
from .styles import DEFAULT_STYLE, Style
import six
# Public API of this module (consumed by `from ...application import *`).
__all__ = (
    'AbortAction',
    'Application',
)
class AbortAction(object):
    """
    Actions to take on an Exit or Abort exception.
    """
    RETRY = 'retry'
    RAISE_EXCEPTION = 'raise-exception'
    RETURN_NONE = 'return-none'
    # All valid values; Application.__init__ asserts its on_abort/on_exit
    # arguments are members of this tuple.
    _all = (RETRY, RAISE_EXCEPTION, RETURN_NONE)
class Application(object):
    """
    Application class to be passed to a
    :class:`~prompt_toolkit.interface.CommandLineInterface`.
    This contains all customizable logic that is not I/O dependent.
    (So, what is independent of event loops, input and output.)
    This way, such an :class:`.Application` can run easily on several
    :class:`~prompt_toolkit.interface.CommandLineInterface` instances, each
    with a different I/O backends. that runs for instance over telnet, SSH or
    any other I/O backend.
    :param layout: A :class:`~prompt_toolkit.layout.containers.Container` instance.
    :param buffer: A :class:`~prompt_toolkit.buffer.Buffer` instance for the default buffer.
    :param initial_focussed_buffer: Name of the buffer that is focussed during start-up.
    :param key_bindings_registry:
        :class:`~prompt_toolkit.key_binding.registry.BaseRegistry` instance for
        the key bindings.
    :param clipboard: :class:`~prompt_toolkit.clipboard.base.Clipboard` to use.
    :param on_abort: What to do when Control-C is pressed.
    :param on_exit: What to do when Control-D is pressed.
    :param use_alternate_screen: When True, run the application on the alternate screen buffer.
    :param get_title: Callable that returns the current title to be displayed in the terminal.
    :param erase_when_done: (bool) Clear the application output when it finishes.
    :param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
        forward and a '?' searches backward. In readline mode, this is usually
        reversed.
    Filters:
    :param mouse_support: (:class:`~prompt_toolkit.filters.CLIFilter` or
        boolean). When True, enable mouse support.
    :param paste_mode: :class:`~prompt_toolkit.filters.CLIFilter` or boolean.
    :param ignore_case: :class:`~prompt_toolkit.filters.CLIFilter` or boolean.
    :param editing_mode: :class:`~prompt_toolkit.enums.EditingMode`.
    Callbacks (all of these should accept a
    :class:`~prompt_toolkit.interface.CommandLineInterface` object as input.)
    :param on_input_timeout: Called when there is no input for x seconds.
        (Fired when any eventloop.onInputTimeout is fired.)
    :param on_start: Called when reading input starts.
    :param on_stop: Called when reading input ends.
    :param on_reset: Called during reset.
    :param on_buffer_changed: Called when the content of a buffer has been changed.
    :param on_initialize: Called after the
        :class:`~prompt_toolkit.interface.CommandLineInterface` initializes.
    :param on_render: Called right after rendering.
    :param on_invalidate: Called when the UI has been invalidated.
    """
    def __init__(self, layout=None, buffer=None, buffers=None,
                 initial_focussed_buffer=DEFAULT_BUFFER,
                 style=None,
                 key_bindings_registry=None, clipboard=None,
                 on_abort=AbortAction.RAISE_EXCEPTION, on_exit=AbortAction.RAISE_EXCEPTION,
                 use_alternate_screen=False, mouse_support=False,
                 get_title=None,
                 paste_mode=False, ignore_case=False, editing_mode=EditingMode.EMACS,
                 erase_when_done=False,
                 reverse_vi_search_direction=False,
                 on_input_timeout=None, on_start=None, on_stop=None,
                 on_reset=None, on_initialize=None, on_buffer_changed=None,
                 on_render=None, on_invalidate=None):
        # Accept plain booleans for the filter arguments by normalizing
        # them into CLIFilter instances first.
        paste_mode = to_cli_filter(paste_mode)
        ignore_case = to_cli_filter(ignore_case)
        mouse_support = to_cli_filter(mouse_support)
        reverse_vi_search_direction = to_cli_filter(reverse_vi_search_direction)
        # Fail fast on wrongly-typed arguments.
        assert layout is None or isinstance(layout, Container)
        assert buffer is None or isinstance(buffer, Buffer)
        assert buffers is None or isinstance(buffers, (dict, BufferMapping))
        assert key_bindings_registry is None or isinstance(key_bindings_registry, BaseRegistry)
        assert clipboard is None or isinstance(clipboard, Clipboard)
        assert on_abort in AbortAction._all
        assert on_exit in AbortAction._all
        assert isinstance(use_alternate_screen, bool)
        assert get_title is None or callable(get_title)
        assert isinstance(paste_mode, CLIFilter)
        assert isinstance(ignore_case, CLIFilter)
        assert isinstance(editing_mode, six.string_types)
        assert on_input_timeout is None or callable(on_input_timeout)
        assert style is None or isinstance(style, Style)
        assert isinstance(erase_when_done, bool)
        assert on_start is None or callable(on_start)
        assert on_stop is None or callable(on_stop)
        assert on_reset is None or callable(on_reset)
        assert on_buffer_changed is None or callable(on_buffer_changed)
        assert on_initialize is None or callable(on_initialize)
        assert on_render is None or callable(on_render)
        assert on_invalidate is None or callable(on_invalidate)
        # Default layout: a single window showing the default buffer.
        self.layout = layout or Window(BufferControl())
        # Make sure that the 'buffers' dictionary is a BufferMapping.
        # NOTE: If no buffer is given, we create a default Buffer, with IGNORE as
        #       default accept_action. This is what makes sense for most users
        #       creating full screen applications. Doing nothing is the obvious
        #       default. Those creating a REPL would use the shortcuts module that
        #       passes in RETURN_DOCUMENT.
        self.buffer = buffer or Buffer(accept_action=AcceptAction.IGNORE)
        if not buffers or not isinstance(buffers, BufferMapping):
            self.buffers = BufferMapping(buffers, initial=initial_focussed_buffer)
        else:
            self.buffers = buffers
        if buffer:
            # An explicit `buffer` argument wins over any DEFAULT_BUFFER
            # entry already present in `buffers`.
            self.buffers[DEFAULT_BUFFER] = buffer
        self.initial_focussed_buffer = initial_focussed_buffer
        self.style = style or DEFAULT_STYLE
        if key_bindings_registry is None:
            key_bindings_registry = load_key_bindings()
        if get_title is None:
            get_title = lambda: None
        self.key_bindings_registry = key_bindings_registry
        self.clipboard = clipboard or InMemoryClipboard()
        self.on_abort = on_abort
        self.on_exit = on_exit
        self.use_alternate_screen = use_alternate_screen
        self.mouse_support = mouse_support
        self.get_title = get_title
        self.paste_mode = paste_mode
        self.ignore_case = ignore_case
        self.editing_mode = editing_mode
        self.erase_when_done = erase_when_done
        self.reverse_vi_search_direction = reverse_vi_search_direction
        def dummy_handler(cli):
            " Dummy event handler. "
        # Any callback that was not supplied falls back to the no-op handler,
        # so callers can invoke the hooks unconditionally.
        self.on_input_timeout = on_input_timeout or dummy_handler
        self.on_start = on_start or dummy_handler
        self.on_stop = on_stop or dummy_handler
        self.on_reset = on_reset or dummy_handler
        self.on_initialize = on_initialize or dummy_handler
        self.on_buffer_changed = on_buffer_changed or dummy_handler
        self.on_render = on_render or dummy_handler
        self.on_invalidate = on_invalidate or dummy_handler
        # List of 'extra' functions to execute before a CommandLineInterface.run.
        # Note: It's important to keep this here, and not in the
        #       CommandLineInterface itself. shortcuts.run_application creates
        #       a new Application instance everytime. (Which is correct, it
        #       could be that we want to detach from one IO backend and attach
        #       the UI on a different backend.) But important is to keep as
        #       much state as possible between runs.
        self.pre_run_callables = []
| apache-2.0 |
chriskmanx/qmole | QMOLEDEV/pygtk-2.24.0/examples/simple/dnd.py | 6 | 5951 | #! /usr/bin/env python
import pygtk
pygtk.require('2.0')
import gobject
import gtk
from dndpixmap import drag_icon_xpm, trashcan_open_xpm, trashcan_closed_xpm
# Pixbufs for the two trashcan states; assigned by main().
trashcan_open = None
trashcan_closed = None
# True while a drag is hovering over the trashcan image target.
# (Fix: dropped the stray trailing semicolon — un-Pythonic no-op.)
have_drag = False
# Popup state: whether the 3x3 popup table is showing / hovered, plus the
# gobject timer ids (0 means "no timer armed").
popped_up = False
in_popup = False
popup_timer = 0
popdown_timer = 0
popup_win = None
# Application-defined target ids, reported back as `info` in the handlers.
TARGET_STRING = 0
TARGET_ROOTWIN = 1
# (target name, flags, application id) triples advertised by drag sources
# and accepted by drop targets.
target = [
    ('STRING', 0, TARGET_STRING),
    ('text/plain', 0, TARGET_STRING),
    ('application/x-rootwin-drop', 0, TARGET_ROOTWIN)
]
def target_drag_leave(w, context, time):
    """'drag_leave' handler for the trashcan image: show the closed can."""
    global trashcan_closed
    global have_drag
    print 'leave'
    have_drag = False
    w.set_from_pixbuf(trashcan_closed)
def target_drag_motion(w, context, x, y, time):
    """'drag_motion' handler for the trashcan image: open the can, report
    the drag's source widget, and accept the context's suggested action."""
    global trashcan_open
    global have_drag
    if not have_drag:
        have_drag = True
        w.set_from_pixbuf(trashcan_open)
    source_widget = context.get_source_widget()
    # Trailing comma: keep the class name on the same output line.
    print 'motion, source ',
    if source_widget:
        print source_widget.__class__.__name__
    else:
        print 'unknown'
    context.drag_status(context.suggested_action, time)
    return True
def target_drag_drop(w, context, x, y, time):
    """'drag_drop' handler for the trashcan image: request the dropped data
    (first offered target); returns True if the drop was accepted."""
    global trashcan_closed
    global have_drag
    print 'drop'
    have_drag = False
    w.set_from_pixbuf(trashcan_closed)
    if context.targets:
        # The actual payload arrives later in target_drag_data_received().
        w.drag_get_data(context, context.targets[0], time)
        return True
    return False
def target_drag_data_received(w, context, x, y, data, info, time):
    """'drag_data_received' handler for the trashcan: accept 8-bit (text)
    data, print it, and complete the drop without requesting a delete."""
    accepted = (data.format == 8)
    if accepted:
        print('Received "%s" in trashcan' % data.data)
    context.finish(accepted, False, time)
def label_drag_data_received(w, context, x, y, data, info, time):
    """'drag_data_received' handler for the label target: accept 8-bit
    (text) data, print it, and complete the drop (no delete requested)."""
    accepted = bool(data) and data.format == 8
    if accepted:
        print('Received "%s" in label' % data.data)
    context.finish(accepted, False, time)
def source_drag_data_get(w, context, selection_data, info, time):
    """'drag_data_get' handler for the drag source button: supply payload."""
    if info == TARGET_ROOTWIN:
        print 'I was dropped on the rootwin'
    else:
        # 8 = bits per data unit, i.e. plain byte/text data.
        selection_data.set(selection_data.target, 8, "I'm Data!")
def popdown_cb():
    """Timeout callback: hide the popup window and clear the popdown timer.
    Returns False so the gobject timeout fires only once."""
    global popdown_timer, popped_up
    global popup_win
    popdown_timer = 0
    popup_win.hide()
    popped_up = False
    return False
def popup_motion(w, context, x, y, time):
    """'drag_motion' over a popup button: the drag entered the popup, so
    cancel any pending popdown timer to keep the popup visible."""
    global in_popup, popdown_timer
    if not in_popup:
        in_popup = True
        if popdown_timer:
            print 'removed popdown'
            gobject.source_remove(popdown_timer)
            popdown_timer = 0
    return True
def popup_leave(w, context, time):
    """'drag_leave' from a popup button: schedule the popup to hide."""
    global in_popup, popdown_timer
    print 'popup_leave'
    if in_popup:
        in_popup = False
        if not popdown_timer:
            print 'added popdown'
            # Hide the popup half a second after the drag leaves it.
            popdown_timer = gobject.timeout_add(500, popdown_cb)
def popup_cb():
    """Timeout callback: lazily build (once) and show the 3x3 popup table of
    drop-target buttons, then arm the popdown timer. Returns False so the
    gobject timeout fires only once."""
    global popped_up, popup_win
    global popup_timer, popdown_timer
    if not popped_up:
        if not popup_win:
            # Build the popup window on first use and reuse it afterwards.
            popup_win = gtk.Window(gtk.WINDOW_POPUP)
            popup_win.set_position(gtk.WIN_POS_MOUSE)
            table = gtk.Table(3,3,False)
            for k in range(9):
                i, j = divmod(k, 3)
                b = gtk.Button("%d,%d" % (i,j))
                table.attach(b, i,i+1,j,j+1)
                # Each cell is itself a drop target so the drag can roam
                # over the popup without it disappearing.
                b.drag_dest_set(gtk.DEST_DEFAULT_ALL, target,
                                gtk.gdk.ACTION_COPY | gtk.gdk.ACTION_MOVE)
                b.connect('drag_motion', popup_motion)
                b.connect('drag_leave', popup_leave)
            table.show_all()
            popup_win.add(table)
        popup_win.show()
        popped_up = True
    # Auto-hide unless the drag enters the popup (popup_motion cancels this).
    popdown_timer = gobject.timeout_add(500, popdown_cb)
    print 'added popdown'
    popup_timer = 0
    return False
def popsite_motion(w, context, x, y, time):
    """'drag_motion' over the 'Popup' label: arm the popup timer (500 ms)."""
    global popup_timer
    if not popup_timer:
        popup_timer = gobject.timeout_add(500, popup_cb)
    return True
def popsite_leave(w, context, time):
    """'drag_leave' from the 'Popup' label: disarm the pending popup timer."""
    global popup_timer
    if popup_timer:
        gobject.source_remove(popup_timer)
        popup_timer = 0
def source_drag_data_delete(w, context, data):
    """'drag_data_delete' handler: fired after a MOVE drop; the source
    should now delete its copy of the data (this demo just prints)."""
    print 'Delete the data!'
def create_pixmap(w, xpm):
    """Build a gtk.gdk.Pixmap from inline XPM data using widget w's window.
    NOTE(review): appears unused in this example — main() uses
    pixbuf_new_from_xpm_data instead."""
    return gtk.gdk.pixmap_create_from_xpm_d(w.window, None, xpm)
def main():
    """Build the demo window: a drop label, a popup-spawning label, the
    trashcan drop target, and a drag-source button in a 2x2 table."""
    global trashcan_open
    global trashcan_closed
    global drag_icon
    win = gtk.Window()
    win.realize()
    win.connect('destroy', lambda w: gtk.main_quit())
    table = gtk.Table(2,2)
    win.add(table)
    drag_icon = gtk.gdk.pixbuf_new_from_xpm_data(drag_icon_xpm)
    trashcan_open = gtk.gdk.pixbuf_new_from_xpm_data(trashcan_open_xpm)
    trashcan_closed = gtk.gdk.pixbuf_new_from_xpm_data(trashcan_closed_xpm)
    label = gtk.Label('Drop on Trashcan!\n')
    # target[:-1] drops the rootwin entry — only real widgets accept here.
    label.drag_dest_set(gtk.DEST_DEFAULT_ALL, target[:-1],
                        gtk.gdk.ACTION_COPY | gtk.gdk.ACTION_MOVE)
    label.connect('drag_data_received', label_drag_data_received)
    table.attach(label, 0, 1, 0, 1)
    label = gtk.Label('Popup\n')
    label.drag_dest_set(gtk.DEST_DEFAULT_ALL, target[:-1],
                        gtk.gdk.ACTION_COPY | gtk.gdk.ACTION_MOVE)
    table.attach(label, 1, 2, 1, 2)
    label.connect('drag_motion', popsite_motion)
    label.connect('drag_leave', popsite_leave)
    image = gtk.Image()
    image.set_from_pixbuf(trashcan_closed)
    # No default drop behaviour: the connected handlers implement it manually.
    image.drag_dest_set(0, [], 0)
    table.attach(image, 1, 2, 0, 1)
    image.connect('drag_leave', target_drag_leave)
    image.connect('drag_motion', target_drag_motion)
    image.connect('drag_drop', target_drag_drop)
    image.connect('drag_data_received', target_drag_data_received)
    b = gtk.Button('Drag from Here\n')
    b.drag_source_set(gtk.gdk.BUTTON1_MASK|gtk.gdk.BUTTON3_MASK, target,
                      gtk.gdk.ACTION_COPY|gtk.gdk.ACTION_MOVE)
    b.drag_source_set_icon_pixbuf(drag_icon)
    table.attach(b, 0, 1, 1, 2)
    b.connect('drag_data_get', source_drag_data_get)
    b.connect('drag_data_delete', source_drag_data_delete)
    win.show_all()
# Build the UI, then enter the GTK main loop.
main()
gtk.main()
| gpl-3.0 |
kjc88/sl4a | python/src/Tools/pybench/Imports.py | 45 | 2947 | from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):
    """pybench test: cost of re-importing an already-loaded stdlib module.
    `os` is imported at module level, so each statement below only pays the
    sys.modules lookup, not a real load. The 25 repeated statements are the
    benchmark payload — do not collapse them into a loop."""
    version = 2.0
    # 25 import statements per round (5 * 5 matches the count below).
    operations = 5 * 5
    rounds = 40000
    def test(self):
        for i in xrange(self.rounds):
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
    def calibrate(self):
        # Empty loop measuring the per-round overhead to subtract.
        for i in xrange(self.rounds):
            pass
class SecondPackageImport(Test):
    """pybench test: cost of re-importing an already-loaded package.
    `package.submodule` is imported at module level, so `package` is cached.
    The 25 repeated statements are the benchmark payload."""
    version = 2.0
    # 25 import statements per round (5 * 5 matches the count below).
    operations = 5 * 5
    rounds = 40000
    def test(self):
        for i in xrange(self.rounds):
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
    def calibrate(self):
        # Empty loop measuring the per-round overhead to subtract.
        for i in xrange(self.rounds):
            pass
class SecondSubmoduleImport(Test):
    """pybench test: cost of re-importing an already-loaded submodule.
    `package.submodule` is imported at module level, so each statement below
    only pays the cached-module lookup. The 25 repeated statements are the
    benchmark payload."""
    version = 2.0
    # 25 import statements per round (5 * 5 matches the count below).
    operations = 5 * 5
    rounds = 40000
    def test(self):
        for i in xrange(self.rounds):
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
    def calibrate(self):
        # Empty loop measuring the per-round overhead to subtract.
        for i in xrange(self.rounds):
            pass
| apache-2.0 |
spbguru/repo1 | nupic/data/jsonhelpers.py | 7 | 5095 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This script is a wrapper for JSON primitives, such as validation.
# Using routines of this module permits us to replace the underlying
# implementation with a better one without disrupting client code.
#
# In particular, at the time of this writing, there weren't really great
# json validation packages available for python. We initially settled
# on validictory, but it has a number of shortcomings, such as:
# * format error diagnostic message isn't always helpful for diagnosis
# * doesn't support references
# * doesn't support application of defaults
# * doesn't support dependencies
#
# TODO: offer a combined json parsing/validation function that applies
# defaults from the schema
import json
import math
import os
import validictory
class ValidationError(validictory.ValidationError):
  """Raised by validate() when a value fails json-schema validation.
  Wraps validictory's error so client code does not have to depend on the
  validictory package directly."""
  pass
class NaNInvalidator(validictory.SchemaValidator):
  """ validictory.SchemaValidator subclass to not accept NaN values as numbers.
  Usage:
        validate(value, schemaDict, validator_cls=NaNInvalidator)
  """
  def validate_type_number(self, val):
    # Reject NaN explicitly before delegating; presumably the base
    # validator would otherwise accept NaN as a number.
    return not math.isnan(val) \
           and super(NaNInvalidator, self).validate_type_number(val)
###############################################################################
def validate(value, **kwds):
  """ Validate a python value against json schema:
  validate(value, schemaPath)
  validate(value, schemaDict)

  value:          python object to validate against the schema

  The json schema may be specified either as a path of the file containing
  the json schema or as a python dictionary using one of the
  following keywords as arguments:
    schemaPath:     Path of file containing the json schema object.
    schemaDict:     Python dictionary containing the json schema object

  Any remaining keyword arguments are forwarded to validictory.validate()
  (e.g. validator_cls=NaNInvalidator).

  Returns: nothing

  Raises:
          ValidationError when value fails json validation
          ValueError when neither schemaPath nor schemaDict is given
  """
  # Explicit raise instead of `assert` so the argument check survives
  # running under `python -O` (asserts are stripped there).
  if 'schemaPath' in kwds:
    schemaPath = kwds.pop('schemaPath')
    schemaDict = loadJsonValueFromFile(schemaPath)
  elif 'schemaDict' in kwds:
    schemaDict = kwds.pop('schemaDict')
  else:
    raise ValueError(
      "validate() requires either a 'schemaPath' or a 'schemaDict' keyword")

  try:
    # Remaining kwds are passed through to validictory.
    validictory.validate(value, schemaDict, **kwds)
  except validictory.ValidationError as e:
    # Re-raise as our own ValidationError type (see class above).
    raise ValidationError(e)
###############################################################################
def loadJsonValueFromFile(inputFilePath):
  """ Read the file at inputFilePath and return its JSON content converted
  to the corresponding python object.

  inputFilePath:
                  Path of the json file;

  Returns:
                  python value that represents the loaded json value
  """
  with open(inputFilePath) as fileObj:
    return json.load(fileObj)
###############################################################################
def test():
  """ Smoke-test validate(): exercise the schemaDict and schemaPath keywords,
  each with one passing and one failing value. Exits with status 1 if a
  negative test unexpectedly passes.
  """
  import sys
  # Minimal schema: a single required boolean property, nothing else allowed.
  schemaDict = {
    "description":"JSON schema for jsonhelpers.py test code",
    "type":"object",
    "additionalProperties":False,
    "properties":{
      "myBool":{
        "description":"Some boolean property",
        "required":True,
        "type":"boolean"
      }
    }
  }
  d = {
    'myBool': False
  }
  print "Validating schemaDict method in positive test..."
  validate(d, schemaDict=schemaDict)
  print "ok\n"
  print "Validating schemaDict method in negative test..."
  try:
    validate({}, schemaDict=schemaDict)
  except ValidationError:
    print "ok\n"
  else:
    print "FAILED\n"
    sys.exit(1)
  # Repeat both checks using a schema loaded from a file next to this module.
  schemaPath = os.path.join(os.path.dirname(__file__), "testSchema.json")
  print "Validating schemaPath method in positive test using %s..." % \
        (os.path.abspath(schemaPath),)
  validate(d, schemaPath=schemaPath)
  print "ok\n"
  print "Validating schemaPath method in negative test using %s..." % \
        (os.path.abspath(schemaPath),)
  try:
    validate({}, schemaPath=schemaPath)
  except ValidationError:
    print "ok\n"
  else:
    print "FAILED\n"
    sys.exit(1)
  return
###############################################################################
# Run the self-test when executed as a script.
if __name__ == "__main__":
  test()
| gpl-3.0 |
procangroup/edx-platform | lms/djangoapps/grades/tests/test_tasks.py | 3 | 21368 | """
Tests for the functionality and infrastructure of grades tasks.
"""
import itertools
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, timedelta
import ddt
import pytz
import six
import django
from django.conf import settings
from django.db.utils import IntegrityError
from mock import MagicMock, patch
from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
from lms.djangoapps.grades.constants import ScoreDatabaseTableEnum
from lms.djangoapps.grades.models import PersistentCourseGrade, PersistentSubsectionGrade
from lms.djangoapps.grades.services import GradesService
from lms.djangoapps.grades.signals.signals import PROBLEM_WEIGHTED_SCORE_CHANGED
from lms.djangoapps.grades.tasks import (
RECALCULATE_GRADE_DELAY_SECONDS,
_course_task_args,
compute_grades_for_course_v2,
recalculate_subsection_grade_v3
)
from openedx.core.djangoapps.content.block_structure.exceptions import BlockStructureNotFound
from student.models import CourseEnrollment, anonymous_id_for_user
from student.tests.factories import UserFactory
from track.event_transaction_utils import create_new_event_transaction_id, get_event_transaction_id
from util.date_utils import to_timestamp
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from .utils import mock_get_score
class MockGradesService(GradesService):
    """
    GradesService test double whose get_subsection_grade_override always
    returns the canned value supplied at construction time.
    """
    def __init__(self, mocked_return_value=None):
        super(MockGradesService, self).__init__()
        # Value returned unconditionally by get_subsection_grade_override.
        self.mocked_return_value = mocked_return_value
    def get_subsection_grade_override(self, user_id, course_key_or_id, usage_key_or_id):
        # All lookup arguments are ignored; the mock is unconditional.
        return self.mocked_return_value
class HasCourseWithProblemsMixin(object):
    """
    Mixin to provide tests with a sample course with graded subsections

    NOTE(review): assumes the consuming TestCase defines ``self.user``
    before calling set_up_course — confirm against users of this mixin.
    """
    def set_up_course(self, enable_persistent_grades=True, create_multiple_subsections=False):
        """
        Configures the course for this test.
        """
        # pylint: disable=attribute-defined-outside-init,no-member
        self.course = CourseFactory.create(
            org='edx',
            name='course',
            run='run',
        )
        if not enable_persistent_grades:
            PersistentGradesEnabledFlag.objects.create(enabled=False)
        # Minimal graded structure: course -> chapter -> sequential -> problem.
        self.chapter = ItemFactory.create(parent=self.course, category="chapter", display_name="Chapter")
        self.sequential = ItemFactory.create(parent=self.chapter, category='sequential', display_name="Sequential1")
        self.problem = ItemFactory.create(parent=self.sequential, category='problem', display_name='Problem')
        if create_multiple_subsections:
            seq2 = ItemFactory.create(parent=self.chapter, category='sequential')
            ItemFactory.create(parent=seq2, category='problem')
        # Capture a single "now" so timestamp fields in both kwargs dicts agree.
        self.frozen_now_datetime = datetime.now().replace(tzinfo=pytz.UTC)
        self.frozen_now_timestamp = to_timestamp(self.frozen_now_datetime)
        # Kwargs for the PROBLEM_WEIGHTED_SCORE_CHANGED signal.
        self.problem_weighted_score_changed_kwargs = OrderedDict([
            ('weighted_earned', 1.0),
            ('weighted_possible', 2.0),
            ('user_id', self.user.id),
            ('anonymous_user_id', 5),
            ('course_id', unicode(self.course.id)),
            ('usage_id', unicode(self.problem.location)),
            ('only_if_higher', None),
            ('modified', self.frozen_now_datetime),
            ('score_db_table', ScoreDatabaseTableEnum.courseware_student_module),
        ])
        create_new_event_transaction_id()
        # Kwargs expected by the recalculate_subsection_grade_v3 task.
        self.recalculate_subsection_grade_kwargs = OrderedDict([
            ('user_id', self.user.id),
            ('course_id', unicode(self.course.id)),
            ('usage_id', unicode(self.problem.location)),
            ('anonymous_user_id', 5),
            ('only_if_higher', None),
            ('expected_modified_time', self.frozen_now_timestamp),
            ('score_deleted', False),
            ('event_transaction_id', unicode(get_event_transaction_id())),
            ('event_transaction_type', u'edx.grades.problem.submitted'),
            ('score_db_table', ScoreDatabaseTableEnum.courseware_student_module),
        ])
        # this call caches the anonymous id on the user object, saving 4 queries in all happy path tests
        _ = anonymous_id_for_user(self.user, self.course.id)
        # pylint: enable=attribute-defined-outside-init,no-member
# TODO: Remove Django 1.11 upgrade shim
# SHIM: Django 1.11 results in a few more SAVEPOINTs due to:
# https://github.com/django/django/commit/d44afd88#diff-5b0dda5eb9a242c15879dc9cd2121379L485
# Get rid of this logic post-upgrade.
def _recalc_expected_query_counts():
    """
    Expected SQL query count for a subsection-grade recalculation.
    Django >= 1.11 emits extra SAVEPOINT queries (see the upgrade-shim
    note above), so the expected count depends on the Django version.
    """
    if django.VERSION < (1, 11):
        return 23
    return 27
# TODO: Remove Django 1.11 upgrade shim
# SHIM: Django 1.11 results in a few more SAVEPOINTs due to:
# https://github.com/django/django/commit/d44afd88#diff-5b0dda5eb9a242c15879dc9cd2121379L485
# Get rid of this logic post-upgrade.
def _recalc_persistent_expected_query_counts():
    """
    Expected SQL query count for a recalculation with persistent grades
    enabled. Django >= 1.11 emits extra SAVEPOINT queries (see the
    upgrade-shim note above), so the expected count is version-dependent.
    """
    if django.VERSION < (1, 11):
        return 24
    return 28
@patch.dict(settings.FEATURES, {'PERSISTENT_GRADES_ENABLED_FOR_ALL_TESTS': False})
@ddt.ddt
class RecalculateSubsectionGradeTest(HasCourseWithProblemsMixin, ModuleStoreTestCase):
"""
Ensures that the recalculate subsection grade task functions as expected when run.
"""
ENABLED_SIGNALS = ['course_published', 'pre_publish']
def setUp(self):
super(RecalculateSubsectionGradeTest, self).setUp()
self.user = UserFactory()
PersistentGradesEnabledFlag.objects.create(enabled_for_all_courses=True, enabled=True)
@contextmanager
def mock_csm_get_score(self, score=MagicMock(grade=1.0, max_grade=2.0)):
"""
Mocks the scores needed by the SCORE_PUBLISHED signal
handler. By default, sets the returned score to 1/2.
"""
with patch("lms.djangoapps.grades.tasks.get_score", return_value=score):
yield
def test_triggered_by_problem_weighted_score_change(self):
"""
Ensures that the PROBLEM_WEIGHTED_SCORE_CHANGED signal enqueues the correct task.
"""
self.set_up_course()
send_args = self.problem_weighted_score_changed_kwargs
local_task_args = self.recalculate_subsection_grade_kwargs.copy()
local_task_args['event_transaction_type'] = u'edx.grades.problem.submitted'
with self.mock_csm_get_score() and patch(
'lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.apply_async',
return_value=None
) as mock_task_apply:
PROBLEM_WEIGHTED_SCORE_CHANGED.send(sender=None, **send_args)
mock_task_apply.assert_called_once_with(countdown=RECALCULATE_GRADE_DELAY_SECONDS, kwargs=local_task_args)
@patch('lms.djangoapps.grades.signals.signals.SUBSECTION_SCORE_CHANGED.send')
def test_triggers_subsection_score_signal(self, mock_subsection_signal):
"""
Ensures that a subsection grade recalculation triggers a signal.
"""
self.set_up_course()
self._apply_recalculate_subsection_grade()
self.assertTrue(mock_subsection_signal.called)
def test_block_structure_created_only_once(self):
self.set_up_course()
self.assertTrue(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
with patch(
'openedx.core.djangoapps.content.block_structure.factory.BlockStructureFactory.create_from_store',
side_effect=BlockStructureNotFound(self.course.location),
) as mock_block_structure_create:
self._apply_recalculate_subsection_grade()
self.assertEquals(mock_block_structure_create.call_count, 1)
@ddt.data(
(ModuleStoreEnum.Type.mongo, 1, _recalc_expected_query_counts(), True),
(ModuleStoreEnum.Type.mongo, 1, _recalc_expected_query_counts(), False),
(ModuleStoreEnum.Type.split, 3, _recalc_expected_query_counts(), True),
(ModuleStoreEnum.Type.split, 3, _recalc_expected_query_counts(), False),
)
@ddt.unpack
def test_query_counts(self, default_store, num_mongo_calls, num_sql_calls, create_multiple_subsections):
with self.store.default_store(default_store):
self.set_up_course(create_multiple_subsections=create_multiple_subsections)
self.assertTrue(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
with check_mongo_calls(num_mongo_calls):
with self.assertNumQueries(num_sql_calls):
self._apply_recalculate_subsection_grade()
    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 1, _recalc_expected_query_counts()),
        (ModuleStoreEnum.Type.split, 3, _recalc_expected_query_counts()),
    )
    @ddt.unpack
    def test_query_counts_dont_change_with_more_content(self, default_store, num_mongo_calls, num_sql_calls):
        """
        Adding more problems and sequentials to the course must not change
        the number of mongo calls or SQL queries made while recalculating a
        subsection grade (query counts are O(1) in course size).
        """
        with self.store.default_store(default_store):
            self.set_up_course(create_multiple_subsections=True)
            self.assertTrue(PersistentGradesEnabledFlag.feature_enabled(self.course.id))
            num_problems = 10
            for _ in range(num_problems):
                ItemFactory.create(parent=self.sequential, category='problem')
            num_sequentials = 10
            for _ in range(num_sequentials):
                ItemFactory.create(parent=self.chapter, category='sequential')
            with check_mongo_calls(num_mongo_calls):
                with self.assertNumQueries(num_sql_calls):
                    self._apply_recalculate_subsection_grade()

    @patch('lms.djangoapps.grades.signals.signals.SUBSECTION_SCORE_CHANGED.send')
    def test_other_inaccessible_subsection(self, mock_subsection_signal):
        """
        Subsections that are hidden from the user must not emit
        SUBSECTION_SCORE_CHANGED when a shared problem's grade changes.
        """
        self.set_up_course()
        accessible_seq = ItemFactory.create(parent=self.chapter, category='sequential')
        inaccessible_seq = ItemFactory.create(parent=self.chapter, category='sequential', visible_to_staff_only=True)
        # Update problem to have 2 additional sequential parents.
        # So in total, 3 sequential parents, with one inaccessible.
        for sequential in (accessible_seq, inaccessible_seq):
            sequential.children = [self.problem.location]
            modulestore().update_item(sequential, self.user.id)  # pylint: disable=no-member
        # Make sure the signal is sent for only the 2 accessible sequentials.
        self._apply_recalculate_subsection_grade()
        self.assertEquals(mock_subsection_signal.call_count, 2)
        sequentials_signalled = {
            args[1]['subsection_grade'].location
            for args in mock_subsection_signal.call_args_list
        }
        self.assertSetEqual(
            sequentials_signalled,
            {self.sequential.location, accessible_seq.location},
        )

    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 1, 11),
        (ModuleStoreEnum.Type.split, 3, 11),
    )
    @ddt.unpack
    def test_persistent_grades_not_enabled_on_course(self, default_store, num_mongo_queries, num_sql_queries):
        """
        With persistent grades disabled, recalculation must not write any
        persistent course- or subsection-level grade rows.
        """
        with self.store.default_store(default_store):
            self.set_up_course(enable_persistent_grades=False)
            with check_mongo_calls(num_mongo_queries):
                with self.assertNumQueries(num_sql_queries):
                    self._apply_recalculate_subsection_grade()
            with self.assertRaises(PersistentCourseGrade.DoesNotExist):
                PersistentCourseGrade.read(self.user.id, self.course.id)
            self.assertEqual(len(PersistentSubsectionGrade.bulk_read_grades(self.user.id, self.course.id)), 0)

    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 1, _recalc_persistent_expected_query_counts()),
        (ModuleStoreEnum.Type.split, 3, _recalc_persistent_expected_query_counts()),
    )
    @ddt.unpack
    def test_persistent_grades_enabled_on_course(self, default_store, num_mongo_queries, num_sql_queries):
        """
        With persistent grades enabled, recalculation persists both the
        course-level grade and at least one subsection grade row.
        """
        with self.store.default_store(default_store):
            self.set_up_course(enable_persistent_grades=True)
            with check_mongo_calls(num_mongo_queries):
                with self.assertNumQueries(num_sql_queries):
                    self._apply_recalculate_subsection_grade()
            self.assertIsNotNone(PersistentCourseGrade.read(self.user.id, self.course.id))
            self.assertGreater(len(PersistentSubsectionGrade.bulk_read_grades(self.user.id, self.course.id)), 0)
    @patch('lms.djangoapps.grades.signals.signals.SUBSECTION_SCORE_CHANGED.send')
    @patch('lms.djangoapps.grades.subsection_grade_factory.SubsectionGradeFactory.update')
    def test_retry_first_time_only(self, mock_update, mock_course_signal):
        """
        Ensures that a task retry completes after a one-time failure.
        """
        self.set_up_course()
        # First call fails with IntegrityError; the retried call succeeds.
        mock_update.side_effect = [IntegrityError("WHAMMY"), None]
        self._apply_recalculate_subsection_grade()
        self.assertEquals(mock_course_signal.call_count, 1)

    @patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.retry')
    @patch('lms.djangoapps.grades.subsection_grade_factory.SubsectionGradeFactory.update')
    def test_retry_on_integrity_error(self, mock_update, mock_retry):
        """
        Ensures that tasks will be retried if IntegrityErrors are encountered.
        """
        self.set_up_course()
        mock_update.side_effect = IntegrityError("WHAMMY")
        self._apply_recalculate_subsection_grade()
        self._assert_retry_called(mock_retry)

    @ddt.data(ScoreDatabaseTableEnum.courseware_student_module, ScoreDatabaseTableEnum.submissions,
              ScoreDatabaseTableEnum.overrides)
    @patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.retry')
    @patch('lms.djangoapps.grades.tasks.log')
    def test_retry_when_db_not_updated(self, score_db_table, mock_log, mock_retry):
        """
        If the score row's modified/created timestamp is older than the
        triggering event (the DB has not yet caught up with the new score),
        the task must log and retry — checked for each score-source table.
        """
        self.set_up_course()
        self.recalculate_subsection_grade_kwargs['score_db_table'] = score_db_table
        # A timestamp in the past makes the DB look stale relative to the event.
        modified_datetime = datetime.utcnow().replace(tzinfo=pytz.UTC) - timedelta(days=1)
        if score_db_table == ScoreDatabaseTableEnum.submissions:
            with patch('lms.djangoapps.grades.tasks.sub_api.get_score') as mock_sub_score:
                mock_sub_score.return_value = {
                    'created_at': modified_datetime
                }
                self._apply_recalculate_subsection_grade(
                    mock_score=MagicMock(module_type='any_block_type')
                )
        elif score_db_table == ScoreDatabaseTableEnum.courseware_student_module:
            self._apply_recalculate_subsection_grade(
                mock_score=MagicMock(modified=modified_datetime)
            )
        else:
            with patch(
                'lms.djangoapps.grades.tasks.GradesService',
                return_value=MockGradesService(mocked_return_value=MagicMock(modified=modified_datetime))
            ):
                recalculate_subsection_grade_v3.apply(kwargs=self.recalculate_subsection_grade_kwargs)
        self._assert_retry_called(mock_retry)
        self.assertIn(
            u"Grades: tasks._has_database_updated_with_new_score is False.",
            mock_log.info.call_args_list[0][0][0]
        )
    @ddt.data(
        *itertools.product(
            (True, False),
            (ScoreDatabaseTableEnum.courseware_student_module, ScoreDatabaseTableEnum.submissions,
             ScoreDatabaseTableEnum.overrides),
        )
    )
    @ddt.unpack
    @patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.retry')
    @patch('lms.djangoapps.grades.tasks.log')
    def test_when_no_score_found(self, score_deleted, score_db_table, mock_log, mock_retry):
        """
        When no score row exists: if the score was deliberately deleted the
        task completes without retrying; otherwise the DB looks stale, so
        the task logs and retries — checked for every score-source table.
        """
        self.set_up_course()
        self.recalculate_subsection_grade_kwargs['score_deleted'] = score_deleted
        self.recalculate_subsection_grade_kwargs['score_db_table'] = score_db_table
        if score_db_table == ScoreDatabaseTableEnum.submissions:
            with patch('lms.djangoapps.grades.tasks.sub_api.get_score') as mock_sub_score:
                mock_sub_score.return_value = None
                self._apply_recalculate_subsection_grade(
                    mock_score=MagicMock(module_type='any_block_type')
                )
        elif score_db_table == ScoreDatabaseTableEnum.overrides:
            with patch('lms.djangoapps.grades.tasks.GradesService',
                       return_value=MockGradesService(mocked_return_value=None)) as mock_service:
                mock_service.get_subsection_grade_override.return_value = None
                recalculate_subsection_grade_v3.apply(kwargs=self.recalculate_subsection_grade_kwargs)
        else:
            self._apply_recalculate_subsection_grade(mock_score=None)
        if score_deleted:
            self._assert_retry_not_called(mock_retry)
        else:
            self._assert_retry_called(mock_retry)
            self.assertIn(
                u"Grades: tasks._has_database_updated_with_new_score is False.",
                mock_log.info.call_args_list[0][0][0]
            )

    @patch('lms.djangoapps.grades.tasks.log')
    @patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.retry')
    @patch('lms.djangoapps.grades.subsection_grade_factory.SubsectionGradeFactory.update')
    def test_log_unknown_error(self, mock_update, mock_retry, mock_log):
        """
        Ensures that unknown errors are logged before a retry.
        """
        self.set_up_course()
        mock_update.side_effect = Exception("General exception with no further detail!")
        self._apply_recalculate_subsection_grade()
        self.assertIn("General exception with no further detail!", mock_log.info.call_args[0][0])
        self._assert_retry_called(mock_retry)

    @patch('lms.djangoapps.grades.tasks.log')
    @patch('lms.djangoapps.grades.tasks.recalculate_subsection_grade_v3.retry')
    @patch('lms.djangoapps.grades.subsection_grade_factory.SubsectionGradeFactory.update')
    def test_no_log_known_error(self, mock_update, mock_retry, mock_log):
        """
        Ensures that known errors are not logged before a retry.
        """
        self.set_up_course()
        # IntegrityError is an expected race-condition outcome, so no logging.
        mock_update.side_effect = IntegrityError("race condition oh noes")
        self._apply_recalculate_subsection_grade()
        self.assertFalse(mock_log.info.called)
        self._assert_retry_called(mock_retry)
def _apply_recalculate_subsection_grade(
self,
mock_score=MagicMock(
modified=datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(days=1),
grade=1.0,
max_grade=2.0,
)
):
"""
Calls the recalculate_subsection_grade task with necessary
mocking in place.
"""
with self.mock_csm_get_score(mock_score):
with mock_get_score(1, 2):
recalculate_subsection_grade_v3.apply(kwargs=self.recalculate_subsection_grade_kwargs)
    def _assert_retry_called(self, mock_retry):
        """
        Verifies the task was retried and with the correct
        number of arguments.
        """
        self.assertTrue(mock_retry.called)
        # The retry must be re-invoked with the full original kwargs set.
        self.assertEquals(len(mock_retry.call_args[1]['kwargs']), len(self.recalculate_subsection_grade_kwargs))

    def _assert_retry_not_called(self, mock_retry):
        """
        Verifies the task was not retried.
        """
        self.assertFalse(mock_retry.called)
@ddt.ddt
class ComputeGradesForCourseTest(HasCourseWithProblemsMixin, ModuleStoreTestCase):
    """
    Test compute_grades_for_course_v2 task.
    """
    # Signals that must stay connected so course publishing works in tests.
    ENABLED_SIGNALS = ['course_published', 'pre_publish']

    def setUp(self):
        super(ComputeGradesForCourseTest, self).setUp()
        # 12 enrolled users; tests slice batches out of this population.
        self.users = [UserFactory.create() for _ in xrange(12)]
        self.set_up_course()
        for user in self.users:
            CourseEnrollment.enroll(user, self.course.id)

    @ddt.data(*xrange(0, 12, 3))
    def test_behavior(self, batch_size):
        """
        The task should grade only the enrollees in the requested
        [offset, offset + batch_size) window — with offset=4 at most 8 of
        the 12 users can be graded.
        """
        with mock_get_score(1, 2):
            result = compute_grades_for_course_v2.delay(
                course_key=six.text_type(self.course.id),
                batch_size=batch_size,
                offset=4,
            )
        self.assertTrue(result.successful)
        self.assertEqual(
            PersistentCourseGrade.objects.filter(course_id=self.course.id).count(),
            min(batch_size, 8)  # No more than 8 due to offset
        )
        self.assertEqual(
            PersistentSubsectionGrade.objects.filter(course_id=self.course.id).count(),
            min(batch_size, 8)  # No more than 8 due to offset
        )

    @ddt.data(*xrange(1, 12, 3))
    def test_course_task_args(self, test_batch_size):
        """
        _course_task_args should yield (course_key, offset, batch_size)
        tuples whose offsets advance by batch_size on each step.
        """
        offset_expected = 0
        for course_key, offset, batch_size in _course_task_args(
            batch_size=test_batch_size, course_key=self.course.id, from_settings=False
        ):
            self.assertEqual(course_key, six.text_type(self.course.id))
            self.assertEqual(batch_size, test_batch_size)
            self.assertEqual(offset, offset_expected)
            offset_expected += test_batch_size
| agpl-3.0 |
fdvarela/odoo8 | addons/analytic/__openerp__.py | 12 | 2609 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Module metadata shown in the OpenERP Apps list.
    'name' : 'Analytic Accounting',
    'version': '1.1',
    'author' : 'OpenERP SA',
    'website' : 'http://www.openerp.com',
    # 'Hidden/Dependency' keeps the module out of the main Apps listing;
    # it is installed automatically by modules depending on it.
    'category': 'Hidden/Dependency',
    'depends' : ['base', 'decimal_precision', 'mail'],
    'description': """
Module for defining analytic accounting object.
===============================================
In OpenERP, analytic accounts are linked to general accounts but are treated
totally independently. So, you can enter various different analytic operations
that have no counterpart in the general financial accounts.
""",
    # XML/CSV files loaded at install time: security rules, sequences,
    # views, and report/wizard definitions.
    'data': [
        'security/analytic_security.xml',
        'security/ir.model.access.csv',
        'data/analytic_sequence.xml',
        'views/analytic_view.xml',
        'data/analytic_data.xml',
        'analytic_report.xml',
        'wizard/account_analytic_balance_report_view.xml',
        'wizard/account_analytic_cost_ledger_view.xml',
        'wizard/account_analytic_inverted_balance_report.xml',
        'wizard/account_analytic_journal_report_view.xml',
        'wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
        'wizard/account_analytic_chart_view.xml',
        'views/report_analyticbalance.xml',
        'views/report_analyticjournal.xml',
        'views/report_analyticcostledgerquantity.xml',
        'views/report_analyticcostledger.xml',
        'views/report_invertedanalyticbalance.xml',
    ],
    # Demonstration data, loaded only when demo data is enabled.
    'demo': [
        'data/analytic_demo.xml',
        'data/analytic_account_demo.xml',
    ],
    'installable': True,
    # Never auto-installed on its own.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hbhzwj/imalse | tools/ns-allinone-3.14.1/ns-3.14.1/.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Tools/qt4.py | 12 | 14141 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml=False
ContentHandler=object
else:
has_xml=True
import os,sys
from waflib.Tools import c_preproc,cxx
from waflib import Task,Utils,Options,Errors
from waflib.TaskGen import feature,after_method,extension
from waflib.Configure import conf
from waflib import Logs
MOC_H=['.h','.hpp','.hxx','.hh']
EXT_RCC=['.qrc']
EXT_UI=['.ui']
EXT_QT4=['.cpp','.cc','.cxx','.C']
QT4_LIBS="QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative"
class qxx(cxx.cxx):
    """
    C++ compilation task that also creates and schedules the 'moc' tasks
    its input depends on: '.moc' entries found during dependency scanning
    are matched to a header (or source) file, and a moc task is injected
    into the build so the generated file exists before compilation.
    """
    def __init__(self,*k,**kw):
        Task.Task.__init__(self,*k,**kw)
        # Set to 1 once add_moc_tasks() has run for this task.
        self.moc_done=0
    def scan(self):
        """Run the C preprocessor scan, diverting '.moc' nodes to raw names."""
        (nodes,names)=c_preproc.scan(self)
        # NOTE(review): this removes from ``nodes`` while iterating it, which
        # skips the element following each removal; kept as-is (generated waf
        # code, "do not edit").
        for x in nodes:
            if x.name.endswith('.moc'):
                nodes.remove(x)
                names.append(x.path_from(self.inputs[0].parent.get_bld()))
        return(nodes,names)
    def runnable_status(self):
        # Wait for predecessor tasks, then create the moc tasks exactly once
        # before deferring to the normal cxx status logic.
        if self.moc_done:
            return Task.Task.runnable_status(self)
        else:
            for t in self.run_after:
                if not t.hasrun:
                    return Task.ASK_LATER
            self.add_moc_tasks()
            return Task.Task.runnable_status(self)
    def add_moc_tasks(self):
        """Create a moc task for every '.moc' dependency of this source file."""
        node=self.inputs[0]
        bld=self.generator.bld
        try:
            # Force signature computation so scanner results are available;
            # then drop the cached signature so it is recomputed later.
            self.signature()
        except KeyError:
            pass
        else:
            delattr(self,'cache_sig')
        moctasks=[]
        mocfiles=[]
        try:
            tmp_lst=bld.raw_deps[self.uid()]
            bld.raw_deps[self.uid()]=[]
        except KeyError:
            tmp_lst=[]
        for d in tmp_lst:
            if not d.endswith('.moc'):
                continue
            if d in mocfiles:
                # Duplicate .moc dependency — should never happen.
                Logs.error("paranoia owns")
                continue
            mocfiles.append(d)
            h_node=None
            # Candidate header extensions (overridable via --header-ext).
            try:ext=Options.options.qt_header_ext.split()
            except AttributeError:pass
            if not ext:ext=MOC_H
            base2=d[:-4]
            # First look for a header with the same base name...
            for x in[node.parent]+self.generator.includes_nodes:
                for e in ext:
                    h_node=x.find_node(base2+e)
                    if h_node:
                        break
                if h_node:
                    m_node=h_node.change_ext('.moc')
                    break
            else:
                # ...otherwise the .moc may be generated from a source file.
                for k in EXT_QT4:
                    if base2.endswith(k):
                        for x in[node.parent]+self.generator.includes_nodes:
                            h_node=x.find_node(base2)
                            if h_node:
                                break
                        if h_node:
                            m_node=h_node.change_ext(k+'.moc')
                            break
            if not h_node:
                raise Errors.WafError('no header found for %r which is a moc file'%d)
            bld.node_deps[(self.inputs[0].parent.abspath(),m_node.name)]=h_node
            task=Task.classes['moc'](env=self.env,generator=self.generator)
            task.set_inputs(h_node)
            task.set_outputs(m_node)
            # Inject the new task at the front of the running build producer.
            gen=bld.producer
            gen.outstanding.insert(0,task)
            gen.total+=1
            moctasks.append(task)
        # Keep only the .moc entries in the stored raw dependencies.
        tmp_lst=bld.raw_deps[self.uid()]=mocfiles
        lst=bld.node_deps.get(self.uid(),())
        for d in lst:
            name=d.name
            if name.endswith('.moc'):
                task=Task.classes['moc'](env=self.env,generator=self.generator)
                task.set_inputs(bld.node_deps[(self.inputs[0].parent.abspath(),name)])
                task.set_outputs(d)
                gen=bld.producer
                gen.outstanding.insert(0,task)
                gen.total+=1
                moctasks.append(task)
        # Compile only after all the moc tasks have completed.
        self.run_after.update(set(moctasks))
        self.moc_done=1
    # Reuse the plain cxx run() implementation for the actual compilation.
    run=Task.classes['cxx'].__dict__['run']
class trans_update(Task.Task):
    """Run Qt lupdate to refresh a .ts translation file from the sources."""
    run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}'
    color='BLUE'
# The .ts files are updated in place, so tell waf to track them as outputs.
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
    """SAX handler that collects the <file> entries of a Qt .qrc resource.

    After parsing, ``self.files`` holds the text content of every <file>
    element, in document order (used by rcc.scan for dependency tracking).
    """
    def __init__(self):
        # Character buffer for the element currently being read.
        self.buf = []
        # Completed <file> entry texts.
        self.files = []
    def startElement(self, name, attrs):
        # A new <file> element begins: start accumulating fresh text.
        if name != 'file':
            return
        self.buf = []
    def endElement(self, name):
        if name != 'file':
            return
        # Join the buffered character chunks into one plain string.
        self.files.append(str(''.join(self.buf)))
    def characters(self, cars):
        # SAX may deliver text in several chunks; buffer them all.
        self.buf.append(cars)
def create_rcc_task(self,node):
    """For a .qrc input: run rcc to produce *_rc.cpp, then compile it.

    The resulting cxx task is appended to the generator's compiled_tasks so
    its object file gets linked; returns that cxx task.
    """
    rcnode=node.change_ext('_rc.cpp')
    rcctask=self.create_task('rcc',node,rcnode)
    cpptask=self.create_task('cxx',rcnode,rcnode.change_ext('.o'))
    try:
        self.compiled_tasks.append(cpptask)
    except AttributeError:
        # First compiled task on this generator: create the list.
        self.compiled_tasks=[cpptask]
    return cpptask
def create_uic_task(self,node):
    """For a .ui designer file: run uic to produce the ui_*.h header."""
    uictask=self.create_task('ui4',node)
    # Output name follows env['ui_PATTERN'] ('ui_%s.h'), dropping '.ui'.
    uictask.outputs=[self.path.find_or_declare(self.env['ui_PATTERN']%node.name[:-3])]
def add_lang(self,node):
    """Accumulate .ts translation nodes on the task generator ('lang')."""
    self.lang=self.to_list(getattr(self,'lang',[]))+[node]
def apply_qt4(self):
    """
    Task-generator method (feature 'qt4'): create .ts→.qm translation
    tasks, optionally regenerate translations (--translate) and bundle
    the .qm files into a .qrc resource, then derive MOC_FLAGS from the
    -D/-I entries of CXXFLAGS.
    """
    if getattr(self,'lang',None):
        qmtasks=[]
        for x in self.to_list(self.lang):
            if isinstance(x,str):
                x=self.path.find_resource(x+'.ts')
            qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm')))
        if getattr(self,'update',None)and Options.options.trans_qt4:
            # Feed all C++ sources and .ui files to lupdate.
            cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')]
            for x in qmtasks:
                self.create_task('trans_update',cxxnodes,x.inputs)
        if getattr(self,'langname',None):
            # Wrap the .qm files into a .qrc, compile it, and link it in.
            qmnodes=[x.outputs[0]for x in qmtasks]
            rcnode=self.langname
            if isinstance(rcnode,str):
                rcnode=self.path.find_or_declare(rcnode+'.qrc')
            t=self.create_task('qm2rcc',qmnodes,rcnode)
            k=create_rcc_task(self,t.outputs[0])
            self.link_task.inputs.append(k.outputs[0])
    lst=[]
    # Forward only define/include flags to moc, normalizing '/D' to '-D'.
    for flag in self.to_list(self.env['CXXFLAGS']):
        if len(flag)<2:continue
        f=flag[0:2]
        if f in['-D','-I','/D','/I']:
            if(f[0]=='/'):
                lst.append('-'+flag[1:])
            else:
                lst.append(flag)
    self.env['MOC_FLAGS']=lst
def cxx_hook(self,node):
    """Compile C++ sources with the moc-aware qxx task class."""
    return self.create_compiled_task('qxx',node)
class rcc(Task.Task):
    """Run the Qt resource compiler on a .qrc file."""
    color='BLUE'
    run_str='${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
    ext_out=['.h']
    def scan(self):
        """Parse the .qrc XML and report the referenced files as deps."""
        node=self.inputs[0]
        if not has_xml:
            # xml.sax could not be imported at module load time.
            Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
            return([],[])
        parser=make_parser()
        curHandler=XMLHandler()
        parser.setContentHandler(curHandler)
        fi=open(self.inputs[0].abspath())
        parser.parse(fi)
        fi.close()
        nodes=[]
        names=[]
        root=self.inputs[0].parent
        for x in curHandler.files:
            nd=root.find_resource(x)
            # Resolved files become node deps; unresolved stay raw names.
            if nd:nodes.append(nd)
            else:names.append(x)
        return(nodes,names)
class moc(Task.Task):
    """Run the Qt meta-object compiler."""
    color='BLUE'
    run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui4(Task.Task):
    """Run uic on a .ui designer file to generate the ui_*.h header."""
    color='BLUE'
    run_str='${QT_UIC} ${SRC} -o ${TGT}'
    ext_out=['.h']
class ts2qm(Task.Task):
    """Compile a .ts translation source into a binary .qm file."""
    color='BLUE'
    run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
    """Generate a .qrc resource file listing the input .qm translations."""
    color='BLUE'
    after='ts2qm'
    def run(self):
        # Paths are written relative to the output .qrc's directory.
        txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs])
        code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt
        self.outputs[0].write(code)
def configure(self):
    """
    Configuration entry point for the qt4 tool: locate the Qt binaries,
    decide which Qt modules to probe, find the libraries, then finish
    with rpath and libpath cleanups.
    """
    self.find_qt4_binaries()
    self.set_qt4_libs_to_check()
    self.find_qt4_libraries()
    self.add_qt4_rpath()
    self.simplify_qt4_libs()
def find_qt4_binaries(self):
    """
    Locate qmake and the Qt4 tools (uic, moc, rcc, lrelease, lupdate) and
    record tool paths and command patterns in the environment.  Search
    order: --qtdir/--qtbin options, then $QT4_ROOT, then $PATH plus
    well-known install locations.
    """
    env=self.env
    opt=Options.options
    qtdir=getattr(opt,'qtdir','')
    qtbin=getattr(opt,'qtbin','')
    paths=[]
    if qtdir:
        qtbin=os.path.join(qtdir,'bin')
    if not qtdir:
        qtdir=self.environ.get('QT4_ROOT','')
        qtbin=os.path.join(qtdir,'bin')
    if qtbin:
        paths=[qtbin]
    if not qtdir:
        # No explicit Qt root: search PATH and common install locations.
        paths=os.environ.get('PATH','').split(os.pathsep)
        paths.append('/usr/share/qt4/bin/')
        try:
            lst=Utils.listdir('/usr/local/Trolltech/')
        except OSError:
            pass
        else:
            if lst:
                # Pick the most recent Trolltech install (highest name).
                lst.sort()
                lst.reverse()
                qtdir='/usr/local/Trolltech/%s/'%lst[0]
                qtbin=os.path.join(qtdir,'bin')
                paths.append(qtbin)
    cand=None
    prev_ver=['4','0','0']
    # Prefer the qmake variant reporting the highest QT_VERSION.
    for qmk in['qmake-qt4','qmake4','qmake']:
        try:
            qmake=self.find_program(qmk,path_list=paths)
        except self.errors.ConfigurationError:
            pass
        else:
            try:
                version=self.cmd_and_log([qmake,'-query','QT_VERSION']).strip()
            except self.errors.ConfigurationError:
                pass
            else:
                if version:
                    new_ver=version.split('.')
                    # Lexicographic compare of string components; fine for
                    # single-digit Qt4 version numbers.
                    if new_ver>prev_ver:
                        cand=qmake
                        prev_ver=new_ver
    if cand:
        self.env.QMAKE=cand
    else:
        self.fatal('Could not find qmake for qt4')
    # NOTE(review): qtbin is recomputed from qmake here but not added to
    # ``paths`` in this stripped build of waf — verify against upstream.
    qtbin=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_BINS']).strip()+os.sep
    def find_bin(lst,var):
        # Store the first program from lst found on the search paths.
        for f in lst:
            try:
                ret=self.find_program(f,path_list=paths)
            except self.errors.ConfigurationError:
                pass
            else:
                env[var]=ret
                break
    find_bin(['uic-qt3','uic3'],'QT_UIC3')
    find_bin(['uic-qt4','uic'],'QT_UIC')
    if not env['QT_UIC']:
        self.fatal('cannot find the uic compiler for qt4')
    try:
        uicver=self.cmd_and_log(env['QT_UIC']+" -version 2>&1").strip()
    except self.errors.ConfigurationError:
        self.fatal('this uic compiler is for qt3, add uic for qt4 to your path')
    # Normalize the two known banner formats to a bare version string.
    uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','')
    self.msg('Checking for uic version','%s'%uicver)
    if uicver.find(' 3.')!=-1:
        self.fatal('this uic compiler is for qt3, add uic for qt4 to your path')
    find_bin(['moc-qt4','moc'],'QT_MOC')
    find_bin(['rcc'],'QT_RCC')
    find_bin(['lrelease-qt4','lrelease'],'QT_LRELEASE')
    find_bin(['lupdate-qt4','lupdate'],'QT_LUPDATE')
    # Command-line patterns and flags consumed by the task classes above.
    env['UIC3_ST']='%s -o %s'
    env['UIC_ST']='%s -o %s'
    env['MOC_ST']='-o'
    env['ui_PATTERN']='ui_%s.h'
    env['QT_LRELEASE_FLAGS']=['-silent']
    env.MOCCPPPATH_ST='-I%s'
    env.MOCDEFINES_ST='-D%s'
def find_qt4_libraries(self):
    """
    Locate the Qt4 libraries and headers.  Prefer pkg-config when
    available; otherwise probe the filesystem per platform (frameworks on
    darwin, .so/.a on other unix, import libraries on win32, including
    the *_debug variants there).
    """
    qtlibs=getattr(Options.options,'qtlibs','')
    if not qtlibs:
        try:
            qtlibs=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_LIBS']).strip()
        except Errors.WafError:
            qtdir=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_PREFIX']).strip()+os.sep
            qtlibs=os.path.join(qtdir,'lib')
    self.msg('Found the Qt4 libraries in',qtlibs)
    qtincludes=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_HEADERS']).strip()
    env=self.env
    if not'PKG_CONFIG_PATH'in os.environ:
        os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib'%(qtlibs,qtlibs)
    try:
        self.check_cfg(atleast_pkgconfig_version='0.1')
    except self.errors.ConfigurationError:
        # No pkg-config: probe the filesystem directly for each module.
        for i in self.qt4_vars:
            uselib=i.upper()
            if Utils.unversioned_sys_platform()=="darwin":
                # macOS ships Qt as frameworks.
                frameworkName=i+".framework"
                qtDynamicLib=os.path.join(qtlibs,frameworkName,i)
                if os.path.exists(qtDynamicLib):
                    env.append_unique('FRAMEWORK_'+uselib,i)
                    self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers'))
            elif sys.platform!="win32":
                # Unix: prefer the shared library, fall back to static.
                qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so")
                qtStaticLib=os.path.join(qtlibs,"lib"+i+".a")
                if os.path.exists(qtDynamicLib):
                    env.append_unique('LIB_'+uselib,i)
                    self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
                elif os.path.exists(qtStaticLib):
                    env.append_unique('LIB_'+uselib,i)
                    self.msg('Checking for %s'%i,qtStaticLib,'GREEN')
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
            else:
                # win32: try the known release import-library name patterns.
                for k in("lib%s.a","lib%s4.a","%s.lib","%s4.lib"):
                    lib=os.path.join(qtlibs,k%i)
                    if os.path.exists(lib):
                        env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
                        self.msg('Checking for %s'%i,lib,'GREEN')
                        break
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
                # Then the debug variants (suffix 'd'), under <MODULE>_debug.
                uselib=i.upper()+"_debug"
                for k in("lib%sd.a","lib%sd4.a","%sd.lib","%sd4.lib"):
                    lib=os.path.join(qtlibs,k%i)
                    if os.path.exists(lib):
                        env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
                        self.msg('Checking for %s'%i,lib,'GREEN')
                        break
                else:
                    self.msg('Checking for %s'%i,False,'YELLOW')
                env.append_unique('LIBPATH_'+uselib,qtlibs)
                env.append_unique('INCLUDES_'+uselib,qtincludes)
                env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
    else:
        # pkg-config is available: let it provide flags per module.
        for i in self.qt4_vars_debug+self.qt4_vars:
            self.check_cfg(package=i,args='--cflags --libs',mandatory=False)
def simplify_qt4_libs(self):
    """Drop library paths already provided by QtCore from the other Qt
    modules' LIBPATH_* variables, avoiding duplicated -L flags."""
    env=self.env
    def process_lib(vars_,coreval):
        for d in vars_:
            var=d.upper()
            if var=='QTCORE':
                continue
            value=env['LIBPATH_'+var]
            if value:
                core=env[coreval]
                accu=[]
                for lib in value:
                    if lib in core:
                        continue
                    accu.append(lib)
                env['LIBPATH_'+var]=accu
    process_lib(self.qt4_vars,'LIBPATH_QTCORE')
    process_lib(self.qt4_vars_debug,'LIBPATH_QTCORE_DEBUG')
def add_qt4_rpath(self):
    """When --want-rpath is given, derive RPATH_* linker flags from the
    LIBPATH_* variables, skipping paths QtCore already covers (except for
    QtCore itself)."""
    env=self.env
    if Options.options.want_rpath:
        def process_rpath(vars_,coreval):
            for d in vars_:
                var=d.upper()
                value=env['LIBPATH_'+var]
                if value:
                    core=env[coreval]
                    accu=[]
                    for lib in value:
                        if var!='QTCORE':
                            if lib in core:
                                continue
                        accu.append('-Wl,--rpath='+lib)
                    env['RPATH_'+var]=accu
        process_rpath(self.qt4_vars,'LIBPATH_QTCORE')
        process_rpath(self.qt4_vars_debug,'LIBPATH_QTCORE_DEBUG')
def set_qt4_libs_to_check(self):
    """Default the list of Qt4 modules to probe (and their '_debug'
    variants) unless the configuration context already defined them."""
    if not hasattr(self,'qt4_vars'):
        self.qt4_vars=QT4_LIBS
    self.qt4_vars=Utils.to_list(self.qt4_vars)
    if not hasattr(self,'qt4_vars_debug'):
        self.qt4_vars_debug=[a+'_debug'for a in self.qt4_vars]
    self.qt4_vars_debug=Utils.to_list(self.qt4_vars_debug)
def options(opt):
    """Register the qt4 tool's command-line options."""
    opt.add_option('--want-rpath',action='store_true',default=False,dest='want_rpath',help='enable the rpath for qt libraries')
    opt.add_option('--header-ext',type='string',default='',help='header extension for moc files',dest='qt_header_ext')
    for i in'qtdir qtbin qtlibs'.split():
        opt.add_option('--'+i,type='string',default='',dest=i)
    opt.add_option('--translate',action="store_true",help="collect translation strings",dest="trans_qt4",default=False)
# Registration (decorator-equivalent calls of this generated module):
# extension() maps file suffixes to handlers, feature()/after_method()
# attach apply_qt4 to 'qt4' task generators, conf() exposes the
# configuration helpers as methods on the configuration context.
extension(*EXT_RCC)(create_rcc_task)
extension(*EXT_UI)(create_uic_task)
extension('.ts')(add_lang)
feature('qt4')(apply_qt4)
after_method('apply_link')(apply_qt4)
extension(*EXT_QT4)(cxx_hook)
conf(find_qt4_binaries)
conf(find_qt4_libraries)
conf(simplify_qt4_libs)
conf(add_qt4_rpath)
conf(set_qt4_libs_to_check) | gpl-3.0 |
gannetson/sportschooldeopenlucht | apps/wallposts/serializers.py | 1 | 4249 | from bluebottle.accounts.serializers import UserPreviewSerializer
from bluebottle.bluebottle_drf2.serializers import OEmbedField, PolymorphicSerializer, SorlImageField, ContentTextField, ImageSerializer, PhotoSerializer
from apps.wallposts.models import WallPost, SystemWallPost
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from .models import MediaWallPost, TextWallPost, MediaWallPostPhoto, Reaction
# Serializer to serialize all wall-posts for an object into an array of ids
# Add a field like so:
# wallpost_ids = WallPostListSerializer()
class WallPostListSerializer(serializers.Field):
    """
    Read-only field that renders, for the object it is attached to, the ids
    of all WallPosts pointing at that object, newest first.
    """

    def field_to_native(self, obj, field_name):
        # Resolve the generic relation: wallposts reference their target
        # via (content_type, object_id).
        content_type = ContentType.objects.get_for_model(obj)
        # Renamed from ``list`` so the builtin is no longer shadowed.
        wallposts = WallPost.objects.filter(object_id=obj.id).filter(content_type=content_type)
        return wallposts.values_list('id', flat=True).order_by('-created').all()
# Serializer for WallPost Reactions.
class ReactionSerializer(serializers.ModelSerializer):
    # Read-only preview of the reaction's author.
    author = UserPreviewSerializer()
    # Cleaned text body of the reaction.
    text = ContentTextField()
    wallpost = serializers.PrimaryKeyRelatedField()
    url = serializers.HyperlinkedIdentityField(view_name="wallpost-reaction-detail")

    class Meta:
        model = Reaction
        fields = ('created', 'author', 'text', 'id', 'wallpost', 'url')


# Serializers for WallPosts.
class WallPostTypeField(serializers.Field):
    """ Used to add a type to WallPosts (e.g. media, text etc). """
    def __init__(self, type, **kwargs):
        # source='*' binds the field to the whole object; the rendered
        # value is the constant label anyway.
        super(WallPostTypeField, self).__init__(source='*', **kwargs)
        self.type = type

    def to_native(self, value):
        # Always render the fixed type label set at construction time.
        return self.type


class WallPostSerializerBase(serializers.ModelSerializer):
    """
    Base class serializer for WallPosts. This is not used directly; please subclass it.
    """
    author = UserPreviewSerializer()
    reactions = ReactionSerializer(many=True, read_only=True)

    class Meta:
        # Common fields shared by all concrete WallPost serializers.
        fields = ('id', 'type', 'author', 'created', 'reactions')


class MediaWallPostPhotoSerializer(serializers.ModelSerializer):
    photo = PhotoSerializer(required=False)
    # Optional link back to the owning media wallpost.
    mediawallpost = serializers.PrimaryKeyRelatedField(required=False, read_only=False)

    class Meta:
        model = MediaWallPostPhoto
        fields = ('id', 'photo', 'mediawallpost')
class MediaWallPostSerializer(WallPostSerializerBase):
    """
    Serializer for MediaWallPosts. This should not be used directly but instead should be subclassed for the specific
    model it's a WallPost about. See ProjectMediaWallPost for an example.
    """
    type = WallPostTypeField(type='media')
    text = ContentTextField(required=False)
    # Embeddable player markup derived from video_url via OEmbed.
    video_html = OEmbedField(source='video_url', maxwidth='560', maxheight='315')
    photos = MediaWallPostPhotoSerializer(many=True, required=False)

    class Meta:
        model = MediaWallPost
        fields = WallPostSerializerBase.Meta.fields + ('title', 'text', 'video_html', 'video_url', 'photos')


class TextWallPostSerializer(WallPostSerializerBase):
    """
    Serializer for TextWallPosts. This should not be used directly but instead should be subclassed for the specific
    model it's a WallPost about. See ProjectTextWallPost for an example.
    """
    type = WallPostTypeField(type='text')
    text = ContentTextField()

    class Meta:
        model = TextWallPost
        fields = WallPostSerializerBase.Meta.fields + ('text',)
class SystemWallPostSerializer(WallPostSerializerBase):
    """
    Serializer for SystemWallPosts (auto-generated posts tied to a related
    object).  This should not be used directly but instead should be
    subclassed for the specific model it's a WallPost about.
    """
    type = WallPostTypeField(type='system')
    text = ContentTextField()
    # Human-readable name of the related object's content type.
    related_type = serializers.CharField(source='related_type.name')

    class Meta:
        # Was ``TextWallPost`` — an apparent copy/paste from
        # TextWallPostSerializer.  This serializer is registered for
        # SystemWallPost in WallPostSerializer, and SystemWallPost is the
        # model providing the ``related_type`` field used above.
        model = SystemWallPost
        fields = WallPostSerializerBase.Meta.fields + ('text', 'related_type')
class WallPostSerializer(PolymorphicSerializer):
    """Dispatch each WallPost instance to the serializer matching its
    concrete model class."""
    class Meta:
        child_models = (
            (TextWallPost, TextWallPostSerializer),
            (MediaWallPost, MediaWallPostSerializer),
            (SystemWallPost, SystemWallPostSerializer),
        )
| bsd-3-clause |
klmitch/nova | nova/tests/unit/api/openstack/compute/test_microversions.py | 4 | 13671 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
from nova import test
from nova.tests.unit.api.openstack.compute import microversions
from nova.tests.unit.api.openstack import fakes
class LegacyMicroversionsTest(test.NoDBTestCase):
header_name = 'X-OpenStack-Nova-API-Version'
    def setUp(self):
        super(LegacyMicroversionsTest, self).setUp()
        # v2.1 WSGI app with the microversioned test routes installed.
        self.app = fakes.wsgi_app_v21(custom_routes=microversions.ROUTES)

    def _test_microversions(self, app, req, ret_code, ret_header=None):
        """
        Issue ``req`` against ``app`` and assert the status code and,
        optionally, the returned microversion header; returns the response.
        """
        req.environ['CONTENT_TYPE'] = "application/json"
        res = req.get_response(app)
        self.assertEqual(ret_code, res.status_int)
        if ret_header:
            # The legacy nova header carries a bare version; the newer
            # OpenStack-API-Version header prefixes the service type.
            if 'nova' not in self.header_name.lower():
                ret_header = 'compute %s' % ret_header
            self.assertEqual(ret_header,
                             res.headers[self.header_name])
        return res

    def _make_header(self, req_header):
        """Build request headers for ``req_header`` in the style matching
        ``self.header_name`` (bare version vs. 'compute X.Y')."""
        if 'nova' in self.header_name.lower():
            headers = {self.header_name: req_header}
        else:
            headers = {self.header_name: 'compute %s' % req_header}
        return headers
    def test_microversions_no_header(self):
        """Requests without a version header get the default version."""
        req = fakes.HTTPRequest.blank(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID,
            method='GET')
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual('val', resp_json['param'])

    def test_microversions_return_header(self):
        """The negotiated version is echoed back and listed in Vary."""
        req = fakes.HTTPRequest.blank(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID)
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual('val', resp_json['param'])
        if 'nova' in self.header_name.lower():
            self.assertEqual("2.1", res.headers[self.header_name])
        else:
            self.assertEqual("compute 2.1", res.headers[self.header_name])
        self.assertIn(self.header_name, res.headers.getall('Vary'))

    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def test_microversions_return_header_non_default(self,
                                                     mock_maxver):
        """Requesting 2.3 (within range) selects the 2.3 implementation."""
        mock_maxver.return_value = api_version.APIVersionRequest("2.3")
        req = fakes.HTTPRequest.blank(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID)
        req.headers = self._make_header('2.3')
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual('val2', resp_json['param'])
        if 'nova' in self.header_name.lower():
            self.assertEqual("2.3", res.headers[self.header_name])
        else:
            self.assertEqual("compute 2.3", res.headers[self.header_name])
        self.assertIn(self.header_name, res.headers.getall('Vary'))

    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def test_microversions_return_header_fault(self, mock_maxver):
        """Even a fault (400) response carries the version header and Vary."""
        mock_maxver.return_value = api_version.APIVersionRequest("3.0")
        req = fakes.HTTPRequest.blank(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID)
        req.headers = self._make_header('3.0')
        res = req.get_response(self.app)
        self.assertEqual(400, res.status_int)
        if 'nova' in self.header_name.lower():
            self.assertEqual("3.0", res.headers[self.header_name])
        else:
            self.assertEqual("compute 3.0", res.headers[self.header_name])
        self.assertIn(self.header_name, res.headers.getall('Vary'))
    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def _check_microversion_response(self, url, req_version, resp_param,
                                     mock_maxver):
        """Request ``url`` at ``req_version`` (max version capped to 2.3)
        and assert the response 'param' equals ``resp_param``."""
        mock_maxver.return_value = api_version.APIVersionRequest('2.3')
        req = fakes.HTTPRequest.blank(url)
        req.headers = self._make_header(req_version)
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual(resp_param, resp_json['param'])

    def test_microversions_with_header(self):
        """A 2.3 request hits the >=2.2 implementation."""
        self._check_microversion_response(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID,
            '2.3', 'val2')

    def test_microversions_with_header_exact_match(self):
        """A request at exactly the lower bound (2.2) selects that impl."""
        self._check_microversion_response(
            '/v2/%s/microversions' % fakes.FAKE_PROJECT_ID,
            '2.2', 'val2')

    def test_microversions2_no_2_1_version(self):
        """microversions2 has no 2.1 handler; 2.3 maps to its first impl."""
        self._check_microversion_response(
            '/v2/%s/microversions2' % fakes.FAKE_PROJECT_ID,
            '2.3', 'controller2_val1')
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions2_later_version(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.1")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions2' % fakes.FAKE_PROJECT_ID)
req.headers = self._make_header('3.0')
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val2', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions2_version_too_high(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions2' % fakes.FAKE_PROJECT_ID)
req.headers = {self.header_name: '3.2'}
res = req.get_response(self.app)
self.assertEqual(404, res.status_int)
def test_microversions2_version_too_low(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions2' % fakes.FAKE_PROJECT_ID)
req.headers = {self.header_name: '2.1'}
res = req.get_response(self.app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions_global_version_too_high(self,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions2' % fakes.FAKE_PROJECT_ID)
req.headers = self._make_header('3.7')
res = req.get_response(self.app)
self.assertEqual(406, res.status_int)
res_json = jsonutils.loads(res.body)
self.assertEqual("Version 3.7 is not supported by the API. "
"Minimum is 2.1 and maximum is 3.5.",
res_json['computeFault']['message'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions_schema(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions3' % fakes.FAKE_PROJECT_ID)
req.method = 'POST'
req.headers = self._make_header('2.2')
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dump_as_bytes({'dummy': {'val': 'foo'}})
res = req.get_response(self.app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('create_val1', resp_json['param'])
if 'nova' in self.header_name.lower():
self.assertEqual("2.2", res.headers[self.header_name])
else:
self.assertEqual("compute 2.2", res.headers[self.header_name])
self.assertIn(self.header_name, res.headers.getall('Vary'))
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions_schema_fail(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions3' % fakes.FAKE_PROJECT_ID)
req.method = 'POST'
req.headers = {self.header_name: '2.2'}
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dump_as_bytes({'dummy': {'invalid_param': 'foo'}})
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertTrue(resp_json['badRequest']['message'].startswith(
"Invalid input for field/attribute dummy."))
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions_schema_out_of_version_check(self,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions3/1' % fakes.FAKE_PROJECT_ID)
req.method = 'PUT'
req.headers = self._make_header('2.2')
req.body = jsonutils.dump_as_bytes({'dummy': {'inv_val': 'foo'}})
req.environ['CONTENT_TYPE'] = "application/json"
res = req.get_response(self.app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
if 'nova' in self.header_name.lower():
self.assertEqual("2.2", res.headers[self.header_name])
else:
self.assertEqual("compute 2.2", res.headers[self.header_name])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_microversions_schema_second_version(self,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions3/1' % fakes.FAKE_PROJECT_ID)
req.headers = self._make_header('2.10')
req.environ['CONTENT_TYPE'] = "application/json"
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes({'dummy': {'val2': 'foo'}})
res = req.get_response(self.app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
if 'nova' in self.header_name.lower():
self.assertEqual("2.10", res.headers[self.header_name])
else:
self.assertEqual("compute 2.10", res.headers[self.header_name])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def _test_microversions_inner_function(self, version, expected_resp,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.2")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions4' % fakes.FAKE_PROJECT_ID)
req.headers = self._make_header(version)
req.environ['CONTENT_TYPE'] = "application/json"
req.method = 'POST'
req.body = b''
res = req.get_response(self.app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual(expected_resp, resp_json['param'])
if 'nova' not in self.header_name.lower():
version = 'compute %s' % version
self.assertEqual(version, res.headers[self.header_name])
def test_microversions_inner_function_v22(self):
self._test_microversions_inner_function('2.2', 'controller4_val2')
def test_microversions_inner_function_v21(self):
self._test_microversions_inner_function('2.1', 'controller4_val1')
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def _test_microversions_actions(self, ret_code, ret_header, req_header,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
req = fakes.HTTPRequest.blank(
'/v2/%s/microversions3/1/action' % fakes.FAKE_PROJECT_ID)
if req_header:
req.headers = self._make_header(req_header)
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({'foo': None})
res = self._test_microversions(self.app, req, ret_code,
ret_header=ret_header)
if ret_code == 202:
resp_json = jsonutils.loads(res.body)
self.assertEqual({'foo': 'bar'}, resp_json)
def test_microversions_actions(self):
self._test_microversions_actions(202, "2.1", "2.1")
def test_microversions_actions_too_high(self):
self._test_microversions_actions(404, "2.3", "2.3")
def test_microversions_actions_no_header(self):
self._test_microversions_actions(202, "2.1", None)
class MicroversionsTest(LegacyMicroversionsTest):
    """Re-run every microversion test using the 'OpenStack-API-Version'
    header instead of the legacy header used by the parent class.
    """
    # Parent test methods branch on this name (the non-'nova' header
    # variant expects a 'compute ' service-type prefix in its value).
    header_name = 'OpenStack-API-Version'
| apache-2.0 |
912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/views/decorators/csrf.py | 228 | 2201 | from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import decorator_from_middleware, available_attrs
from functools import wraps
# Per-view counterpart of CsrfViewMiddleware: wrapping the middleware class
# with decorator_from_middleware yields a decorator that applies the same
# protection to a single view.  __name__/__doc__ are set explicitly because
# decorator_from_middleware returns a generic callable.
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
    """CsrfViewMiddleware variant whose rejection hook is a no-op, so the
    token machinery runs but requests are never refused."""
    # We need this to behave just like the CsrfViewMiddleware, but not reject
    # requests or log warnings.
    def _reject(self, request, reason):
        # Returning None signals "do not reject" to the middleware machinery.
        return None
# Decorator built from the non-rejecting middleware variant: it makes a valid
# csrf_token available to the template context without enforcing the check.
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
    """CsrfViewMiddleware variant that never rejects a request but always
    triggers the CSRF cookie to be set on the response."""
    def _reject(self, request, reason):
        # Never reject; this class only exists to force the cookie.
        return None
    def process_view(self, request, callback, callback_args, callback_kwargs):
        # Run the normal middleware processing first (non-rejecting here).
        retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
        # Forces process_response to send the cookie
        get_token(request)
        return retval
# Decorator built from the cookie-forcing middleware variant above-defined
# class; guarantees the response carries the CSRF cookie.
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
    """Mark ``view_func`` as exempt from the CSRF view protection.

    A fresh wrapper function is returned (with ``csrf_exempt = True`` set
    on it) instead of mutating ``view_func`` directly, so the decorator
    has no side effects on the original callable.
    """
    def _passthrough(*args, **kwargs):
        return view_func(*args, **kwargs)
    _passthrough.csrf_exempt = True
    return wraps(view_func, assigned=available_attrs(view_func))(_passthrough)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.