repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sjtufs/GroundHog | experiments/nmt/sample.py | 1 | 18993 | #!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import\
RNNEncoderDecoder,\
prototype_state,\
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
class Timer(object):
    """Accumulates wall-clock time across start()/finish() intervals."""

    def __init__(self):
        # Total elapsed seconds over all completed start/finish pairs.
        self.total = 0

    def start(self):
        # Remember when the current interval began.
        self.start_time = time.time()

    def finish(self):
        # Fold the just-elapsed interval into the running total.
        elapsed = time.time() - self.start_time
        self.total += elapsed
class BeamSearch(object):
def __init__(self, enc_dec):
self.enc_dec = enc_dec
state = self.enc_dec.state
self.eos_id = state['null_sym_target']
self.unk_id = state['unk_sym_target']
def compile(self):
self.comp_repr = self.enc_dec.create_representation_computer()
self.comp_init_states = self.enc_dec.create_initializers()
self.comp_next_probs = self.enc_dec.create_next_probs_computer()
self.comp_next_states = self.enc_dec.create_next_states_computer()
def search(self, seq, n_samples, ignore_unk=False, minlen=1):
c = self.comp_repr(seq)[0]
states = map(lambda x : x[None, :], self.comp_init_states(c))
dim = states[0].shape[1]
num_levels = len(states)
fin_trank = []
fin_costs = []
trans = [[]]
costs = [0.0]
for k in range(3 * len(seq)):
if n_samples == 0:
break
# Compute probabilities of the next words for
# all the elements of the beam.
beam_size = len(trans)
last_words = (numpy.array(map(lambda t : t[-1], trans))
if k > 0
else numpy.zeros(beam_size, dtype="int64"))
log_probs = numpy.log(self.comp_next_probs(c, k, last_words, *states)[0])
print last_words
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:,self.unk_id] = -numpy.inf
# TODO: report me in the paper!!!
if k < minlen:
log_probs[:,self.eos_id] = -numpy.inf
# Find the best options by calling argpartition of flatten array
next_costs = numpy.array(costs)[:, None] - log_probs
flat_next_costs = next_costs.flatten()
best_costs_indices = argpartition(
flat_next_costs.flatten(),
n_samples)[:n_samples]
# Decypher flatten indices
voc_size = log_probs.shape[1]
# which beam?
trans_indices = best_costs_indices / voc_size
# which word?
word_indices = best_costs_indices % voc_size
costs = flat_next_costs[best_costs_indices]
# Form a beam for the next iteration
new_trans = [[]] * n_samples
new_costs = numpy.zeros(n_samples)
new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
in range(num_levels)]
inputs = numpy.zeros(n_samples, dtype="int64")
for i, (orig_idx, next_word, next_cost) in enumerate(
zip(trans_indices, word_indices, costs)):
new_trans[i] = trans[orig_idx] + [next_word]
new_costs[i] = next_cost
for level in range(num_levels):
new_states[level][i] = states[level][orig_idx]
inputs[i] = next_word
new_states = self.comp_next_states(c, k, inputs, *new_states)
# Filter the sequences that end with end-of-sequence character
trans = []
costs = []
indices = []
for i in range(n_samples):
if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
trans.append(new_trans[i])
costs.append(new_costs[i])
indices.append(i)
else:
n_samples -= 1
fin_trans.append(new_trans[i])
fin_costs.append(new_costs[i])
# beam size is naturally reduced when multiple best
# new trans came from same beam
states = map(lambda x : x[indices], new_states)
# Dirty tricks to obtain any translation
if not len(fin_trans):
if ignore_unk:
logger.warning("Did not manage without UNK")
return self.search(seq, n_samples, False, minlen)
elif n_samples < 500:
logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
return self.search(seq, n_samples * 2, False, minlen)
else:
logger.error("Translation failed")
fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
fin_costs = numpy.array(sorted(fin_costs))
return fin_trans, fin_costs
def search_with_truth(self, seq, truth, n_samples, ignore_unk=False, minlen=1, idict=None):
for ww in truth:
print idict[ww],
print ''
c = self.comp_repr(seq)[0]
# one representation at each encoding time step
# c.shape[0] = len(seq)
# states is a dim_dimensional vector, output of initialization unit
states = map(lambda x : x[None, :], self.comp_init_states(c))
# dimension of hidden layer
dim = states[0].shape[1]
# always 1 in case of non-deep GRU
num_levels = len(states)
fin_trans = []
fin_costs = []
trans = [[]]
costs = [0.0]
# maximum translation length allowed is 3*len(source)
for k in range(3 * len(seq)):
if n_samples == 0:
# all translation ended
break
# Compute probabilities of the next words for
# all the elements of the beam.
beam_size = len(trans)
last_words = (numpy.array(map(lambda t : t[-1], trans))
if k > 0
else numpy.zeros(beam_size, dtype="int64"))
log_probs = numpy.log(self.comp_next_probs(c, k, last_words, *states)[0])
print str(k) + '\t' + '|',
if k > 0 and k <= len(truth):
if truth[k - 1] < 30000:
print idict[truth[k - 1]] + '\t' + '|',
else:
print '<EOS>' + '\t',
for ww in last_words:
print idict[ww] + ' ',
print ''
else:
print last_words
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:,self.unk_id] = -numpy.inf
# TODO: report me in the paper!!!
if k < minlen:
log_probs[:,self.eos_id] = -numpy.inf
# Find the best options by calling argpartition of flatten array
next_costs = numpy.array(costs)[:, None] - log_probs
flat_next_costs = next_costs.flatten()
best_costs_indices = argpartition(
flat_next_costs.flatten(),
n_samples)[:n_samples]
# Decypher flatten indices
voc_size = log_probs.shape[1]
# which beam?
trans_indices = best_costs_indices / voc_size
# which word?
word_indices = best_costs_indices % voc_size
costs = flat_next_costs[best_costs_indices]
# Form a beam for the next iteration
new_trans = [[]] * (n_samples)
new_costs = numpy.zeros(n_samples)
new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
in range(num_levels)]
inputs = numpy.zeros(n_samples, dtype="int64")
for i, (orig_idx, next_word, next_cost) in enumerate(
zip(trans_indices, word_indices, costs)):
new_trans[i] = trans[orig_idx] + [next_word]
new_costs[i] = next_cost
for level in range(num_levels):
new_states[level][i] = states[level][orig_idx]
inputs[i] = next_word
new_states = self.comp_next_states(c, k, inputs, *new_states)
# Filter the sequences that end with end-of-sequence character
trans = []
costs = []
indices = []
for i in range(n_samples):
if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
trans.append(new_trans[i])
costs.append(new_costs[i])
indices.append(i)
else:
n_samples -= 1
fin_trans.append(new_trans[i])
fin_costs.append(new_costs[i])
# beam size is naturally reduced when multiple best
# new trans came from same beam
states = map(lambda x : x[indices], new_states)
# Dirty tricks to obtain any translation
if not len(fin_trans):
if ignore_unk:
logger.warning("Did not manage without UNK")
return self.search_with_truth(seq, truth, n_samples, False, minlen, idict)
elif n_samples < 500:
logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
return self.search_with_truth(seq, truth, n_samples * 2, False, minlen, idict)
else:
logger.error("Translation failed")
fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
fin_costs = numpy.array(sorted(fin_costs))
return fin_trans, fin_costs
def indices_to_words(i2w, seq):
    """Map word indices to word strings, stopping at the '<eol>' marker.

    :param i2w: index-to-word mapping (list or dict)
    :param seq: iterable of word indices
    :returns: list of words up to (excluding) the first '<eol>'
    """
    sen = []
    # Iterate the sequence directly instead of indexing with xrange(len(...)).
    for idx in seq:
        word = i2w[idx]
        if word == '<eol>':
            break
        sen.append(word)
    return sen
def sample(lm_model, seq, truth, n_samples,
sampler=None, beam_search=None,
ignore_unk=False, normalize=False,
alpha=1, verbose=False, idict=None):
if truth is not None and beam_search:
sentences = []
trans, costs = beam_search.search_with_truth(seq, truth, n_samples,
ignore_unk=ignore_unk, minlen=len(seq) / 2, idict=idict)
if normalize:
counts = [len(s) for s in trans]
costs = [co / cn for co, cn in zip(costs, counts)]
for i in range(len(trans)):
sen = indices_to_words(lm_model.word_indxs, trans[i])
sentences.append(" ".join(sen))
for i in range(len(costs)):
if verbose:
print "{}: {}".format(costs[i], sentences[i])
return sentences, costs, trans
elif beam_search:
sentences = []
trans, costs = beam_search.search(seq, n_samples,
ignore_unk=ignore_unk, minlen=len(seq) / 2)
if normalize:
counts = [len(s) for s in trans]
costs = [co / cn for co, cn in zip(costs, counts)]
for i in range(len(trans)):
sen = indices_to_words(lm_model.word_indxs, trans[i])
sentences.append(" ".join(sen))
for i in range(len(costs)):
if verbose:
print "{}: {}".format(costs[i], sentences[i])
return sentences, costs, trans
elif sampler:
sentences = []
all_probs = []
costs = []
values, cond_probs = sampler(n_samples, 3 * (len(seq) - 1), alpha, seq)
for sidx in xrange(n_samples):
sen = []
for k in xrange(values.shape[0]):
if lm_model.word_indxs[values[k, sidx]] == '<eol>':
break
sen.append(lm_model.word_indxs[values[k, sidx]])
sentences.append(" ".join(sen))
probs = cond_probs[:, sidx]
probs = numpy.array(cond_probs[:len(sen) + 1, sidx])
all_probs.append(numpy.exp(-probs))
costs.append(-numpy.sum(probs))
if normalize:
counts = [len(s.strip().split(" ")) for s in sentences]
costs = [co / cn for co, cn in zip(costs, counts)]
sprobs = numpy.argsort(costs)
if verbose:
for pidx in sprobs:
print "{}: {} {} {}".format(pidx, -costs[pidx], all_probs[pidx], sentences[pidx])
print
return sentences, costs, None
else:
raise Exception("I don't know what to do")
def parse_args():
    """Build and parse the command-line interface of this script."""
    parser = argparse.ArgumentParser(
        "Sample (or find with beam-search) translations from a translation model")
    parser.add_argument("--state",
                        required=True, help="State to use")
    # This is a flag; its previous help text wrongly said "Beam size".
    parser.add_argument("--beam-search",
                        action="store_true", help="Turn on beam-search")
    parser.add_argument("--beam-size",
                        type=int, help="Beam size")
    parser.add_argument("--ignore-unk",
                        default=False, action="store_true",
                        help="Ignore unknown words")
    parser.add_argument("--source",
                        help="File of source sentences")
    parser.add_argument("--target",
                        help="File of target sentences")
    parser.add_argument("--trans",
                        help="File to save translations in")
    parser.add_argument("--normalize",
                        action="store_true", default=False,
                        help="Normalize log-prob with the word count")
    parser.add_argument("--verbose",
                        action="store_true", default=False,
                        help="Be verbose")
    parser.add_argument("model_path",
                        help="Path to the model")
    parser.add_argument("changes",
                        nargs="?", default="",
                        help="Changes to state")
    return parser.parse_args()
def main():
args = parse_args()
state = prototype_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
rng = numpy.random.RandomState(state['seed'])
enc_dec = RNNEncoderDecoder(state, rng, skip_init=True)
enc_dec.build()
lm_model = enc_dec.create_lm_model()
lm_model.load(args.model_path)
indx_word_src = cPickle.load(open(state['word_indx'],'rb'))
indx_word_trg = cPickle.load(open(state['word_indx_trgt'],'rb'))
sampler = None
beam_search = None
if args.beam_search:
beam_search = BeamSearch(enc_dec)
beam_search.compile()
else:
sampler = enc_dec.create_sampler(many_samples=True)
idict_src = cPickle.load(open(state['indx_word'],'r'))
idict_trg = cPickle.load(open(state['indx_word_target'],'r'))
if args.source and args.target:
# Actually only beam search is currently supported here
# assert beam_search
# assert args.beam_size
fsrc = open(args.source, 'r')
ftrg = open(args.target, 'r')
start_time = time.time()
n_samples = args.beam_size
total_cost = 0.0
logging.debug("Beam size: {}".format(n_samples))
for srcline, trgline in zip(fsrc, ftrg):
src_seqin = srcline.strip()
trg_seqin = trgline.strip()
src_seq, src_parsed_in = parse_input(state, indx_word_src, src_seqin, idx2word=idict_src)
trg_seq, trg_parsed_in = parse_input(state, indx_word_trg, trg_seqin, idx2word=idict_trg)
if args.verbose:
print "Parsed Input:", parsed_in
trans, costs, _ = sample(lm_model, src_seq, trg_seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk,
normalize=args.normalize, verbose=True, idict=idict_trg)
best = numpy.argmin(costs)
# if args.verbose:
# print "Translation:", trans[best]
total_cost += costs[best]
fsrc.close()
ftrg.close()
elif args.source and args.trans:
# Actually only beam search is currently supported here
assert beam_search
assert args.beam_size
fsrc = open(args.source, 'r')
ftrans = open(args.trans, 'w')
start_time = time.time()
n_samples = args.beam_size
total_cost = 0.0
logging.debug("Beam size: {}".format(n_samples))
for i, line in enumerate(fsrc):
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
if args.verbose:
print "Parsed Input:", parsed_in
trans, costs, _ = sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)
best = numpy.argmin(costs)
print >>ftrans, trans[best]
if args.verbose:
print "Translation:", trans[best]
total_cost += costs[best]
if (i + 1) % 100 == 0:
ftrans.flush()
logger.debug("Current speed is {} per sentence".
format((time.time() - start_time) / (i + 1)))
print "Total cost of the translations: {}".format(total_cost)
fsrc.close()
ftrans.close()
elif args.source:
fsrc = open(args.source, 'rb')
n_samples = args.beam_size
for i, line in enumerate(fsrc):
seqin = line.strip()
seq, parsed_in = parse_input(state,
indx_word,
seqin,
idx2word=idict_src)
print 'Parsed Input:', parsed_in
trans, cost, _ = sample(lm_model,
seq,
n_samples,
beam_search=beam_search,
ignore_unk=args.ignore_unk,
normalize=args.normalize,
verbose=True)
else:
while True:
try:
seqin = raw_input('Input Sequence: ')
n_samples = int(raw_input('How many samples? '))
alpha = None
if not args.beam_search:
alpha = float(raw_input('Inverse Temperature? '))
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
print "Parsed Input:", parsed_in
except Exception:
print "Exception while parsing your input:"
traceback.print_exc()
continue
sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search,
ignore_unk=args.ignore_unk, normalize=args.normalize,
alpha=alpha, verbose=True)
# Standard script entry point.
if __name__ == "__main__":
    main()
| bsd-3-clause |
iegor/kdeutils | superkaramba/examples/image/image.py | 4 | 4192 | #this import statement allows access to the karamba functions
import karamba
# Handles for the 14 test images manipulated below (0 = not currently created).
images = [0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# Number of widgetUpdated() calls so far; drives the alternating tests.
c = 0
#this is called when your widget is initialized
def initWidget(widget):
    """Fetch handles for the theme images declared in the .theme file.

    Populates the global `images` list with the handles of the images
    named "image0" .. "image13".
    """
    global images
    # Replace the 14 copy-pasted getThemeImage lines with a loop.
    for i in range(len(images)):
        images[i] = karamba.getThemeImage(widget, "image%d" % i)
#this is called everytime your widget is updated
#the update interval is specified in the .theme file
def widgetUpdated(widget):
    # Smoke-tests the karamba image API: every update toggles, creates,
    # moves or transforms a set of theme images.  `c` counts updates and
    # `b` alternates 0/1 so each pair of calls flips behaviour.
    global images, c
    b = c%2

    # Create & delete: image0 is destroyed and recreated on alternate calls.
    if(images[0]):
        karamba.deleteImage(widget, images[0])
        images[0] = 0
    else:
        images[0] = karamba.createImage(widget, 0, 20, "flag.png")

    # Hide & Show: image1 blinks.
    if(b):
        karamba.hideImage(widget, images[1])
    else:
        karamba.showImage(widget, images[1])

    # size & resize: query image1's geometry.
    size = karamba.getImageSize(widget, images[1])
    print "getImageSize: " + str(size)
    print "getImageWidth: " + str(karamba.getImageWidth(widget, images[1]))
    print "getImageHeight: " + str(karamba.getImageHeight(widget, images[1]))

    # Auto size
    #size = ((b * 200) + 200, size[1])
    #karamba.resizeImage(widget, images[1], size[0], size[1])

    # pos & move: image2 bounces between x=0 and x=200.
    pos = karamba.getImagePos(widget, images[2])
    print "getImagePos: " + str(pos)
    pos = (b * 200, pos[1])
    karamba.moveImage(widget, images[2], pos[0], pos[1])

    # Sensor: alternate the program sensor attached to image3.
    sensor = karamba.getImageSensor(widget, images[3])
    print "getSensor: " + str(sensor)
    if(b):
        karamba.setImageSensor(widget, images[3], 'SENSOR=PROGRAM PROGRAM="/tmp/test1.sh"')
    else:
        karamba.setImageSensor(widget, images[3], 'SENSOR=PROGRAM PROGRAM="/tmp/test2.sh"')

    # Value: alternate image4 between two image files.
    v = karamba.getImagePath(widget, images[4])
    print "getImagePath: ", v
    if(b):
        v = 'flag.png'
    else:
        v = 'flag2.png'
    karamba.setImagePath(widget, images[4], v)

    # Effects: every 10th update clears them; otherwise ramp the intensity
    # (and the blue channel) from -0.8 to +0.8 as c%10 goes 1..9.
    if((c % 10) == 0):
        karamba.removeImageEffects(widget, images[5])
        karamba.removeImageEffects(widget, images[6])
        karamba.removeImageEffects(widget, images[7])
    else:
        karamba.changeImageIntensity(widget, images[5], (float(c%10) / 5 - 1.0))
        karamba.changeImageChannelIntensity(widget, images[6], (float(c%10) / 5 - 1.0), 'blue')
        karamba.changeImageToGray(widget, images[7], 0)

    # Transformations: rotation and (smooth) resizing, reset every 9th
    # update.  NOTE: `size` here is still image1's size queried above.
    if((c % 9) == 0):
        karamba.removeImageTransformations(widget, images[8])
        karamba.removeImageTransformations(widget, images[9])
        karamba.removeImageTransformations(widget, images[10])
    else:
        karamba.rotateImage(widget, images[8], (c%9)*20 + 45)
        karamba.resizeImage(widget, images[9], 50 + (c%5)*10, size[1])
        karamba.resizeImageSmooth(widget, images[10], 50 + (c%5)*10, size[1])

    # Tooltip: refresh image11's tooltip every 10th update.
    if((c % 10) == 0):
        karamba.addImageTooltip(widget, images[11], str(c))

    # Background image and task icon: toggled every 20th update.
    if((c % 20) == 0):
        if(images[12]):
            karamba.deleteImage(widget, images[12])
            images[12] = 0
        else:
            images[12] = karamba.createBackgroundImage(widget, 0, 340, "flag.png")
        if(images[13]):
            karamba.deleteImage(widget, images[13])
            images[13] = 0
        else:
            tlist = karamba.getTaskList(widget)
            images[13] = karamba.createTaskIcon(widget, 50, 340, tlist[c % len(tlist)])
    c += 1
def widgetClicked(widget, x, y, button):
    # Mouse-click callback required by the SuperKaramba API; unused here.
    pass
def widgetMouseMoved(widget, x, y, button):
    # Mouse-move callback required by the SuperKaramba API; unused here.
    pass
# Printed once when SuperKaramba loads this extension.
print "Loaded Image test python extension!"
| gpl-2.0 |
bootphon/tde | tde/substrings/acss.py | 1 | 3207 | """Find common substrings.
"""
from itertools import combinations
import numpy as np
from tde.substrings.ccss import allcommonsubstrings as _acss
from tde.data.interval import Interval
from tde.data.fragment import FragmentToken
def pairwise_substring_completion(fragment1, fragment2, corpus,
                                  minlength, maxlength):
    """Yield FragmentToken pairs for every common-length substring pair
    of the two fragments' token sequences.

    Token sequences are looked up in `corpus` by fragment name and
    interval; lengths are bounded by minlength/maxlength.
    """
    name1, name2 = fragment1.name, fragment2.name
    seq1 = [(tok.mark, tok.interval)
            for tok in corpus.tokens(name1, fragment1.interval)]
    seq2 = [(tok.mark, tok.interval)
            for tok in corpus.tokens(name2, fragment2.interval)]
    for sub1, sub2 in psubstrings(seq1, seq2, minlength, maxlength):
        marks1, intervals1 = zip(*sub1)
        marks2, intervals2 = zip(*sub2)
        # Each substring spans from the start of its first token's
        # interval to the end of its last token's interval.
        span1 = Interval(intervals1[0].start, intervals1[-1].end)
        span2 = Interval(intervals2[0].start, intervals2[-1].end)
        yield (FragmentToken(name1, span1, marks1),
               FragmentToken(name2, span2, marks2))
def psubstrings(s1, s2, minlength, maxlength):
    """Generate all pairs of equal-length aligned substrings of s1 and s2.

    The shorter sequence is slid over the longer one at every offset;
    for each alignment, every window of admissible length is yielded
    from both sequences.

    :param s1, s2: sliceable sequences
    :param minlength: minimum substring length (inclusive)
    :param maxlength: upper bound on substring length
    :returns: iterator of (substring_of_s1, substring_of_s2) pairs

    NOTE(review): the `min(len(s1) + 1, maxlength)` bound excludes
    substrings of length `maxlength` whenever maxlength <= len(s1),
    whereas substrings() below treats maxlength as inclusive — confirm
    which is intended.  Behaviour preserved here.
    """
    # `range` replaces the Python-2-only `xrange`: behaviourally identical
    # for these loops and portable to Python 3.
    if len(s1) > len(s2):
        # Normalize so the first argument is the shorter one, then swap back.
        for a, b in psubstrings(s2, s1, minlength, maxlength):
            yield b, a
    elif len(s1) == len(s2):
        # Equal lengths: slide a window of every admissible length.
        for ss_len in range(minlength, min(len(s1) + 1, maxlength)):
            for start in range(0, len(s1) - ss_len + 1):
                yield s1[start:start + ss_len], s2[start:start + ss_len]
    else:
        # s1 strictly shorter: align it at every offset within s2.
        for offset in range(len(s2) - len(s1) + 1):
            for pair in psubstrings(s1, s2[offset:offset + len(s1)],
                                    minlength, maxlength):
                yield pair
def substrings(s, minlength, maxlength):
    """Generate all substrings of s with minlength <= length <= maxlength.

    Parameters
    ----------
    s : sequence
    minlength : int
        Minimum length of substrings (inclusive).
    maxlength : int
        Maximum length of substrings (inclusive).

    Returns
    -------
    i : iterator over substrings of s
    """
    # `range` replaces the Python-2-only `xrange`: behaviourally identical
    # here and portable to Python 3.  Each (start, end) pair with
    # start < end denotes one candidate substring.
    for start, end in combinations(range(len(s) + 1), 2):
        if minlength <= end - start <= maxlength:
            yield s[start: end]
def allcommonsubstrings(s1, s2=None, minlength=3, maxlength=20, same=False):
    """Find all common substrings.

    The algorithm used is a simple dp. This could be sped up by using
    suffix trees.

    Parameters
    ----------
    s1 : iterable
    s2 : iterable, optional
    minlength : int, optional
        minimum length of substrings

    Returns
    -------
    r : list of (Slice, Slice)
        Indices into `s1` and `s2`
    """
    comparing_to_self = s2 is None or same
    # Build the shared symbol inventory of both sequences.
    if comparing_to_self:
        symbols = sorted(list(set(s1)))
    else:
        symbols = sorted(list(set(s1 + s2)))
    if comparing_to_self:
        s2 = s1
        same = 1
    else:
        same = 0
    if s1 == [] or s2 == []:
        return []
    # Encode both sequences as integer arrays for the C-level search.
    sym2idx = {v: k for k, v in enumerate(symbols)}
    s1_arr = np.fromiter((sym2idx[s] for s in s1), dtype=np.long)
    s2_arr = np.fromiter((sym2idx[s] for s in s2), dtype=np.long)
    css = _acss(s1_arr, s2_arr, minlength, maxlength, same)
    if css is None:
        return []
    # Convert each (start1, start2, length) row into a pair of slices.
    result = []
    for row in css:
        start1, start2, length = row[0], row[1], row[2]
        result.append((slice(start1, start1 + length),
                       slice(start2, start2 + length)))
    return result
| gpl-3.0 |
piquadrat/django | django/db/models/sql/subqueries.py | 14 | 7198 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
    """A DELETE SQL query."""

    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        # Restrict the alias map to the single table being deleted from;
        # a DELETE statement may only touch one table.
        self.alias_map = {table: self.alias_map[table]}
        self.where = where
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        # A missing cursor means nothing was executed; report 0 rows.
        return cursor.rowcount if cursor else 0

    def delete_batch(self, pk_list, using):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list.  Returns the number of deleted rows.
        """
        # number of objects deleted
        num_deleted = 0
        field = self.get_meta().pk
        # Chunk the pk list so the generated IN (...) clause stays within
        # backend limits.
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(
                **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
            num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
        return num_deleted

    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = tuple([t for t in innerq.alias_map if innerq.alias_refcount[t]])
        if not innerq_used_tables or innerq_used_tables == tuple(self.alias_map):
            # There is only the base table in use in the query.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                # Fall back to fetching the pks and deleting in batches.
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return 0
                return self.delete_batch(values, using)
            else:
                # Turn the inner query into a "SELECT pk" and use it as a
                # subquery: DELETE ... WHERE pk IN (SELECT pk ...).
                innerq.clear_select_clause()
                innerq.select = [
                    pk.get_col(self.get_initial_alias())
                ]
                values = innerq
                self.where = self.where_class()
                self.add_q(Q(pk__in=values))
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
    """An UPDATE SQL query."""

    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Run on initialization and at the end of chaining. Any attributes that
        would normally be set in __init__() should go here instead.
        """
        # (field, model, value) triples to write in the UPDATE statement.
        self.values = []
        # Primary keys of related rows to update, set externally if needed.
        self.related_ids = None
        # Maps ancestor model -> list of (field, None, value) updates.
        self.related_updates = {}

    def clone(self):
        # Copy related_updates so the clone can diverge from the original.
        obj = super().clone()
        obj.related_updates = self.related_updates.copy()
        return obj

    def update_batch(self, pk_list, values, using):
        # Apply `values` to the rows in pk_list, chunked so the IN (...)
        # clause stays within backend limits.
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
            self.get_compiler(using).execute_sql(NO_RESULTS)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in values.items():
            field = self.get_meta().get_field(name)
            direct = not (field.auto_created and not field.concrete) or not field.concrete
            model = field.model._meta.concrete_model
            if not direct or (field.is_relation and field.many_to_many):
                raise FieldError(
                    'Cannot update model field %r (only non-relations and '
                    'foreign keys permitted).' % field
                )
            if model is not self.get_meta().model:
                # The field lives on an ancestor model: defer it to a
                # separate UPDATE on that table (see add_related_update()).
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Append a sequence of (field, model, value) triples to the internal list
        that will be used to generate the UPDATE query. Might be more usefully
        called add_update_targets() to hint at the extra information here.
        """
        for field, model, val in values_seq:
            if hasattr(val, 'resolve_expression'):
                # Resolve expressions here so that annotations are no longer needed
                val = val.resolve_expression(self, allow_joins=False, for_save=True)
            self.values.append((field, model, val))

    def add_related_update(self, model, field, value):
        """
        Add (name, value) to an update query for an ancestor model.

        Update are coalesced so that only one update query per ancestor is run.
        """
        self.related_updates.setdefault(model, []).append((field, None, value))

    def get_related_updates(self):
        """
        Return a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in self.related_updates.items():
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                # Restrict the ancestor update to the relevant rows.
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """An INSERT SQL query."""

    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fields and model instances to insert; filled by insert_values().
        self.fields = []
        self.objs = []
        # Default to a non-raw insert so self.raw is always defined, even
        # if the compiler inspects it before insert_values() is called.
        self.raw = False

    def insert_values(self, fields, objs, raw=False):
        """
        Record the fields, model instances, and raw-mode flag the compiler
        will turn into the INSERT statement.
        """
        self.fields = fields
        self.objs = objs
        self.raw = raw
class AggregateQuery(Query):
    """
    Take another query as a parameter to the FROM clause and only select the
    elements in the provided list.
    """

    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Mark the inner query as a subquery and pre-compile it into SQL
        # plus parameters for use in the FROM clause.
        query.subquery = True
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
| bsd-3-clause |
zhoushun/googletest | scripts/pump.py | 2471 | 23673 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# One (compiled regex, token type) pair per Pump construct.  FindFirstInLine
# scans a line with all of them and keeps the left-most match, with earlier
# entries winning ties, so the specific directives must precede the bare '$'.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
  """A (line, column) position in a text file.

  Cursors are totally ordered: first by line, then by column.  The
  special value Cursor(-1, -1) (see Eof()) denotes the end-of-file.
  """

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  # Comparisons delegate to tuple ordering on (line, column), which is
  # exactly "by line, then by column".

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return (self.line, self.column) <= (rhs.line, rhs.column)

  def __gt__(self, rhs):
    return (self.line, self.column) > (rhs.line, rhs.column)

  def __ge__(self, rhs):
    return (self.line, self.column) >= (rhs.line, rhs.column)

  def __str__(self):
    if self == Eof():
      return 'EOF'
    # Lines are displayed 1-based, columns 0-based.
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    # Moving right within the same line.
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  return Cursor(-1, -1)
class Token:
  """A single lexical token from a Pump source file.

  Attributes:
    start, end: cursor positions delimiting the token's text.
    value: the raw text of the token.
    token_type: a type tag from TOKEN_TABLE, or None.
  """

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing positions default to the end-of-file sentinel.
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self with the cursors cloned as well."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the text at position pos in lines begins with string."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Finds the left-most match of any regex in token_table within line.

  Returns (start_column, length, token_type) for the left-most match,
  with earlier table entries winning ties, or None if nothing matches.
  """
  best = None
  for (regex, token_type) in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first token from token_table at or after cursor in lines.

  Returns the Token, or None if no regex in the table matches anywhere
  from cursor to the end of the input.
  """
  start = cursor.Clone()
  for line_number in range(start.line, len(lines)):
    line = lines[line_number]
    if line_number == start.line:
      # Only look at the part of the first line after the cursor.
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      (start_column, length, token_type) = m
      if line_number == start.line:
        # Translate back to a column in the full (untrimmed) line.
        start_column += start.column
      found_start = Cursor(line_number, start_column)
      return MakeToken(lines, found_start, found_start + length, token_type)
  # No token found at or after the cursor.
  return None
def SubString(lines, start, end):
  """Returns the text between cursors *start* and *end* in *lines*.

  An end cursor of Eof() means "through the end of the last line".  Returns
  '' when start is at or past end.  Relies on Cursor's comparison operators
  (defined earlier in this file).
  """
  if end == Eof():
    # Normalize Eof to a concrete position just past the last character.
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  # Multi-line span: tail of the first line, all middle lines, and the head
  # of the last line.
  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)
def StripMetaComments(str):
  """Strip meta comments ($$ ...) from each line in the given string."""
  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.  re.MULTILINE makes '^' anchor at
  # every line start; without it only a comment-only line at the very start
  # of the input was removed here, and later comment-only lines (handled by
  # the second substitution) left a stray leading newline behind.
  str = re.sub(r'^\s*\$\$.*\n', '', str, flags=re.MULTILINE)
  # Then, remove trailing meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
  """Creates a Token spanning [start, end) whose value is the covered text."""
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  """Returns a Token for *regex* matched exactly at *pos*.

  The match must start at the cursor position (column offset 0 of the
  remaining line); otherwise an error is printed and the process exits.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    # Match anchored at pos: token covers [pos, pos + match length).
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
# A Pump identifier: a letter or underscore followed by word characters.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
# The '=' sign in a $var definition.
EQ_REGEX = re.compile(r'=')
# Everything up to the end of the line or the start of a $$ meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
# Zero or more whitespace characters (always matches, possibly empty).
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
# A single whitespace character.
WHITE_SPACE_REGEX = re.compile(r'\s')
# The '..' separator in a $range statement.
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
  """Advances *pos* past a match of *regex* anchored at *pos*.

  Returns the cursor just after the match when the regex matches at the
  current column; otherwise returns *pos* unchanged.  *regex* may be a
  pattern string or a compiled pattern.
  """
  rest_of_line = lines[pos.line][pos.column:]
  m = re.search(regex, rest_of_line)
  if m and m.start() == 0:
    return pos + m.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the cursor of the first match of *regex* on the current line.

  Unlike FindFirst, this only searches the remainder of pos's line.  If the
  regex does not match, prints an error naming *token_type* and exits.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at *pos* into an 'exp' Token.

  Handles nested parentheses via the mutually recursive helpers below; the
  returned token covers everything from *pos* through the matching ')'.
  """
  def ParseInParens(pos):
    # Skip optional whitespace and the opening '(', then consume the body
    # through the matching ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos
  def Parse(pos):
    # Advance to the next paren; recurse on '(' to skip a nested group,
    # stop on ')' (left for the caller to consume).
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos
  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns *token* with a single trailing newline stripped from its value.

  When the value does not end in a newline the original token object is
  returned unchanged; otherwise a new Token (same span/type) is created.
  """
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """Generator yielding Pump tokens from *lines* starting at cursor *pos*.

  Repeatedly finds the next meta token (via TOKEN_TABLE), yields any plain
  'code' token preceding it, then yields the meta token plus whatever
  sub-tokens its directive requires ($var/$for/$range/...).  Runs until the
  input is exhausted, finishing with a final 'code' token to Eof.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more meta tokens: the rest of the input is plain code.
      yield MakeToken(lines, pos, Eof(), 'code')
      return
    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Plain code between the cursor and the meta token.  The rstripped
      # variant drops the trailing newline for directives that consume it.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)
    if found.token_type == '$var':
      # $var id = <exp or [[...]]>
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')
      if SubString(lines, pos, pos + 2) != '[[':
        # Inline expression value: the rest of the line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      # $for id <separator> [[...]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range id exp1..exp2
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(exp): inline expression in parentheses.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # Structural tokens swallow the preceding newline of the code token.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if not s:
    # Empty input produces no tokens at all.
    return
  for token in TokenizeLines(s.splitlines(True), Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node for $var: binds an identifier to the value of a code block."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node for $range: identifier ranges from exp1 to exp2 (inclusive)."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node for $for: repeats *code* over the identifier's range, joined by *sep*."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node for a bare else branch.

  NOTE(review): appears unused by the visible parser (ParseElseNode returns
  the branch's CodeNode directly) -- confirm before removing.
  """
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node for $if/$elif/$else: condition plus then/else branches."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node holding a verbatim 'code' token to emit unchanged."""
  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node for $($): emits a literal '$' character."""
  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node for an expression token plus its translated Python expression."""
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of *a_list* (raises IndexError if empty)."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts *elem* at the front of *a_list* in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token, optionally enforcing its type.

  When *token_type* is given and the popped token's type differs, prints an
  error and exits the process.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the first element of *a_list* without removing it, or None if empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Translates an 'exp' token into an ExpNode.

  Every identifier in the expression is rewritten to self.GetValue("name")
  so Env.EvalExp can eval() it with the environment's variables in scope.
  """
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $else/$elif continuation from the token stream.

  Returns the else-branch CodeNode, a CodeNode wrapping a nested IfNode for
  $elif, or None when no else branch follows.  Whitespace-only code tokens
  between branches are skipped recursively.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    # $elif desugars to a nested if inside the else branch.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code token: discard it and keep looking.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic AST node from the front of the token stream.

  Dispatches on the first token's type and consumes exactly the tokens that
  belong to that construct.  Returns None (after pushing the token back)
  when the token does not start an atomic node, e.g. on a closing ']]'.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    # $var id = <exp>  or  $var id = [[ code ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for id [separator-code] [[ body ]]
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    # $if <exp> [[ then ]] ($elif/$else ...)?
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    # $range id exp1..exp2
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # A bare $name reference: drop the leading '$' and treat as expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    # Anonymous nested code block.
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not the start of an atomic node; put the token back for the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses consecutive atomic nodes from *tokens* into a single CodeNode."""
  atomic_nodes = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      # The next token does not start an atomic node (e.g. a closing ']]').
      break
    atomic_nodes.append(node)
  return CodeNode(atomic_nodes)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST (a root CodeNode)."""
  tokens = list(Tokenize(pump_src_text))
  code_node = ParseCodeNode(tokens)
  return code_node
class Env:
  """Evaluation environment: stacks of meta variables and ranges.

  Both stacks are lists with the most recent binding at the front, so lookup
  naturally finds the innermost definition first.
  """
  def __init__(self):
    self.variables = []  # List of (name, value), innermost binding first.
    self.ranges = []     # List of (name, lower, upper), innermost first.
  def Clone(self):
    """Returns a shallow copy with independent binding stacks."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone
  def PushVariable(self, var, value):
    """Pushes a variable binding; numeric-looking strings become ints."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]
  def PopVariable(self):
    """Removes the innermost variable binding."""
    self.variables[:1] = []
  def PushRange(self, var, lower, upper):
    """Pushes a range binding for $range/$for."""
    self.ranges[:0] = [(var, lower, upper)]
  def PopRange(self):
    """Removes the innermost range binding."""
    self.ranges[:1] = []
  def GetValue(self, identifier):
    """Returns the innermost value bound to *identifier*; exits if unbound."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)
  def EvalExp(self, exp):
    """Evaluates an ExpNode's translated Python expression; exits on error.

    NOTE: uses eval() on template-derived text, so Pump input is trusted by
    design -- do not feed untrusted templates to this tool.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result
  def GetRange(self, identifier):
    """Returns (lower, upper) for *identifier*'s range; exits if unbound."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text and exposes the last (unterminated) line."""
  def __init__(self):
    self.string = ''
  def GetLastLine(self):
    """Returns the text after the final newline, or '' if there is no newline."""
    _, newline, tail = self.string.rpartition('\n')
    # Mirror the historical behavior: without any newline the result is ''.
    return tail if newline else ''
  def Append(self, s):
    """Appends *s* to the accumulated output."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes one AST node against *env*, appending generated text to *output*.

  VarNode and RangeNode mutate *env* (their bindings stay visible to later
  siblings); every other construct runs with a cloned environment so its
  bindings stay local.  Exits on an unknown node type.
  """
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    result = Output()
    # Evaluate the variable's body in an isolated env, then bind the text.
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # Iterate the previously declared $range, inclusive of both bounds.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node in *code_node* against *env*, writing to *output*."""
  for node in code_node.atomic_code:
    RunAtomicCode(env, node, output)
def IsSingleLineComment(cur_line):
  """Returns True iff *cur_line* contains a // comment marker."""
  return cur_line.find('//') != -1
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns True iff *cur_line* starts or continues a preprocessor directive.

  A line continues a directive when the previously emitted line ends with a
  backslash.  Always returns a bool; the old code returned the (empty)
  prev_lines list itself as its falsy value, leaking an implementation
  detail to callers that only ever test truthiness.
  """
  if cur_line.lstrip().startswith('#'):
    return True
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a // comment line to 80 columns, appending result lines to *output*.

  Any code before the comment is emitted unchanged on its own line; the
  comment text is then re-flowed word by word under a '// ' prefix aligned
  with the original comment (or with the code's indentation).
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Whole-line comment: keep its original column.
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word+punctuation segments so wrapping never breaks a word.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a code line to 80 columns, appending result lines to *output*.

  *line_concat* is appended to every wrapped (non-final) line -- ' \\' for
  preprocessor directives, '' for plain code.  Continuation lines get four
  extra spaces of indentation.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '                             # Prefix of the current line
  max_len = 80 - indent - len(line_concat)        # Maximum length of the current line
  new_prefix = prefix + 4*' '                     # Prefix of a continuation line
  new_max_len = max_len - 4                       # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a preprocessor line, using ' \\' continuations as C requires."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps an ordinary code line with no continuation characters."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns True iff *line* contains the start of a /* IWYU pragma comment.

  The needle is a fixed literal, so a regex search is unnecessary; plain
  substring containment is clearer, cheaper, and returns a proper bool
  instead of a match object (callers only test truthiness).
  """
  return '/* IWYU pragma: ' in line
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns truthy iff *line* is a header guard, #include, or IWYU pragma.

  These lines are exempt from the 80-column wrapping rule.
  """
  return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
          re.match(r'^#include\s', line) or
          # Don't break IWYU pragmas, either; that causes iwyu.py problems.
          re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
  """Appends *line* to *output*, wrapping it to 80 columns when needed.

  Dispatches to the comment, preprocessor-directive, or plain-code wrapper;
  header guards, #includes, and IWYU pragmas are exempt from wrapping.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace; result ends with '\\n'."""
  wrapped = []
  for source_line in string.splitlines():
    WrapLongLine(source_line, wrapped)
  stripped = [line.rstrip() for line in wrapped]
  return '\n'.join(stripped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text.

  Pipeline: strip $$ meta comments, parse to an AST, execute it with an
  empty environment, then beautify (wrap/trim) the generated output.
  """
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
def main(argv):
  """Entry point: converts the .pump file named by the last argument.

  Writes FOO for FOO.pump (with a generated-file banner), or prints to
  stdout when the input name lacks the .pump suffix.  Python 2 only: uses
  the `file()` builtin and print statements.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)
  file_path = argv[-1]
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    # FOO.pump generates FOO alongside it.
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # Trailing comma suppresses print's extra newline (Python 2).
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
dwoods/gn-maps | geonode/maps/management/__init__.py | 9 | 1764 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
import logging
logger = logging.getLogger(__name__)
if "notification" in settings.INSTALLED_APPS:
    import notification
    def create_notice_types(app, created_models, verbosity, **kwargs):
        """Registers the map-related NoticeTypes after syncdb creates tables."""
        # NOTE(review): relies on `import notification` making the
        # `notification.models` submodule available as an attribute --
        # confirm the package imports it as a side effect.
        notification.models.NoticeType.create("map_created", _("Map Created"), _("A Map was created"))
        notification.models.NoticeType.create("map_comment", _("Comment on Map"), _("A map was commented on"))
        notification.models.NoticeType.create("map_rated", _("Rating for Map"), _("A rating was given to a map"))
    # Run after every syncdb of the notification app so the types exist.
    signals.post_syncdb.connect(create_notice_types, sender=notification.models)
    logger.info("Notifications Configured for geonode.maps.management.commands")
else:
    logger.info("Skipping creation of NoticeTypes for geonode.maps.management.commands, since notification app was not found.")
| gpl-3.0 |
deepmind/spriteworld | tests/renderers/pil_renderer_test.py | 1 | 3531 | # Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for pil_renderer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
from absl.testing import absltest
import numpy as np
from spriteworld import sprite
from spriteworld.renderers import pil_renderer
class PilRendererTest(absltest.TestCase):
  """Exercises the PIL renderer: basic rendering, background color,
  occlusion order, anti-aliasing, and custom color conversion."""
  def _get_sprites(self):
    """Get list of sprites used as a shared fixture (later sprites occlude earlier ones)."""
    sprites = [
        sprite.Sprite(
            x=0.75, y=0.95, shape='spoke_6', scale=0.2, c0=20, c1=50, c2=80),
        sprite.Sprite(
            x=0.2, y=0.3, shape='triangle', scale=0.1, c0=150, c1=255, c2=100),
        sprite.Sprite(
            x=0.7, y=0.5, shape='square', scale=0.3, c0=0, c1=255, c2=0),
        sprite.Sprite(
            x=0.5, y=0.5, shape='square', scale=0.3, c0=255, c1=0, c2=0),
    ]
    return sprites
  def testBasicFunctionality(self):
    # Smoke test: rendering must not raise.
    renderer = pil_renderer.PILRenderer(image_size=(64, 64))
    renderer.render(self._get_sprites())
  def testBackground(self):
    # A pixel outside all sprites must show the background color.
    bg_color = (5, 6, 7)
    renderer = pil_renderer.PILRenderer(image_size=(64, 64), bg_color=bg_color)
    image = renderer.render(self._get_sprites())
    self.assertSequenceEqual(list(image[5, 5]), bg_color)
  def testOcclusion(self):
    # The red square is listed last, so it must occlude the green one.
    renderer = pil_renderer.PILRenderer(image_size=(64, 64))
    image = renderer.render(self._get_sprites())
    self.assertSequenceEqual(list(image[32, 32]), [255, 0, 0])
    self.assertSequenceEqual(list(image[32, 50]), [0, 255, 0])
  def testAntiAliasing(self):
    renderer = pil_renderer.PILRenderer(image_size=(16, 16), anti_aliasing=5)
    image = renderer.render(self._get_sprites())
    self.assertSequenceEqual(list(image[4, 6]), [0, 0, 0])
    self.assertSequenceEqual(list(image[6, 6]), [255, 0, 0])
    # Python2 and Python3 give slightly different anti-aliasing, so we specify
    # bounds for border values:
    self.assertTrue(all(image[5, 6] >= [50, 0, 0]))
    self.assertTrue(all(image[5, 6] <= [120, 30, 0]))
    self.assertTrue(all(image[7, 6] >= [200, 0, 0]))
    self.assertTrue(all(image[7, 6] <= [255, 50, 0]))
    # With anti_aliasing=1 edges must be hard (no blended border pixels).
    renderer = pil_renderer.PILRenderer(image_size=(16, 16), anti_aliasing=1)
    image = renderer.render(self._get_sprites())
    self.assertSequenceEqual(list(image[4, 6]), [0, 0, 0])
    self.assertSequenceEqual(list(image[6, 6]), [255, 0, 0])
    self.assertSequenceEqual(list(image[7, 6]), [255, 0, 0])
  def testColorToRGB(self):
    # A custom HSV->RGB converter must be applied to sprite colors.
    s = sprite.Sprite(x=0.5, y=0.5, shape='square', c0=0.2, c1=0.5, c2=0.5)
    def _color_to_rgb(c):
      return tuple((255 * np.array(colorsys.hsv_to_rgb(*c))).astype(np.uint8))
    renderer = pil_renderer.PILRenderer(
        image_size=(64, 64), color_to_rgb=_color_to_rgb)
    image = renderer.render([s])
    self.assertSequenceEqual(list(image[32, 32]), [114, 127, 63])
absltest.main()
| apache-2.0 |
hovatterz/yaml-cpp.core | test/gmock-1.7.0/gtest/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # '\r\n' must be handled before bare '\r' to avoid doubling newlines.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  location = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
  return location.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces every stack trace body with a '(omitted)' placeholder."""
  # DOTALL lets '.' span lines; non-greedy stops at the first blank line.
  trace = re.compile(r'Stack trace:.*?\n\n', re.DOTALL)
  return trace.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # DOTALL lets '.' span lines; non-greedy stops at the first blank line.
  trace = re.compile(r'Stack trace:.*?\n\n', re.DOTALL)
  return trace.sub('', output)
def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""
  timing = re.compile(r'\(\d+ ms')
  return timing.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers output the name of type 'unsigned int' as 'unsigned';
  # the pattern is a fixed literal, so plain replace is equivalent.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison.

  On non-Windows platforms this is the identity; on Windows it strips ANSI
  color codes and rewrites failure headers/locations into the MSVC format.
  """
  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Replaces all numeric test counts in a test program's output with '?'."""
  # Applied in order; more specific patterns come before the generic ones.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First strip complete [ RUN ] ... [ FAILED / OK ] spans for the pattern,
  # then any leftover single lines mentioning it (e.g. summary lines).
  test_output = re.sub(
      r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Apply every normalization pass in the canonical order.
  for transform in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = transform(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
        environment variables to set, and element 1 is a string with
        the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
  return p.output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
        environment variables to set, and element 1 is a string with
        the command and any flags.
  """
  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands.

  The concatenation order must match the golden file's layout.
  """
  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares normalized gtest_output_test_ output against the golden file."""
  def RemoveUnsupportedTests(self, test_output):
    """Strips golden-file sections for features this build does not support."""
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output
  def testOutput(self):
    """Asserts actual (normalized) output equals the golden file."""
    output = GetOutputOfAllCommands()
    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      # Partial-feature builds: compare with counts and unsupported
      # sections removed, normalized to the current platform.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| mit |
NunoEdgarGub1/nupic | tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py | 8 | 16830 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { u'A': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'B': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'C': { 'fieldname': u'precip',
'n': 300,
'name': u'precip',
'type': 'SDRCategoryEncoder',
'w': 21},
u'D': { 'clipInput': True,
'fieldname': u'visitor_winloss',
'maxval': 0.78600000000000003,
'minval': 0.0,
'n': 150,
'name': u'visitor_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'E': { 'clipInput': True,
'fieldname': u'home_winloss',
'maxval': 0.69999999999999996,
'minval': 0.0,
'n': 150,
'name': u'home_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'F': { 'dayOfWeek': (7, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'G': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 1),
'type': 'DateEncoder'},
u'pred': { 'clipInput': True,
'fieldname': u'attendance',
'maxval': 36067,
'minval': 0,
'n': 150,
'name': u'attendance',
'type': 'AdaptiveScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  # The CLA classifier expects the step count encoded as a string.
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'baseball benchmark test',
u'streams': [ { u'columns': [ u'daynight',
u'precip',
u'home_winloss',
u'visitor_winloss',
u'attendance',
u'timestamp'],
u'info': u'OAK01.csv',
u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='trivial_aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
# Bundle the model configuration and the experiment control section into the
# API object consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| gpl-3.0 |
Pablo126/SSBW | Tarea4/tarea4/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base prober for multi-byte encodings.

    Combines a coding state machine (validates byte sequences against the
    encoding's legal structure) with a character distribution analyzer
    (scores how typical the decoded characters are for the language).
    Subclasses are expected to assign both collaborators and implement
    get_charset_name().
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Subclasses must supply these two collaborators before feed().
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Sliding window of the last two bytes seen, so a character that
        # straddles two feed() calls can still be analyzed.
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset the state machine, the analyzer, and the byte window."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete probers return their charset name.
        pass

    def feed(self, aBuf):
        """Consume a buffer of bytes and return the detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Drive the coding state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; hand it to the
                # distribution analyzer for language scoring.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character may have started in the previous buffer:
                    # use the remembered trailing byte.
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: stop probing once confidence is decisive.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Confidence comes entirely from the distribution analysis.
        return self._mDistributionAnalyzer.get_confidence()
| gpl-3.0 |
Affirm/moto | moto/ec2/responses/elastic_ip_addresses.py | 3 | 6250 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class ElasticIPAddresses(BaseResponse):
    """EC2 query-API handlers for Elastic IP (EIP) address actions.

    Each handler pulls its parameters from the parsed querystring,
    delegates to the EC2 backend, and renders the matching XML template.
    """

    def allocate_address(self):
        """AllocateAddress: reserve a new EIP ('standard' domain by default)."""
        domain = self._get_param('Domain', if_none='standard')
        if self.is_not_dryrun('AllocateAddress'):
            address = self.ec2_backend.allocate_address(domain)
            return self.response_template(ALLOCATE_ADDRESS_RESPONSE).render(
                address=address)

    def associate_address(self):
        """AssociateAddress: attach an EIP to an instance or network interface."""
        instance, eni = None, None
        if "InstanceId" in self.querystring:
            instance = self.ec2_backend.get_instance(
                self._get_param('InstanceId'))
        elif "NetworkInterfaceId" in self.querystring:
            eni = self.ec2_backend.get_network_interface(
                self._get_param('NetworkInterfaceId'))
        else:
            self.ec2_backend.raise_error(
                "MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.")

        # Reassociation of an already-attached address must be opted into.
        reassociate = ("AllowReassociation" in self.querystring and
                       self._get_param('AllowReassociation') == "true")

        if self.is_not_dryrun('AssociateAddress'):
            if instance or eni:
                if "PublicIp" in self.querystring:
                    eip = self.ec2_backend.associate_address(
                        instance=instance, eni=eni,
                        address=self._get_param('PublicIp'),
                        reassociate=reassociate)
                elif "AllocationId" in self.querystring:
                    eip = self.ec2_backend.associate_address(
                        instance=instance, eni=eni,
                        allocation_id=self._get_param('AllocationId'),
                        reassociate=reassociate)
                else:
                    self.ec2_backend.raise_error(
                        "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.")
            else:
                self.ec2_backend.raise_error(
                    "MissingParameter", "Invalid request, expect either instance or ENI.")
            return self.response_template(ASSOCIATE_ADDRESS_RESPONSE).render(
                address=eip)

    def describe_addresses(self):
        """DescribeAddresses: list EIPs, optionally narrowed by ids/ips/filters."""
        addresses = self.ec2_backend.describe_addresses(
            self._get_multi_param('AllocationId'),
            self._get_multi_param('PublicIp'),
            filters_from_querystring(self.querystring))
        return self.response_template(DESCRIBE_ADDRESS_RESPONSE).render(
            addresses=addresses)

    def disassociate_address(self):
        """DisassociateAddress: detach an EIP by public IP or association id."""
        if self.is_not_dryrun('DisAssociateAddress'):
            if "PublicIp" in self.querystring:
                self.ec2_backend.disassociate_address(
                    address=self._get_param('PublicIp'))
            elif "AssociationId" in self.querystring:
                self.ec2_backend.disassociate_address(
                    association_id=self._get_param('AssociationId'))
            else:
                self.ec2_backend.raise_error(
                    "MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.")
            return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render()

    def release_address(self):
        """ReleaseAddress: free an EIP by public IP or allocation id."""
        if self.is_not_dryrun('ReleaseAddress'):
            if "PublicIp" in self.querystring:
                self.ec2_backend.release_address(
                    address=self._get_param('PublicIp'))
            elif "AllocationId" in self.querystring:
                self.ec2_backend.release_address(
                    allocation_id=self._get_param('AllocationId'))
            else:
                self.ec2_backend.raise_error(
                    "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.")
            return self.response_template(RELEASE_ADDRESS_RESPONSE).render()
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.eni %}
<networkInterfaceId>{{ address.eni.id }}</networkInterfaceId>
{% else %}
<networkInterfaceId/>
{% endif %}
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""
| apache-2.0 |
bmistry13/kafka | system_test/utils/system_test_utils.py | 88 | 23697 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# system_test_utils.py
# ===================================
import copy
import difflib
import inspect
import json
import logging
import os
import re
import signal
import socket
import subprocess
import sys
import time
logger = logging.getLogger("namedLogger")
aLogger = logging.getLogger("anonymousLogger")
thisClassName = '(system_test_utils)'
d = {'name_of_class': thisClassName}
def get_current_unix_timestamp():
    """Return the current Unix time as a string with microsecond precision."""
    return "{0:.6f}".format(time.time())
def get_local_hostname():
    """Return this machine's hostname."""
    hostname = socket.gethostname()
    return hostname
def sys_call(cmdStr):
    """Run *cmdStr* through the shell and return combined stdout/stderr."""
    #logger.info("executing command [" + cmdStr + "]", extra=d)
    proc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    chunks = []
    for outLine in proc.stdout.readlines():
        chunks.append(outLine)
    return "".join(chunks)
def remote_async_sys_call(host, cmd):
    """Fire-and-forget execution of *cmd* on *host* via ssh."""
    remoteCmd = "ssh " + host + " \"" + cmd + "\""
    logger.info("executing command [" + remoteCmd + "]", extra=d)
    async_sys_call(remoteCmd)
def remote_sys_call(host, cmd):
    """Run *cmd* on *host* via ssh and wait for it to complete."""
    remoteCmd = "ssh " + host + " \"" + cmd + "\""
    logger.info("executing command [" + remoteCmd + "]", extra=d)
    sys_call(remoteCmd)
def get_dir_paths_with_prefix(fullPath, dirNamePrefix):
    """Return absolute paths of the sub-directories of *fullPath* whose
    names start with *dirNamePrefix*.

    Fix: the original called os.path.isfile() on the bare entry name,
    which resolves against the process CWD rather than *fullPath*, so
    regular files inside *fullPath* could be wrongly included. The check
    now tests the joined path and requires a real directory.
    """
    dirsList = []
    for dirName in os.listdir(fullPath):
        candidate = os.path.join(fullPath, dirName)
        # Only real directories with the requested name prefix qualify.
        if os.path.isdir(candidate) and dirName.startswith(dirNamePrefix):
            dirsList.append(os.path.abspath(candidate))
    return dirsList
def get_testcase_prop_json_pathname(testcasePathName):
    """Return the pathname of the testcase's <name>_properties.json file."""
    baseName = os.path.basename(testcasePathName)
    return "%s/%s_properties.json" % (testcasePathName, baseName)
def get_json_list_data(infile):
    """Parse *infile* as JSON and return a flat list of shallow copies of
    every dict found inside the file's list-valued top-level fields."""
    with open(infile, "r") as fh:
        json_data = json.loads(fh.read())

    data_list = []
    for settings in json_data.values():
        # Only list-valued fields contribute entries.
        if type(settings) == list:
            for setting in settings:
                if type(setting) == dict:
                    data_list.append(dict(setting))
    return data_list
def get_dict_from_list_of_dicts(listOfDicts, lookupKey, lookupVal):
    """Return the dicts in *listOfDicts* whose *lookupKey* maps to *lookupVal*.

    If either lookup argument is None, every dict is returned.

    Example list of dicts:
      {'kafka_home': '...', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
      {'kafka_home': '...', 'entity_id': '1', 'role': 'broker',    'hostname': 'localhost'}

    Usage:
      get_dict_from_list_of_dicts(cfgList, "entity_id", "0")
        -> [the zookeeper dict]
      get_dict_from_list_of_dicts(cfgList, None, None)
        -> all dicts

    Fix: the original's "no lookup" branch compared keys against the
    undefined name `fieldToRetrieve` (copy/paste from
    get_data_from_list_of_dicts) and raised NameError.
    """
    if lookupKey is None or lookupVal is None:
        # No filtering requested: return every entry.
        return list(listOfDicts)

    retList = []
    for entry in listOfDicts:
        if entry.get(lookupKey) == lookupVal:
            retList.append(entry)
    return retList
def get_data_from_list_of_dicts(listOfDicts, lookupKey, lookupVal, fieldToRetrieve):
    """Collect *fieldToRetrieve* values from the matching dicts.

    With both *lookupKey* and *lookupVal* given, only dicts where
    d[lookupKey] == lookupVal contribute; otherwise every dict that has
    *fieldToRetrieve* contributes.

    Usage:
      get_data_from_list_of_dicts(cfgList, "entity_id", "0", "role")
        -> ['zookeeper']
      get_data_from_list_of_dicts(cfgList, None, None, "role")
        -> ['zookeeper', 'broker']

    Cleanup vs. original (behavior unchanged): no shadowing of the `dict`
    builtin, no bare `except:`, and the dead try/except around a key that
    was just matched is gone.
    """
    retList = []
    if lookupKey is None or lookupVal is None:
        for entry in listOfDicts:
            # Original appended once per dict that carries the field.
            if fieldToRetrieve in entry:
                retList.append(entry[fieldToRetrieve])
    else:
        for entry in listOfDicts:
            if entry.get(lookupKey) == lookupVal:
                if fieldToRetrieve in entry:
                    retList.append(entry[fieldToRetrieve])
                else:
                    logger.debug("field not found: " + fieldToRetrieve, extra=d)
    return retList
def get_data_by_lookup_keyval(listOfDict, lookupKey, lookupVal, fieldToRetrieve):
    """Return the first matching field value, or "" when there is none."""
    matches = get_data_from_list_of_dicts(listOfDict, lookupKey, lookupVal, fieldToRetrieve)
    return matches[0] if len(matches) > 0 else ""
def get_json_dict_data(infile):
    """Parse *infile* as JSON and return only its non-list top-level fields."""
    with open(infile, "r") as fh:
        json_data = json.loads(fh.read())
    # Keep scalar/dict fields; drop list-valued ones (handled elsewhere).
    return dict((key, val) for key, val in json_data.items()
                if type(val) != list)
def get_remote_child_processes(hostname, pid):
    """Return the pids of *pid* and its descendants on *hostname*.

    Runs a small shell loop over ssh that repeatedly asks `ps` for the
    children of the current pid, echoing each pid found; the pids are
    collected top-down.
    NOTE(review): the loop variable `pid` is overwritten inside the inner
    `for`, so only the last child per level is followed further — verify
    against multi-child process trees.
    """
    pidStack = []

    cmdList = ['''ssh ''' + hostname,
               ''''pid=''' + pid + '''; prev_pid=""; echo $pid;''',
               '''while [[ "x$pid" != "x" ]];''',
               '''do prev_pid=$pid;''',
               ''' for child in $(ps -o pid,ppid ax | awk "{ if ( \$2 == $pid ) { print \$1 }}");''',
               ''' do echo $child; pid=$child;''',
               ''' done;''',
               ''' if [ $prev_pid == $pid ]; then''',
               ''' break;''',
               ''' fi;''',
               '''done' 2> /dev/null''']

    cmdStr = " ".join(cmdList)
    logger.debug("executing command [" + cmdStr, extra=d)

    subproc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE)
    for line in subproc.stdout.readlines():
        procId = line.rstrip('\n')
        pidStack.append(procId)
    return pidStack
def get_child_processes(pid):
    """Return [pid, child, grandchild, ...] for the local process *pid*.

    Walks down the process tree one `ps --ppid` query at a time and stops
    at the first pid that reports no child.
    NOTE(review): only the first child line returned by `ps` is followed,
    so sibling subtrees are not included — confirm that suits the caller.
    """
    pidStack = []

    currentPid = pid
    parentPid = ""  # assigned but never used; kept as in the original
    pidStack.append(pid)

    while ( len(currentPid) > 0 ):
        psCommand = subprocess.Popen("ps -o pid --ppid %s --noheaders" % currentPid, shell=True, stdout=subprocess.PIPE)
        psOutput = psCommand.stdout.read()
        outputLine = psOutput.rstrip('\n')
        childPid = outputLine.lstrip()
        if ( len(childPid) > 0 ):
            pidStack.append(childPid)
            currentPid = childPid
        else:
            break
    return pidStack
def sigterm_remote_process(hostname, pidStack):
    """Send SIGTERM (graceful stop, kill -15) to each pid on *pidStack*
    via ssh. Pids are popped, so the last-pushed (deepest) ones go first."""
    while ( len(pidStack) > 0 ):
        pid = pidStack.pop()
        cmdStr = "ssh " + hostname + " 'kill -15 " + pid + "'"

        try:
            logger.debug("executing command [" + cmdStr + "]", extra=d)
            sys_call_return_subproc(cmdStr)
        except:
            print "WARN - pid:",pid,"not found"
            raise
def sigkill_remote_process(hostname, pidStack):
    """Send SIGKILL (forced stop, kill -9) to each pid on *pidStack*
    via ssh. Pids are popped, so the last-pushed (deepest) ones go first."""
    while ( len(pidStack) > 0 ):
        pid = pidStack.pop()
        cmdStr = "ssh " + hostname + " 'kill -9 " + pid + "'"

        try:
            logger.debug("executing command [" + cmdStr + "]", extra=d)
            sys_call_return_subproc(cmdStr)
        except:
            print "WARN - pid:",pid,"not found"
            raise
def simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTimeInSeconds):
    """Emulate a long GC pause on *hostname*: SIGSTOP every pid on
    *pidStack*, sleep for *pauseTimeInSeconds*, then SIGCONT them all."""
    pausedPidStack = []

    # pause the processes
    while len(pidStack) > 0:
        pid = pidStack.pop()
        pausedPidStack.append(pid)
        cmdStr = "ssh " + hostname + " 'kill -SIGSTOP " + pid + "'"

        try:
            logger.debug("executing command [" + cmdStr + "]", extra=d)
            sys_call_return_subproc(cmdStr)
        except:
            print "WARN - pid:",pid,"not found"
            raise

    time.sleep(int(pauseTimeInSeconds))

    # resume execution of the processes
    while len(pausedPidStack) > 0:
        pid = pausedPidStack.pop()
        cmdStr = "ssh " + hostname + " 'kill -SIGCONT " + pid + "'"

        try:
            logger.debug("executing command [" + cmdStr + "]", extra=d)
            sys_call_return_subproc(cmdStr)
        except:
            print "WARN - pid:",pid,"not found"
            raise
def terminate_process(pidStack):
    """SIGTERM every local pid on *pidStack* (popped, so LIFO order)."""
    while ( len(pidStack) > 0 ):
        pid = pidStack.pop()
        try:
            os.kill(int(pid), signal.SIGTERM)
        except:
            print "WARN - pid:",pid,"not found"
            raise
def convert_keyval_to_cmd_args(configFilePathname):
    """Translate a key=value properties file into a " --key value" CLI string.

    Lines without '=' become bare " --key" flags. Only the first '=' splits
    (split('=', 1)), so values may themselves contain '='.
    """
    cmdArg = ""
    inlines = open(configFilePathname, "r").readlines()
    for inline in inlines:
        line = inline.rstrip()
        tokens = line.split('=', 1)

        if (len(tokens) == 2):
            cmdArg = cmdArg + " --" + tokens[0] + " " + tokens[1]
        elif (len(tokens) == 1):
            cmdArg = cmdArg + " --" + tokens[0]
        else:
            # split('=', 1) yields at most 2 tokens, so this is unreachable;
            # kept as a defensive trace from the original.
            print "ERROR: unexpected arguments list", line
    return cmdArg
def async_sys_call(cmd_str):
    """Launch *cmd_str* in a shell without waiting for it to finish."""
    subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT)
def sys_call_return_subproc(cmd_str):
    """Start *cmd_str* in a shell and return the Popen handle
    (stderr merged into stdout)."""
    return subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
def remote_host_file_exists(hostname, pathname):
    """Return True iff *pathname* exists on *hostname* (probed with
    `ls` over ssh)."""
    probeCmd = "ssh " + hostname + " 'ls " + pathname + "'"
    logger.debug("executing command: [" + probeCmd + "]", extra=d)
    subproc = sys_call_return_subproc(probeCmd)

    for outputLine in subproc.stdout.readlines():
        if "No such file or directory" in outputLine:
            return False
    return True
def remote_host_directory_exists(hostname, path):
    """Return True iff directory *path* exists on *hostname* (probed with
    `ls -d` over ssh)."""
    probeCmd = "ssh " + hostname + " 'ls -d " + path + "'"
    logger.debug("executing command: [" + probeCmd + "]", extra=d)
    subproc = sys_call_return_subproc(probeCmd)

    for outputLine in subproc.stdout.readlines():
        if "No such file or directory" in outputLine:
            return False
    return True
def remote_host_processes_stopped(hostname):
    """Return True when no kafka-related processes remain on *hostname*.

    Greps the remote process list for java/run-/producer/consumer/jmxtool/
    kafka entries (excluding grep itself and Bootstrap) and checks that the
    resulting count is zero.
    """
    cmdStr = "ssh " + hostname + \
             " \"ps auxw | grep -v grep | grep -v Bootstrap | grep -i 'java\|run\-\|producer\|consumer\|jmxtool\|kafka' | wc -l\" 2> /dev/null"
    logger.info("executing command: [" + cmdStr + "]", extra=d)

    subproc = sys_call_return_subproc(cmdStr)
    for line in subproc.stdout.readlines():
        line = line.rstrip('\n')
        logger.info("no. of running processes found : [" + line + "]", extra=d)
        if line == '0':
            return True
    return False
def setup_remote_hosts(systemTestEnv):
    """Sanity-check and prepare every host listed in cluster_config.json.

    - resolves "default" java_home / kafka_home placeholders for localhost
    - verifies the java and kafka directories exist on each host
    - rsyncs the local kafka source tree to remote hosts that lack it
    Returns True on success, False when a required directory or the local
    Java binary cannot be found.
    """
    # sanity check on remote hosts to make sure:
    # - all directories (eg. java_home) specified in cluster_config.json exists in all hosts
    # - no conflicting running processes in remote hosts

    aLogger.info("=================================================")
    aLogger.info("setting up remote hosts ...")
    aLogger.info("=================================================")

    clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList

    localKafkaHome = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/..")

    # when configuring "default" java_home, use JAVA_HOME environment variable, if exists
    # otherwise, use the directory with the java binary
    localJavaHome = os.environ.get('JAVA_HOME')
    if localJavaHome is not None:
        localJavaBin = localJavaHome + '/bin/java'
    else:
        subproc = sys_call_return_subproc("which java")
        for line in subproc.stdout.readlines():
            if line.startswith("which: no "):
                logger.error("No Java binary found in local host", extra=d)
                return False
            else:
                line = line.rstrip('\n')
                localJavaBin = line
                matchObj = re.match("(.*)\/bin\/java$", line)
                localJavaHome = matchObj.group(1)

    listIndex = -1
    for clusterEntityConfigDict in clusterEntityConfigDictList:
        listIndex += 1

        hostname = clusterEntityConfigDict["hostname"]
        kafkaHome = clusterEntityConfigDict["kafka_home"]
        javaHome = clusterEntityConfigDict["java_home"]

        # substitute the "default" placeholders for localhost entries
        if hostname == "localhost" and javaHome == "default":
            clusterEntityConfigDictList[listIndex]["java_home"] = localJavaHome

        if hostname == "localhost" and kafkaHome == "default":
            clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome

        if hostname == "localhost" and kafkaHome == "system_test/migration_tool_testsuite/0.7":
            clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome + "/system_test/migration_tool_testsuite/0.7"

        # re-read the values after the substitutions above
        kafkaHome = clusterEntityConfigDict["kafka_home"]
        javaHome = clusterEntityConfigDict["java_home"]

        logger.debug("checking java binary [" + localJavaBin + "] in host [" + hostname + "]", extra=d)
        if not remote_host_directory_exists(hostname, javaHome):
            logger.error("Directory not found: [" + javaHome + "] in host [" + hostname + "]", extra=d)
            return False

        logger.debug("checking directory [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
        if not remote_host_directory_exists(hostname, kafkaHome):
            logger.info("Directory not found: [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
            if hostname == "localhost":
                return False
            else:
                # remote host is missing kafka: push the local source tree over
                localKafkaSourcePath = systemTestEnv.SYSTEM_TEST_BASE_DIR + "/.."
                logger.debug("copying local copy of [" + localKafkaSourcePath + "] to " + hostname + ":" + kafkaHome, extra=d)
                copy_source_to_remote_hosts(hostname, localKafkaSourcePath, kafkaHome)

    return True
def copy_source_to_remote_hosts(hostname, sourceDir, destDir):
    """Mirror sourceDir to hostname:destDir using rsync.

    Remote files that no longer exist locally are deleted first
    (--delete-before).  The rsync output is discarded, but stdout is
    drained so the child process cannot block on a full pipe buffer;
    the call returns once the child closes its stdout.
    """
    cmdStr = "rsync -avz --delete-before " + sourceDir + "/ " + hostname + ":" + destDir
    logger.info("executing command [" + cmdStr + "]", extra=d)
    subproc = sys_call_return_subproc(cmdStr)
    # Drain stdout to EOF; the output itself is not needed (the original
    # looped over readlines() assigning a dummy variable for this purpose).
    subproc.stdout.readlines()
def remove_kafka_home_dir_at_remote_hosts(hostname, kafkaHome):
    """Delete the Kafka installation directory kafkaHome on hostname.

    As a safety check the directory is only touched when it contains
    bin/kafka-run-class.sh, i.e. actually looks like a Kafka home.
    Otherwise the whole test run is aborted (sys.exit) rather than risk
    an 'rm -rf' of an arbitrary directory named in the cluster config.
    """
    if remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"):
        cmdStr  = "ssh " + hostname + " 'chmod -R 777 " + kafkaHome + "'"
        logger.info("executing command [" + cmdStr + "]", extra=d)
        sys_call(cmdStr)

        cmdStr  = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'"
        logger.info("executing command [" + cmdStr + "]", extra=d)
        #sys_call(cmdStr)
    else:
        # Bug fix: the original referenced cmdStr here, but cmdStr is only
        # bound in the branch above, so this path raised NameError instead
        # of logging the warning.  Build the message from kafkaHome instead.
        logger.warn("refusing to run destructive command [rm -rf " + kafkaHome + "] on host [" + hostname + "]", extra=d)
        logger.warn("check config file: system_test/cluster_config.properties", extra=d)
        logger.warn("aborting test...", extra=d)
        sys.exit(1)
def get_md5_for_file(filePathName, blockSize=8192):
    """Return the raw (binary) MD5 digest of the file at filePathName.

    The file is read in blockSize-byte chunks so arbitrarily large files
    can be hashed without loading them fully into memory.  The file handle
    is closed deterministically (the original leaked it).
    """
    md5 = hashlib.md5()
    with open(filePathName, 'rb') as f:
        # iter() with a sentinel yields chunks until read() returns b''
        for data in iter(lambda: f.read(blockSize), b''):
            md5.update(data)
    return md5.digest()
def load_cluster_config(clusterConfigPathName, clusterEntityConfigDictList):
    """Populate clusterEntityConfigDictList from a cluster-config JSON file.

    The list is emptied and refilled IN PLACE (callers hold references to
    the same list object), one dict per entity found under the
    "cluster_config" key of the JSON document.
    """
    # empty the list in place so existing references see the new content
    clusterEntityConfigDictList[:] = []

    # retrieve each entity's data from the cluster config json file as
    # "dict" and enter them into the "list"; 'with' closes the file handle
    # deterministically (the original leaked it)
    with open(clusterConfigPathName, "r") as jsonFile:
        jsonData = json.load(jsonFile)
    for key, cfgList in jsonData.items():
        if key == "cluster_config":
            clusterEntityConfigDictList.extend(cfgList)
def setup_remote_hosts_with_testcase_level_cluster_config(systemTestEnv, testCasePathName):
    """Install the most specific cluster config available for a testcase.

    Resolution order (most specific wins):
      1. <testcase dir>/cluster_config.json                    - loaded fresh
      2. the testsuite-level config previously saved in
         systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite
      3. the system-test-level config saved in
         systemTestEnv.clusterEntityConfigDictListInSystemTestLevel

    Afterwards the remote hosts are sanity-checked; on failure the whole
    test run is aborted via sys.exit(1).
    """
    # =======================================================================
    # starting a new testcase, check for local cluster_config.json
    # =======================================================================
    # 1. if there is a xxxx_testsuite/testcase_xxxx/cluster_config.json
    #    => load it into systemTestEnv.clusterEntityConfigDictList
    # 2. if there is NO testcase_xxxx/cluster_config.json but has a xxxx_testsuite/cluster_config.json
    #    => restore systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite
    # 3. if there is NO testcase_xxxx/cluster_config.json NOR xxxx_testsuite/cluster_config.json
    #    => restore system_test/cluster_config.json
    testCaseLevelClusterConfigPathName = testCasePathName + "/cluster_config.json"

    if os.path.isfile(testCaseLevelClusterConfigPathName):
        # if there is a cluster_config.json in this directory, load it and use it for this testsuite
        logger.info("found a new cluster_config : " + testCaseLevelClusterConfigPathName, extra=d)

        # empty the current cluster config list
        systemTestEnv.clusterEntityConfigDictList[:] = []

        # load the cluster config for this testcase level
        load_cluster_config(testCaseLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)

        # back up this testcase level cluster config
        systemTestEnv.clusterEntityConfigDictListLastFoundInTestCase = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)

    elif len(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) > 0:
        # if there is NO testcase_xxxx/cluster_config.json, but has a xxxx_testsuite/cluster_config.json
        # => restore the config in xxxx_testsuite/cluster_config.json

        # empty the current cluster config list
        # NOTE(review): the attribute is rebound to a brand-new list on the
        # next statement, so this in-place emptying only matters to callers
        # that captured a reference to the OLD list object - confirm intended.
        systemTestEnv.clusterEntityConfigDictList[:] = []

        # restore the system_test/cluster_config.json
        systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite)
    else:
        # if there is NONE, restore the config in system_test/cluster_config.json

        # empty the current cluster config list (see NOTE above about rebinding)
        systemTestEnv.clusterEntityConfigDictList[:] = []

        # restore the system_test/cluster_config.json
        systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)

    # set up remote hosts; abort the entire run if any host fails the check
    if not setup_remote_hosts(systemTestEnv):
        logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
        print
        sys.exit(1)
    print
def setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName):
    """Install the cluster config for a new testsuite.

    Uses <testsuite dir>/cluster_config.json when present (and backs it up
    for the following testcases); otherwise falls back to the
    system-test-level config.  The remote hosts are then sanity-checked;
    on failure the whole test run is aborted via sys.exit(1).
    """
    # =======================================================================
    # starting a new testsuite, check for local cluster_config.json:
    # =======================================================================
    # 1. if there is a xxxx_testsuite/cluster_config.json
    #    => load it into systemTestEnv.clusterEntityConfigDictList
    # 2. if there is NO xxxx_testsuite/cluster_config.json
    #    => restore system_test/cluster_config.json
    testSuiteLevelClusterConfigPathName = testModulePathName + "/cluster_config.json"

    if os.path.isfile(testSuiteLevelClusterConfigPathName):
        # if there is a cluster_config.json in this directory, load it and use it for this testsuite
        logger.info("found a new cluster_config : " + testSuiteLevelClusterConfigPathName, extra=d)

        # empty the current cluster config list
        systemTestEnv.clusterEntityConfigDictList[:] = []

        # load the cluster config for this testsuite level
        load_cluster_config(testSuiteLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)

        # back up this testsuite level cluster config
        systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
    else:
        # if there is NONE, restore the config in system_test/cluster_config.json

        # empty the last testsuite level cluster config list
        systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite[:] = []

        # empty the current cluster config list
        # NOTE(review): the attribute is rebound to a new list on the next
        # statement, so emptying in place here only affects callers holding
        # a reference to the old list object - confirm intended.
        systemTestEnv.clusterEntityConfigDictList[:] = []

        # restore the system_test/cluster_config.json
        systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)

    # set up remote hosts; abort the entire run if any host fails the check
    if not setup_remote_hosts(systemTestEnv):
        logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
        print
        sys.exit(1)
    print
# =================================================
# lists_diff_count
# - find the no. of different items in both lists
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def lists_diff_count(a, b):
    """Return the number of items that differ between lists a and b.

    Multiset semantics: each occurrence counts.  Neither list needs to be
    sorted and neither input list is modified.  Items found in a but not
    (or not often enough) in b are printed as mismatched message ids.
    """
    remaining = list(b)
    onlyInA = []
    for item in a:
        try:
            remaining.remove(item)
        except ValueError:
            # item occurs more times in a than in b (or not at all in b)
            onlyInA.append(item)
    if len(onlyInA) > 0:
        # print() call form works under both Python 2 and 3; the original
        # used Python-2-only print statements
        print("#### Mismatch MessageID")
        print(onlyInA)
    # leftovers in 'remaining' are items only in b; 'onlyInA' only in a
    return len(remaining) + len(onlyInA)
# =================================================
# subtract_list
# - subtract items in listToSubtract from mainList
# and return the resulting list
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def subtract_list(mainList, listToSubtract):
    """Return mainList minus listToSubtract (multiset difference).

    Each item in listToSubtract removes at most one matching occurrence
    from the result.  Neither list needs to be sorted and neither input
    list is modified.
    """
    remainingList = list(mainList)
    for item in listToSubtract:
        try:
            remainingList.remove(item)
        except ValueError:
            # item not present (or all occurrences already removed):
            # nothing to subtract; was a bare 'except' in the original
            pass
    return remainingList
# =================================================
# diff_lists
# - find the diff of 2 lists and return the
# total no. of mismatch from both lists
# - diff of both lists includes:
# - no. of items mismatch
# - ordering of the items
#
# sample lists:
# a = ['8','4','3','2','1']
# b = ['8','3','4','2','1']
#
# difflib will return the following:
# 8
# + 3
# 4
# - 3
# 2
# 1
#
# diff_lists(a,b) returns 2 and prints the following:
# #### only in seq 2 : + 3
# #### only in seq 1 : - 3
# =================================================
def diff_lists(a, b):
    """Return the total number of mismatched items between sequences a and b.

    A mismatch is any difflib delta line present in only one of the two
    sequences, so both missing/extra items and ordering differences are
    counted (see the module banner above for a worked example).
    """
    mismatchCount = 0
    # Bug fix: the original named this local 'd', shadowing the module-level
    # logging 'extra' dict, so logger.debug(..., extra=d) below received the
    # Differ object instead of the intended extra dict.
    differ = difflib.Differ()
    diff = differ.compare(a, b)
    for item in diff:
        # first column of a Differ line: ' ' (common), '-' (seq 1), '+' (seq 2)
        marker = item[0:1].strip()
        if len(marker) > 0:
            mismatchCount += 1
            if '-' in marker:
                logger.debug("#### only in seq 1 : " + item, extra=d)
            elif '+' in marker:
                logger.debug("#### only in seq 2 : " + item, extra=d)
    return mismatchCount
| apache-2.0 |
cliffano/swaggy-jenkins | clients/python/generated/swaggyjenkins/models/github_scmlinks.py | 1 | 3557 | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class GithubScmlinks(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each model attribute.
    openapi_types = {
        '_self': 'Link',
        '_class': 'str'
    }

    # Python attribute name -> key used in the JSON wire format; '_self' is
    # serialized as plain 'self' (reserved as a parameter name in Python).
    attribute_map = {
        '_self': 'self',
        '_class': '_class'
    }

    def __init__(self, _self=None, _class=None):  # noqa: E501
        """GithubScmlinks - a model defined in OpenAPI"""  # noqa: E501
        # Private backing fields for the properties below.  The double
        # leading underscore triggers name mangling (stored as
        # _GithubScmlinks__self / _GithubScmlinks__class), so they cannot
        # collide with the '_self' / '_class' property names.
        self.__self = None
        self.__class = None
        self.discriminator = None
        # Route only non-None constructor arguments through the setters.
        if _self is not None:
            self._self = _self
        if _class is not None:
            self._class = _class

    @property
    def _self(self):
        """Gets the _self of this GithubScmlinks.  # noqa: E501

        :return: The _self of this GithubScmlinks.  # noqa: E501
        :rtype: Link
        """
        return self.__self

    @_self.setter
    def _self(self, _self):
        """Sets the _self of this GithubScmlinks.

        :param _self: The _self of this GithubScmlinks.  # noqa: E501
        :type: Link
        """
        self.__self = _self

    @property
    def _class(self):
        """Gets the _class of this GithubScmlinks.  # noqa: E501

        :return: The _class of this GithubScmlinks.  # noqa: E501
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class):
        """Sets the _class of this GithubScmlinks.

        :param _class: The _class of this GithubScmlinks.  # noqa: E501
        :type: str
        """
        self.__class = _class

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Iterate the declared attributes (via the '_self'/'_class'
        # properties); values that are themselves models - or lists/dicts
        # of models - are converted recursively through their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GithubScmlinks):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| mit |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.2/django/contrib/gis/db/models/proxy.py | 404 | 2512 | """
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
class GeometryProxy(object):
    """Descriptor providing lazy geometry instantiation for model fields.

    The raw value stored on the instance (HEXEWKB/WKT string, or None) is
    only turned into a Geometry object on first attribute access, and the
    built object is cached back onto the instance.
    """

    def __init__(self, klass, field):
        """
        Proxy initializes on the given Geometry class (not an instance) and
        the GeometryField.
        """
        self._klass = klass
        self._field = field

    def __get__(self, obj, type=None):
        """
        Retrieve the geometry for *obj*, building it from the stored raw
        value (HEXEWKB) with the geometry class given at initialization.
        Currently, only GEOS or OGR geometries are supported.
        """
        # Accessed on the class rather than an instance: return the
        # descriptor itself.
        if obj is None:
            return self

        attname = self._field.attname
        raw = obj.__dict__[attname]
        if isinstance(raw, self._klass):
            # Already instantiated (or cached by a previous access).
            return raw
        if raw is None or raw == '':
            return None
        # Build the geometry from the field's raw contents and cache it on
        # the model instance so subsequent accesses skip construction.
        built = self._klass(raw)
        setattr(obj, attname, built)
        return built

    def __set__(self, obj, value):
        """
        Store *value* as the proxied geometry.  Accepts an instance of the
        geometry class (with a matching OGC type), None, or a raw
        HEXEWKB/WKT string/buffer; anything else raises TypeError.
        """
        # The OGC Geometry type of the field.
        gtype = self._field.geom_type
        # The geometry type must match that of the field -- unless the
        # general GeometryField is used.
        if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
            # Fill in the field's SRID if the geometry carries none.
            if value.srid is None:
                value.srid = self._field.srid
        elif value is None or isinstance(value, (basestring, buffer)):
            # None, WKT, HEX, or WKB pass through unchanged.
            pass
        else:
            raise TypeError('cannot set %s GeometryProxy with value of type: %s' % (obj.__class__.__name__, type(value)))

        obj.__dict__[self._field.attname] = value
        return value
| mit |
M4sse/chromium.src | tools/perf/benchmarks/octane.py | 9 | 5659 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Octane 2.0 javascript benchmark.
Octane 2.0 is a modern benchmark that measures a JavaScript engine's performance
by running a suite of tests representative of today's complex and demanding web
applications. Octane's goal is to measure the performance of JavaScript code
found in large, real-world web applications.
Octane 2.0 consists of 17 tests, four more than Octane v1.
"""
import os
from metrics import power
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.util import statistics
from telemetry.value import scalar
_GB = 1024 * 1024 * 1024  # bytes per gibibyte; used for the low-memory check

# Human-readable description of each Octane sub-benchmark, keyed by the
# test name Octane reports; attached to the per-test result values.
DESCRIPTIONS = {
  'CodeLoad':
      'Measures how quickly a JavaScript engine can start executing code '
      'after loading a large JavaScript program, social widget being a common '
      'example. The source for test is derived from open source libraries '
      '(Closure, jQuery) (1,530 lines).',
  'Crypto':
      'Encryption and decryption benchmark based on code by Tom Wu '
      '(1698 lines).',
  'DeltaBlue':
      'One-way constraint solver, originally written in Smalltalk by John '
      'Maloney and Mario Wolczko (880 lines).',
  'EarleyBoyer':
      'Classic Scheme benchmarks, translated to JavaScript by Florian '
      'Loitsch\'s Scheme2Js compiler (4684 lines).',
  'Gameboy':
      'Emulate the portable console\'s architecture and runs a demanding 3D '
      'simulation, all in JavaScript (11,097 lines).',
  'Mandreel':
      'Runs the 3D Bullet Physics Engine ported from C++ to JavaScript via '
      'Mandreel (277,377 lines).',
  'NavierStokes':
      '2D NavierStokes equations solver, heavily manipulates double precision '
      'arrays. Based on Oliver Hunt\'s code (387 lines).',
  'PdfJS':
      'Mozilla\'s PDF Reader implemented in JavaScript. It measures decoding '
      'and interpretation time (33,056 lines).',
  'RayTrace':
      'Ray tracer benchmark based on code by Adam Burmister (904 lines).',
  'RegExp':
      'Regular expression benchmark generated by extracting regular '
      'expression operations from 50 of the most popular web pages '
      '(1761 lines).',
  'Richards':
      'OS kernel simulation benchmark, originally written in BCPL by Martin '
      'Richards (539 lines).',
  'Splay':
      'Data manipulation benchmark that deals with splay trees and exercises '
      'the automatic memory management subsystem (394 lines).',
}
class _OctaneMeasurement(page_test.PageTest):
  """Telemetry page test that runs Octane and reports per-test scores,
  their geometric-mean total, and power usage."""

  def __init__(self):
    super(_OctaneMeasurement, self).__init__(
        action_name_to_run='RunPageInteractions')
    # Created lazily in WillStartBrowser (needs the platform object).
    self._power_metric = None

  def CustomizeBrowserOptions(self, options):
    # Browser flags needed for power measurement.
    power.PowerMetric.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, platform):
    self._power_metric = power.PowerMetric(platform)

  def WillNavigateToPage(self, page, tab):
    # Skip the memory-hungry zlib sub-benchmark on machines with < 1 GB RAM.
    memory_stats = tab.browser.memory_stats
    if ('SystemTotalPhysicalMemory' in memory_stats and
        memory_stats['SystemTotalPhysicalMemory'] < 1 * _GB):
      skipBenchmarks = '"zlib"'
    else:
      skipBenchmarks = ''
    # Hook console.log before the page commits so the per-test score lines
    # Octane prints can be read back from __results after the run.
    page.script_to_evaluate_on_commit = """
        var __results = [];
        var __real_log = window.console.log;
        window.console.log = function(msg) {
          __results.push(msg);
          __real_log.apply(this, [msg]);
        }
        skipBenchmarks = [%s]
        """ % (skipBenchmarks)

  def DidNavigateToPage(self, page, tab):
    # Start power sampling once the page has actually loaded.
    self._power_metric.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    tab.WaitForJavaScriptExpression('window.completed', 10)
    # The run is finished once Octane removes its progress bar; the long
    # timeout covers a full run on slow machines.
    tab.WaitForJavaScriptExpression(
        '!document.getElementById("progress-bar-container")', 1200)

    self._power_metric.Stop(page, tab)
    self._power_metric.AddResults(tab, results)

    results_log = tab.EvaluateJavaScript('__results')
    all_scores = []
    for output in results_log:
      # Split the results into score and test name.
      # results log e.g., "Richards: 18343"
      # NOTE(review): maxsplit=2 combined with an assert of exactly 2 parts;
      # maxsplit=1 would state the intent directly - verify score text can
      # never itself contain ': '.
      score_and_name = output.split(': ', 2)
      assert len(score_and_name) == 2, \
        'Unexpected result format "%s"' % score_and_name
      if 'Skipped' not in score_and_name[1]:
        name = score_and_name[0]
        score = int(score_and_name[1])
        results.AddValue(scalar.ScalarValue(
            results.current_page, name, 'score', score, important=False,
            description=DESCRIPTIONS.get(name)))

        # Collect all test scores to compute geometric mean.
        all_scores.append(score)

    total = statistics.GeometricMean(all_scores)
    results.AddSummaryValue(
        scalar.ScalarValue(None, 'Total.Score', 'score', total,
                           description='Geometric mean of the scores of each '
                                       'individual benchmark in the Octane '
                                       'benchmark collection.'))
class Octane(benchmark.Benchmark):
  """Google's Octane JavaScript benchmark.

  http://octane-benchmark.googlecode.com/svn/latest/index.html
  """
  # The page-test class Telemetry instantiates to drive the run.
  test = _OctaneMeasurement

  def CreatePageSet(self, options):
    """Build the single-page set pointing at the hosted Octane runner."""
    ps = page_set.PageSet(
        archive_data_file='../page_sets/data/octane.json',
        file_path=os.path.abspath(__file__),
        bucket=page_set.PUBLIC_BUCKET)
    # ?auto=1 starts the benchmark without user interaction;
    # make_javascript_deterministic is disabled, presumably because the
    # benchmark needs real timers/randomness - confirm before changing.
    ps.AddUserStory(page_module.Page(
        'http://octane-benchmark.googlecode.com/svn/latest/index.html?auto=1',
        ps, ps.base_dir, make_javascript_deterministic=False))
    return ps
| bsd-3-clause |
arnedesmedt/dotfiles | .config/sublime-text-3/Packages.symlinkfollow/pygments/all/pygments/formatters/bbcode.py | 50 | 3314 | # -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
    """
    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments
    before posting it there.

    BBcode has no commonly supported tags for background colors or borders,
    so this formatter does not produce them.  Note that some board systems
    (e.g. phpBB) strip color tags inside their [code] tag, and text inside
    [code] is usually shown in a monospace font with indentation preserved
    anyway (use the ``monofont`` option when it is not).

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `codetag`
        If set to true, put the output into ``[code]`` tags (default:
        ``false``)

    `monofont`
        If set to true, add a tag to show the code with a monospace font
        (default: ``false``).
    """
    name = 'BBCode'
    aliases = ['bbcode', 'bb']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # Option flags: wrap everything in [code] and/or force monospace.
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        """Precompute the (opening, closing) tag pair for every token type."""
        for ttype, ndef in self.style:
            open_tags = []
            close_tags = []
            if ndef['color']:
                open_tags.append('[color=#%s]' % ndef['color'])
                close_tags.append('[/color]')
            for key, tag in (('bold', 'b'), ('italic', 'i'),
                             ('underline', 'u')):
                if ndef[key]:
                    open_tags.append('[%s]' % tag)
                    close_tags.append('[/%s]' % tag)
            # there are no common BBcodes for background-color and border;
            # closing tags must nest in reverse order of the opening ones
            self.styles[ttype] = ''.join(open_tags), ''.join(reversed(close_tags))

    def format_unencoded(self, tokensource, outfile):
        """Write the BBCode-highlighted token stream to *outfile*."""
        write = outfile.write
        if self._code:
            write('[code]')
        if self._mono:
            write('[font=monospace]')

        # Buffer consecutive tokens that share a style so each run is
        # wrapped in a single pair of tags.
        pending_type = None
        pending_text = ''
        for ttype, value in tokensource:
            # climb the token hierarchy until a styled ancestor is found
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == pending_type:
                pending_text += value
            else:
                if pending_text:
                    start, end = self.styles[pending_type]
                    write(start + pending_text + end)
                pending_type = ttype
                pending_text = value
        if pending_text:
            start, end = self.styles[pending_type]
            write(start + pending_text + end)

        if self._mono:
            write('[/font]')
        if self._code:
            write('[/code]')
        if self._code or self._mono:
            write('\n')
| mit |
aaxelb/SHARE | tests/share/regulate/steps/test_block_extra_values.py | 2 | 1810 | import pytest
from share.regulate.steps.block_extra_values import BlockExtraValues
from share.util.graph import MutableGraph
class TestBlockExtraValuesStep:
    """Unit tests for the BlockExtraValues regulator step."""

    @pytest.fixture
    def graph(self):
        # Three creativework nodes: two with different 'extra' dicts and
        # one with no 'extra' at all.
        g = MutableGraph()
        g.add_node(1, 'creativework', {
            'title': 'A work!',
            'extra': {
                'foo': 'flooby',
                'bah': 'hab',
            },
        })
        g.add_node(2, 'creativework', {
            'title': 'Another work!',
            'extra': {
                'extra': 'extra',
                'bah': 'hab',
            },
        })
        g.add_node(3, 'creativework', {'title': 'No extra :('})
        return g

    # expected_nodes lists the node ids that must SURVIVE regulation; a node
    # is removed only when its 'extra' dict contains every blocked key/value
    # pair (hence the second case, with an unmatched pair, removes nothing).
    @pytest.mark.parametrize('blocked_values, expected_nodes', [
        ({'foo': 'flooby'}, {2, 3}),
        ({'foo': 'flooby', 'match': 'nothing'}, {1, 2, 3}),
        ({'extra': 'extra'}, {1, 3}),
        ({'bah': 'hab'}, {3}),
    ])
    def test_block_extras(self, graph, blocked_values, expected_nodes):
        step = BlockExtraValues(blocked_values=blocked_values)
        # iterate over a snapshot: regulate_node may discard graph nodes
        for node in list(graph):
            step.regulate_node(node)
            if node.id in expected_nodes:
                assert node in graph
            else:
                assert node not in graph
        assert len(graph) == len(expected_nodes)

    def test_error_on_bad_setting(self):
        # Unknown settings are rejected.
        with pytest.raises(TypeError):
            BlockExtraValues(bad_setting=True)

        # blocked_values required, must be non-empty dict
        with pytest.raises(TypeError):
            BlockExtraValues()
        with pytest.raises(TypeError):
            BlockExtraValues(blocked_values=['bad'])
        with pytest.raises(TypeError):
            BlockExtraValues(blocked_values={})
        BlockExtraValues(blocked_values={'this': 'works'})
| apache-2.0 |
hemny-singh/squealy | squealy/migrations/0004_auto_20170331_0620.py | 5 | 1655 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-31 06:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import squealy.models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; introduces the
    # ScheduledReportChart join model and rewires ScheduledReport away from
    # its direct 'chart' foreign key.

    dependencies = [
        ('squealy', '0003_merge_20170330_1334'),
    ]

    operations = [
        # New join model; its FK fields are attached below once the old
        # ScheduledReport.chart field has been removed.
        migrations.CreateModel(
            name='ScheduledReportChart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Drop the old direct ScheduledReport -> Chart relation.
        migrations.RemoveField(
            model_name='scheduledreport',
            name='chart',
        ),
        migrations.AlterField(
            model_name='chart',
            name='options',
            field=squealy.models.CustomJSONField(blank=True, default={}, null=True),
        ),
        migrations.AlterField(
            model_name='scheduledreport',
            name='template',
            field=models.TextField(blank=True, help_text="Add '{% include 'report.html' %}' to include your reports in mail", null=True),
        ),
        # FKs completing the many-to-many through ScheduledReportChart.
        migrations.AddField(
            model_name='scheduledreportchart',
            name='chart',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scheduledreportchart', to='squealy.Chart'),
        ),
        migrations.AddField(
            model_name='scheduledreportchart',
            name='report',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relatedscheduledreport', to='squealy.ScheduledReport'),
        ),
    ]
| mit |
Splo0sh/3DCT | pyto/test/test_affine_2d.py | 2 | 23214 | """
Tests module affine_2d
# Author: Vladan Lucic
# $Id: test_affine_2d.py 1152 2015-05-26 08:53:37Z vladan $
"""
__version__ = "$Revision: 1152 $"
from copy import copy, deepcopy
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.affine_2d import Affine2D
class TestAffine2D(np_test.TestCase):
"""
"""
def setUp(self):
# basic
self.x0 = numpy.array([[1, 0.], [0, 1], [-1, 0]])
self.y0_0 = 2 * self.x0
self.y0_90 = 2 * numpy.array([[0, 1], [-1, 0], [0, -1]])
self.y0_180 = 2 * numpy.array([[-1, 0.], [0, -1], [1, 0]])
self.y0_270 = 2 * numpy.array([[0, -1], [1, 0], [0, 1]])
# parallelogram, rotation, scale, exact
self.d1 = [-1, 2]
self.x1 = numpy.array([[0., 0], [2, 0], [2, 1], [0, 1]])
self.y1 = numpy.array([[0., 0], [4, 2], [3, 4], [-1, 2]]) + self.d1
self.y1m = numpy.array([[0., 0], [-4, 2], [-3, 4], [1, 2]]) + self.d1
# parallelogram, rotation, scale, not exact
self.d2 = [-1, 2]
self.x2 = numpy.array([[0.1, -0.2], [2.2, 0.1], [1.9, 0.8], [0.2, 1.1]])
self.y2 = numpy.array([[0., 0], [4, 2], [3, 4], [-1, 2]]) + self.d2
self.y2m = numpy.array([[0., 0], [-4, 2], [-3, 4], [1, 2]]) + self.d2
# transformations
self.af1 = Affine2D.find(x=self.x1, y=self.y1)
self.af1_gl = numpy.array([[2,-1],[1,2]])
self.af1_d = self.d1
self.af1_phi = numpy.arctan(0.5)
self.af1_scale = numpy.array([numpy.sqrt(5)] * 2)
self.af1_parity = 1
self.af1_shear = 0
self.af1m = Affine2D.find(x=self.x1, y=self.y1m)
self.af1m_gl = numpy.array([[-2,1],[1,2]])
self.af1m_d = self.af1_d
self.af1m_phi = numpy.pi - self.af1_phi
self.af1m_q = self.af1m.makeQ(phi=self.af1m_phi)
self.af1m_scale = numpy.array([numpy.sqrt(5)] * 2)
self.af1m_parity = -1
self.af1m_shear = 0
self.af2 = Affine2D.find(x=self.x2, y=self.y2)
self.af2_d = [-1.42584884, 2.05326245]
self.af2_gl = numpy.array([[2.09463865, -0.84056372],
[ 1.00406239, 1.87170871]])
self.af2_phi = 0.446990530695
self.af2_scale = numpy.array([2.32285435, 2.05115392])
self.af2m = Affine2D.find(x=self.x2, y=self.y2m)
# L-shaped u, scale, v_angle=0
self.x3 = numpy.array([[3,0], [2,0], [1,0], [1, -1]])
self.y3 = numpy.array([[-1,2], [-1,1.5], [-1,1], [1, 1]])
self.af3 = Affine2D.find(x=self.x3, y=self.y3)
self.af3.decompose(order='usv')
self.af3_uAngleDeg = 0
self.af3_vAngleDeg = 90
self.af3_scale = [2, 0.5]
self.af3_d = [-1, 0.5]
def testFindGL(self):
"""
Tests find (transform 'gl'), decompose individual parameters and
transform.
"""
##################################################
#
# parallelogram, rotation, scale, exact
#
#aff2d = Affine2D.find(x=self.x1, y=self.y1)
np_test.assert_almost_equal(self.af1.d, self.af1_d)
np_test.assert_almost_equal(self.af1.gl, self.af1_gl)
# xy_axis = 'dim_point'
aff2d_xy = Affine2D.find(
x=self.x1.transpose(), y=self.y1.transpose(), xy_axes='dim_point')
np_test.assert_almost_equal(aff2d_xy.d, self.af1_d)
np_test.assert_almost_equal(aff2d_xy.gl, self.af1_gl)
# test decompose
self.af1.decompose(order='qpsm')
np_test.assert_almost_equal(self.af1.phi, self.af1_phi)
desired_q = numpy.array(\
[[numpy.cos(self.af1_phi), -numpy.sin(self.af1_phi)],
[numpy.sin(self.af1_phi), numpy.cos(self.af1_phi)]])
np_test.assert_almost_equal(self.af1.q, desired_q)
np_test.assert_almost_equal(self.af1.p, numpy.diag([1, 1]))
np_test.assert_almost_equal(self.af1.s,
self.af1_scale * numpy.diag([1,1]))
np_test.assert_almost_equal(self.af1.m, numpy.diag([1, 1]))
# test parameters
np_test.assert_almost_equal(self.af1.scale, self.af1_scale)
np_test.assert_almost_equal(self.af1.phi, self.af1_phi)
np_test.assert_almost_equal(self.af1.parity, self.af1_parity)
np_test.assert_almost_equal(self.af1.shear, self.af1_shear)
# test transformation and error
y1_calc = self.af1.transform(self.x1)
np_test.assert_almost_equal(y1_calc, self.y1)
np_test.assert_almost_equal(self.af1.error, numpy.zeros_like(self.y1))
np_test.assert_almost_equal(self.af1.rmsError, 0)
#################################################
#
# parallelogram, scale, rotation, parity, exact
#
# test parameters
#aff2d = Affine2D.find(x=self.x1, y=self.y1m)
np_test.assert_almost_equal(self.af1m.d, self.af1m_d)
np_test.assert_almost_equal(self.af1m.gl, self.af1m_gl)
np_test.assert_almost_equal(self.af1m.scale, self.af1m_scale)
np_test.assert_almost_equal(self.af1m.phi, self.af1m_phi)
#np_test.assert_almost_equal(self.af1m.phiDeg,
# 180 - desired_phi * 180 / numpy.pi)
np_test.assert_almost_equal(self.af1m.parity, self.af1m_parity)
np_test.assert_almost_equal(self.af1m.shear, self.af1m_shear)
# test transformation and error
y1_calc = self.af1m.transform(self.x1, gl=self.af1m.gl, d=self.af1m.d)
np_test.assert_almost_equal(y1_calc, self.y1m)
np_test.assert_almost_equal(self.af1m.error, numpy.zeros_like(self.y1))
np_test.assert_almost_equal(self.af1m.rmsError, 0)
# xy_axis = 'dim_point'
af1m_xy = Affine2D.find(
x=self.x1.transpose(), y=self.y1m.transpose(), xy_axes='dim_point')
np_test.assert_almost_equal(af1m_xy.d, self.af1m_d)
np_test.assert_almost_equal(af1m_xy.gl, self.af1m_gl)
np_test.assert_almost_equal(af1m_xy.scale, self.af1m_scale)
np_test.assert_almost_equal(af1m_xy.phi, self.af1m_phi)
np_test.assert_almost_equal(af1m_xy.parity, self.af1m_parity)
np_test.assert_almost_equal(af1m_xy.shear, self.af1m_shear)
##################################################
#
# same as above but rq order
#
# test parameters
q, p, s, m = self.af1m.decompose(gl=self.af1m.gl, order='rq')
q_new = numpy.dot(numpy.dot(p, self.af1m_q), p)
np_test.assert_almost_equal(q, q_new)
np_test.assert_almost_equal(p, self.af1m.makeP(parity=self.af1m_parity))
np_test.assert_almost_equal(s, self.af1m.makeS(self.af1m_scale))
np_test.assert_almost_equal(m, self.af1m.makeM(self.af1m_shear))
# test transformation
psmq = numpy.dot(numpy.dot(p, s), numpy.dot(m, q))
y_new = numpy.inner(self.x1, psmq) + self.af1m.d
np_test.assert_almost_equal(y_new, self.y1m)
##################################################
#
# parallelogram, rotation, scale, not exact
#
aff2d = Affine2D.find(x=self.x2, y=self.y2)
# test transformation matrices and parameters
desired_d = [-1.42584884, 2.05326245]
desired_gl = numpy.array([[2.09463865, -0.84056372],
[ 1.00406239, 1.87170871]])
desired_phi = 0.446990530695
desired_scale = [2.32285435, 2.05115392]
np_test.assert_almost_equal(aff2d.d, desired_d)
np_test.assert_almost_equal(aff2d.gl, desired_gl)
np_test.assert_almost_equal(aff2d.phi, desired_phi)
np_test.assert_almost_equal(aff2d.scale, desired_scale)
np_test.assert_almost_equal(aff2d.parity, 1)
np_test.assert_almost_equal(aff2d.m, [[1, 0.02198716], [0, 1]])
# test transform method
y2_calc_gl = aff2d.transform(self.x2, gl=aff2d.gl, d=aff2d.d)
qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
numpy.dot(aff2d.s, aff2d.m))
y2_calc_qpsm = numpy.inner(self.x2, qpsm) + aff2d.d
np_test.assert_almost_equal(y2_calc_gl, y2_calc_qpsm)
#np_test.assert_almost_equal(y2_calc_gl, self.y2)
##################################################
#
# parallelogram, rotation, scale, not exact, xy_axes=dim_point
#
aff2d_xy = Affine2D.find(
x=self.x2.transpose(), y=self.y2.transpose(), xy_axes='dim_point')
# test transformation matrices and parameters
desired_d = [-1.42584884, 2.05326245]
desired_gl = numpy.array([[2.09463865, -0.84056372],
[ 1.00406239, 1.87170871]])
desired_phi = 0.446990530695
desired_scale = [2.32285435, 2.05115392]
np_test.assert_almost_equal(aff2d_xy.d, desired_d)
np_test.assert_almost_equal(aff2d_xy.gl, desired_gl)
np_test.assert_almost_equal(aff2d_xy.phi, desired_phi)
np_test.assert_almost_equal(aff2d_xy.scale, desired_scale)
np_test.assert_almost_equal(aff2d_xy.parity, 1)
np_test.assert_almost_equal(aff2d_xy.m, [[1, 0.02198716], [0, 1]])
# test transform method
y2_calc_gl = aff2d.transform(
self.x2.transpose(), gl=aff2d_xy.gl, d=aff2d_xy.d,
xy_axes='dim_point')
qpsm = numpy.dot(numpy.dot(aff2d_xy.q, aff2d_xy.p),
numpy.dot(aff2d_xy.s, aff2d_xy.m))
y2_calc_qpsm = numpy.dot(
qpsm, self.x2.transpose()) + numpy.expand_dims(aff2d_xy.d, 1)
np_test.assert_almost_equal(y2_calc_gl, y2_calc_qpsm)
#np_test.assert_almost_equal(y2_calc_gl, self.y2)
##################################################
#
# parallelogram, rotation, scale, parity, not exact
#
aff2d = Affine2D.find(x=self.x2, y=self.y2m)
# test transformation matrices and parameters
desired_d = [-0.57415116, 2.05326245]
desired_gl = numpy.array([[-2.09463865, 0.84056372],
[ 1.00406239, 1.87170871]])
desired_phi = 0.446990530695
desired_scale = [2.32285435, 2.05115392]
np_test.assert_almost_equal(aff2d.d, desired_d)
np_test.assert_almost_equal(aff2d.gl, desired_gl)
np_test.assert_almost_equal(aff2d.phi, numpy.pi - desired_phi)
np_test.assert_almost_equal(aff2d.scale, desired_scale)
np_test.assert_almost_equal(aff2d.parity, -1)
np_test.assert_almost_equal(aff2d.m, [[1, 0.02198716], [0, 1]])
# test transform method
y2m_calc_gl = aff2d.transform(self.x2)
qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
numpy.dot(aff2d.s, aff2d.m))
np_test.assert_almost_equal(qpsm, aff2d.gl)
y2m_calc_qpsm = numpy.inner(self.x2, qpsm) + aff2d.d
np_test.assert_almost_equal(y2m_calc_gl, y2m_calc_qpsm)
#np_test.assert_almost_equal(y2_calc_gl, self.y2m)
##################################################
#
# parallelogram, rotation, scale, parity, not exact, xy_axes='dim_point'
#
aff2d = Affine2D.find(
x=self.x2.transpose(), y=self.y2m.transpose(), xy_axes='dim_point')
# test transformation matrices and parameters
desired_d = [-0.57415116, 2.05326245]
desired_gl = numpy.array([[-2.09463865, 0.84056372],
[ 1.00406239, 1.87170871]])
desired_phi = 0.446990530695
desired_scale = [2.32285435, 2.05115392]
np_test.assert_almost_equal(aff2d.d, desired_d)
np_test.assert_almost_equal(aff2d.gl, desired_gl)
np_test.assert_almost_equal(aff2d.phi, numpy.pi - desired_phi)
np_test.assert_almost_equal(aff2d.scale, desired_scale)
np_test.assert_almost_equal(aff2d.parity, -1)
np_test.assert_almost_equal(aff2d.m, [[1, 0.02198716], [0, 1]])
# test transform method
y2m_calc_gl = aff2d.transform(self.x2.transpose())
qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
numpy.dot(aff2d.s, aff2d.m))
np_test.assert_almost_equal(qpsm, aff2d.gl)
y2m_calc_qpsm = (numpy.dot(qpsm, self.x2.transpose())
+ numpy.expand_dims(aff2d.d, 1))
np_test.assert_almost_equal(y2m_calc_gl, y2m_calc_qpsm)
#np_test.assert_almost_equal(y2_calc_gl, self.y2m)
##################################################
#
# L-shape: rotation, scale; check usv
#
af3 = Affine2D.find(x=self.x3, y=self.y3)
af3.decompose(order='usv')
np_test.assert_almost_equal(af3.vAngleDeg, 90)
np_test.assert_almost_equal(af3.uAngleDeg, 0)
np_test.assert_almost_equal(af3.scale, [2, 0.5])
np_test.assert_almost_equal(af3.scaleAngle, numpy.arccos(0.25))
np_test.assert_almost_equal(af3.d, self.af3_d)
def testFindRS(self):
    """
    Tests find (transform 'rs'), decompose individual parameters and
    transform.

    Exercises rotation+isotropic-scale ('rs') fits on the fixture point
    sets (exact, mirrored/parity, noisy, and 'dim_point' axis layouts),
    checking the decomposed parameters and that transform() agrees with
    the explicit q.p.s.m matrix product.
    """
    ###############################################
    #
    # parallelogram, rotation, scale, exact
    #
    aff2d = Affine2D.find(x=self.x1, y=self.y1, type_='rs')
    np_test.assert_almost_equal(aff2d.d, self.d1)
    # test finding transformation
    desired_phi = numpy.arctan(0.5)
    desired_scale = [numpy.sqrt(5)] * 2  # isotropic: same factor on both axes
    desired_q = numpy.array(\
        [[numpy.cos(desired_phi), -numpy.sin(desired_phi)],
         [numpy.sin(desired_phi), numpy.cos(desired_phi)]])
    np_test.assert_almost_equal(aff2d.parity, 1)
    #np_test.assert_almost_equal(aff2d.phi, desired_phi)
    #np_test.assert_almost_equal(aff2d.q, desired_q)
    np_test.assert_almost_equal(aff2d.scale, desired_scale)
    np_test.assert_almost_equal(aff2d.error, numpy.zeros_like(self.y1))
    # test doing transformation
    y1_calc = aff2d.transform(self.x1)
    np_test.assert_almost_equal(y1_calc, self.y1)
    # transform() must match the explicit q.p.s.m decomposition product
    qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
                     numpy.dot(aff2d.s, aff2d.m))
    y_new = numpy.inner(self.x1, qpsm) + aff2d.d
    np_test.assert_almost_equal(y_new, self.y1)
    ###############################################
    #
    # parallelogram, rotation, scale, parity, exact
    #
    aff2d = Affine2D.find(x=self.x1, y=self.y1m, type_='rs')
    np_test.assert_almost_equal(aff2d.d, self.d1)
    # test finding transformation
    desired_phi = numpy.arctan(0.5)
    desired_scale = [numpy.sqrt(5)] * 2
    desired_q = numpy.array(\
        [[-numpy.cos(desired_phi), -numpy.sin(desired_phi)],
         [numpy.sin(desired_phi), -numpy.cos(desired_phi)]])
    np_test.assert_almost_equal(aff2d.phi, numpy.pi - desired_phi)
    np_test.assert_almost_equal(aff2d.q, desired_q)
    np_test.assert_almost_equal(aff2d.scale, desired_scale)
    np_test.assert_almost_equal(aff2d.parity, -1)
    np_test.assert_almost_equal(aff2d.error, numpy.zeros_like(self.y1))
    # test doing transformation
    y1_calc = aff2d.transform(self.x1)
    np_test.assert_almost_equal(y1_calc, self.y1m)
    qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
                     numpy.dot(aff2d.s, aff2d.m))
    y_new = numpy.inner(self.x1, qpsm) + aff2d.d
    np_test.assert_almost_equal(y_new, self.y1m)
    ###############################################
    #
    # parallelogram, rotation, scale, parity, exact, xy_axes='dim_point'
    #
    aff2d = Affine2D.find(
        x=self.x1.transpose(), y=self.y1m.transpose(), type_='rs',
        xy_axes='dim_point')
    np_test.assert_almost_equal(aff2d.d, self.d1)
    # test finding transformation
    desired_phi = numpy.arctan(0.5)
    desired_scale = [numpy.sqrt(5)] * 2
    desired_q = numpy.array(\
        [[-numpy.cos(desired_phi), -numpy.sin(desired_phi)],
         [numpy.sin(desired_phi), -numpy.cos(desired_phi)]])
    np_test.assert_almost_equal(aff2d.phi, numpy.pi - desired_phi)
    np_test.assert_almost_equal(aff2d.q, desired_q)
    np_test.assert_almost_equal(aff2d.scale, desired_scale)
    np_test.assert_almost_equal(aff2d.parity, -1)
    np_test.assert_almost_equal(
        aff2d.error, numpy.zeros_like(self.y1.transpose()))
    # test doing transformation
    y1_calc = aff2d.transform(self.x1.transpose())
    np_test.assert_almost_equal(y1_calc, self.y1m.transpose())
    qpsm = numpy.dot(numpy.dot(aff2d.q, aff2d.p),
                     numpy.dot(aff2d.s, aff2d.m))
    # dim_point layout: apply the matrix from the left, offset broadcast per column
    y_new = (numpy.dot(qpsm, self.x1.transpose())
             + numpy.expand_dims(aff2d.d, 1))
    np_test.assert_almost_equal(y_new, self.y1m.transpose())
    ###############################################
    #
    # parallelogram, rotation, scale, parity not exact
    #
    af2m = Affine2D.find(x=self.x2, y=self.y2m)
    af2mrs = Affine2D.find(x=self.x2, y=self.y2m, type_='rs')
    # test finding transformation
    desired_d = [-0.57415116, 2.05326245]
    desired_phi = numpy.pi - 0.442817288965
    desired_scale = [2.18278075] * 2
    desired_q = numpy.array(\
        [[numpy.cos(desired_phi), -numpy.sin(desired_phi)],
         [numpy.sin(desired_phi), numpy.cos(desired_phi)]])
    #np_test.assert_almost_equal(af2mrs.d, desired_d)
    np_test.assert_almost_equal(af2mrs.phi, desired_phi)
    np_test.assert_almost_equal(af2mrs.scale, desired_scale)
    np_test.assert_almost_equal(af2mrs.parity, -1)
    # compare with gl
    #np_test.assert_almost_equal(af2mrs.d, af2m.d)
    np_test.assert_almost_equal(af2mrs.scale, af2m.scale, decimal=1)
    np_test.assert_almost_equal(af2mrs.phi, af2m.phi, decimal=2)
    np_test.assert_almost_equal(af2mrs.parity, af2m.parity)
    np_test.assert_almost_equal(af2mrs.error, af2m.error, decimal=0)
    np_test.assert_almost_equal(af2mrs.rmsError, af2m.rmsError, decimal=1)
    # test doing transformation
    y2_calc = af2mrs.transform(self.x2)
    qpsm = numpy.dot(numpy.dot(af2mrs.q, af2mrs.p),
                     numpy.dot(af2mrs.s, af2mrs.m))
    np_test.assert_almost_equal(qpsm, af2mrs.gl)
    y2_calc_qpsm = numpy.inner(self.x2, qpsm) + af2mrs.d
    np_test.assert_almost_equal(y2_calc, y2_calc_qpsm)
    ###############################################
    #
    # parallelogram, rotation, scale, parity not exact, xy_axes='dim_point'
    #
    af2m = Affine2D.find(x=self.x2.transpose(), y=self.y2m.transpose())
    af2mrs = Affine2D.find(
        x=self.x2.transpose(), y=self.y2m.transpose(), type_='rs',
        xy_axes='dim_point')
    # test finding transformation
    desired_phi = numpy.pi - 0.442817288965
    desired_scale = [2.18278075] * 2
    desired_q = numpy.array(\
        [[numpy.cos(desired_phi), -numpy.sin(desired_phi)],
         [numpy.sin(desired_phi), numpy.cos(desired_phi)]])
    np_test.assert_almost_equal(af2mrs.phi, desired_phi)
    np_test.assert_almost_equal(af2mrs.scale, desired_scale)
    np_test.assert_almost_equal(af2mrs.parity, -1)
def testInverse(self):
    """
    Tests inverse

    Checks that inverse() negates the rotation angle, reciprocates the
    scale and keeps the parity, and that fitting with x and y swapped
    yields (approximately) the same inverse transformation.
    """
    ###############################################
    #
    # parallelogram, rotation, scale not exact
    #
    # NOTE(review): af2 is computed but never used in this section.
    af2 = Affine2D.find(x=self.x2, y=self.y2)
    af2rs = Affine2D.find(x=self.x2, y=self.y2, type_='rs')
    af2rsi = Affine2D.find(y=self.x2, x=self.y2, type_='rs')
    af2rs_inv = af2rs.inverse()
    af2rs_inv.decompose(order='qpsm')
    # tests inverse method
    np_test.assert_almost_equal(af2rs_inv.phi, -af2rs.phi)
    np_test.assert_almost_equal(af2rs_inv.scale, 1/af2rs.scale)
    np_test.assert_almost_equal(af2rs_inv.parity, af2rs.parity)
    # tests inversed x and y
    np_test.assert_almost_equal(af2rsi.phi, -af2rs.phi)
    np_test.assert_almost_equal(af2rsi.scale, 1/af2rs.scale, decimal=1)
    np_test.assert_almost_equal(af2rsi.parity, af2rs.parity)
    ###############################################
    #
    # parallelogram, rotation, scale, parity not exact
    #
    # NOTE(review): af2m is computed but never used in this section.
    af2m = Affine2D.find(x=self.x2, y=self.y2m)
    af2mrs = Affine2D.find(x=self.x2, y=self.y2m, type_='rs')
    af2mrsi = Affine2D.find(y=self.x2, x=self.y2m, type_='rs')
    af2mrs_inv = af2mrs.inverse()
    af2mrs_inv.decompose(order='qpsm')
    # tests inverse method
    # (with parity -1 the angle is preserved rather than negated)
    np_test.assert_almost_equal(af2mrs_inv.phi, af2mrs.phi)
    np_test.assert_almost_equal(af2mrs_inv.scale, 1/af2mrs.scale)
    np_test.assert_almost_equal(af2mrs_inv.parity, af2mrs.parity)
    # tests inversed x and y
    np_test.assert_almost_equal(af2mrsi.phi, af2mrs.phi)
    np_test.assert_almost_equal(af2mrsi.scale, 1/af2mrs.scale, decimal=1)
    np_test.assert_almost_equal(af2mrsi.parity, af2mrs.parity)
def testCompose(self):
    """
    Tests compose

    Composition of two affine transformations should add rotation
    angles, multiply scales and parities, and combine the rms error
    estimates of the two inputs.
    """
    af11 = Affine2D.compose(self.af1, self.af1)
    af11.decompose(order='qpsm')
    np_test.assert_almost_equal(af11.phi, 2 * self.af1_phi)
    np_test.assert_almost_equal(af11.scale,
                                self.af1_scale * self.af1_scale)
    np_test.assert_almost_equal(af11.parity, 1)
    np_test.assert_almost_equal(af11.rmsErrorEst,
                                numpy.sqrt(2) * self.af1.error)
    af11m = Affine2D.compose(self.af1, self.af1m)
    af11m.decompose(order='qpsm')
    ## This was risen in testing. Added a "-"
    # E AssertionError:
    # E Arrays are not almost equal to 7 decimals
    # E ACTUAL: -3.1415926535897931
    # E DESIRED: 3.1415926535897931
    ## Before:
    # np_test.assert_almost_equal(af11m.phi, self.af1_phi + self.af1m_phi)
    ## After:
    # (phi and -phi denote the same rotation at +/-pi; hence the sign flip)
    np_test.assert_almost_equal(-af11m.phi, self.af1_phi + self.af1m_phi)
    np_test.assert_almost_equal(af11m.scale,
                                self.af1_scale * self.af1m_scale)
    np_test.assert_almost_equal(af11m.parity,
                                self.af1_parity * self.af1m_parity)
    np_test.assert_almost_equal(af11m.rmsErrorEst,
                                numpy.sqrt(2) * self.af1.error)
    # test rms error
    af12 = Affine2D.compose(self.af1, self.af2)
    self.af1.decompose(order='qpsm')
    np_test.assert_almost_equal(af12.rmsErrorEst,
                                self.af1.scale[0] * self.af2.rmsError)
    af21 = Affine2D.compose(self.af2, self.af1)
    np_test.assert_almost_equal(af21.rmsErrorEst, self.af2.rmsError)
if __name__ == '__main__':
    # Run the Affine2D test case with a verbose text runner.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestAffine2D))
| gpl-3.0 |
REMEXLabs/SmartKitchen-BLE-Gateway | src/rest_utility.py | 1 | 5462 | import base64
import json
import logging
import time
import threading
import requests
# This is designed for OpenHab
# TODO: Refactor to work with arbitrary REST Server
class OpenHabRestInterface(threading.Thread):
    """Threaded REST interface to an openHAB server.

    Each instance polls the members of one openHAB group in the
    background (``poll_status``, installed as the thread target) and
    pushes every observed state change onto the queue passed to
    ``__init__`` as a ``{item_name: state}`` dict.  Synchronous helpers
    read/update item states and add/delete items via the REST API.
    """

    # Class-level defaults, kept for backward compatibility.  Instances
    # get their own copies in __init__: previously prev_state was ONLY a
    # class attribute, so every instance shared (and clobbered) the same
    # state cache.
    prev_state = {}  # stores item states
    update = False   # keeps the polling loop alive while True

    def __init__(self, host, port, user, pwd, group, queue):
        """Prepare auth headers and the (not yet started) polling thread.

        host/port -- openHAB server address
        user/pwd  -- HTTP basic-auth credentials
        group     -- name of the openHAB group whose members are polled
        queue     -- Queue receiving {item: state} dicts on state changes
        """
        self.host = host
        self.port = port
        self.user = user
        self.pwd = pwd
        # NOTE(review): base64.encodestring is Python-2 era (removed in
        # Python 3.9); encodebytes is the 3.x replacement -- confirm the
        # target interpreter before modernizing.
        self.auth = base64.encodestring("%s:%s" % (self.user,
                                                   self.pwd)).replace("\n", "")
        self.basic_header = {
            "Authorization": "Basic %s" % self.auth,
            "Content-type": "text/plain"
        }  # Header for basic connections (returns only text)
        self.polling_header = {
            "Authorization": "Basic %s" % self.auth,
            "Accept": "application/json"
        }  # Header for polling (returns json object)
        self.add_header = {
            "Authorization": "Basic %s" % self.auth,
            "Accept": "application/json",
            "Content-Type": "application/json"
        }  # Header for adding items
        # Per-instance state (see class-level comment above).
        self.prev_state = {}
        self.update = False
        # NULL Logger if none is set
        self.logger = logging.getLogger("NULL")
        self.logger.addHandler(logging.NullHandler())
        self.logger.setLevel(logging.NOTSET)
        self.args = (group, queue)
        threading.Thread.__init__(
            self, target=self.poll_status, args=self.args)

    # If you want logging you can set the logger here
    def set_logger(self, logger_name):
        """Route this interface's log output to the named logger."""
        self.logger = logging.getLogger(logger_name)

    def stop(self):
        """Ask the polling loop in poll_status() to finish its current
        iteration and terminate."""
        self.update = False

    # Returns the state of the specified item
    def get_item_state(self, item):
        """Return the current state of *item* as text, or None on HTTP error.

        A successful read also refreshes the prev_state cache.
        """
        retval = requests.get("http://" + self.host + ":" + str(self.port) +
                              "/rest/items/" + item + "/state")
        if retval.status_code != requests.codes.ok:
            self.logger.error("GET returned: %s" % retval.status_code)
            return None
        value = retval.text
        self.prev_state[item] = value
        self.logger.info(item + ": " + str(value))
        return value

    # Updates the state of the specified item
    def update_item_state(self, item, state, no_update=False):
        """PUT a new state for *item*; return True on success.

        no_update -- when True, do not record the pushed state in
        prev_state (so the poller will still report it as a change).
        """
        openhab_url = "http://%s:%s/rest/items/%s/state" % (self.host,
                                                            self.port, item)
        retval = requests.put(openhab_url,
                              data=str(state),
                              headers=self.basic_header)
        if retval.status_code != requests.codes.accepted:
            self.logger.error("PUT returned : %s for item: %s" %
                              (retval.status_code, item))
            return False
        # Add to prev_state to prevent endless loops
        if not no_update:
            self.prev_state[item] = state
        return True

    # Polls all Members of a Group and queues new values
    def poll_status(self, group, queue):
        """Poll the members of *group* every 0.5s and queue state changes.

        Runs until self.update is cleared (see stop()).  Unknown items
        are queued once with their initial state.
        """
        self.update = True
        url = "http://%s:%s/rest/items/%s" % (self.host, self.port, group)
        param = {"type": "json"}
        while self.update:
            queue.join()  # Wait until queue is empty
            retval = requests.get(url,
                                  params=param,
                                  headers=self.polling_header)
            if retval.status_code != requests.codes.ok:
                self.logger.error("GET returned: %s for Group:%s" %
                                  (retval.status_code, group))
                time.sleep(0.5)
                continue
            # Get all items in the group and check for new values
            for member in retval.json()["members"]:
                item = member["name"]
                state = member["state"]
                if item in self.prev_state:
                    if state != self.prev_state[item]:
                        self.logger.debug("New State of %s: %s" %
                                          (item, state))
                        queue.put({item: state})
                else:
                    queue.put({item: state})
                self.prev_state[item] = state
            time.sleep(0.5)

    # Add a new item to openHab
    def add_item(self, name, item_type, label, category, group):
        """Create (or replace) an openHAB item; return True on success.

        Retries the PUT up to 3 times.  (The previous loop iterated only
        twice -- range(0, 2) -- and its failure branch tested "i == 2",
        which never matched, so the method reported success even when
        every attempt failed.)
        """
        # Construct the new Item
        item = {
            "name": name,
            "type": item_type,
            "label": label,
            "category": category,
            "groupNames": [group]
        }
        item_json = json.dumps(item)  # create json
        # Push new Item and return success/failure
        url = "http://%s:%s/rest/items/%s" % (self.host, self.port, name)
        for attempt in range(3):
            retval = requests.put(url, data=item_json, headers=self.add_header)
            if retval.status_code == requests.codes.ok:
                break
            if attempt == 2:  # last attempt also failed
                self.logger.error("PUT returned: %s" % retval.status_code)
                return False
        return True

    # Delete Item from openHab
    def delete_item(self, name):
        """Delete the named item via the REST API; return True on success."""
        url = "http://%s:%s/rest/items/%s" % (self.host, self.port, name)
        retval = requests.delete(url)
        if retval.status_code != requests.codes.ok:
            self.logger.error("DELETE returned: %s" % retval.status_code)
            return False
        return True
| apache-2.0 |
lukauskas/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """Scott's Rule bandwidth n**(-1/(d+4)), scaled by the factor *fac*."""
    exponent = -1. / (obj.d + 4)
    return fac * np.power(obj.n, exponent)
# Build a bimodal sample: a broad mode at -2 (175 points) and a narrow
# mode at +2 (50 points).
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])

# Evaluation grid extends one unit beyond the sample range on each side.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)

# Four KDEs of the same data with different bandwidth rules.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))

# True generating density: mixture of the two normals, weighted by
# their sample proportions.
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)

# Rug of the samples on the x-axis, then each estimate vs the true PDF.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")

ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
keshashah/GCompris | src/administration-activity/admin/profile_edit.py | 1 | 4836 | # gcompris - profile_edit.py
#
# Copyright (C) 2005, 2008 Bruno Coudoin and Yves Combe
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import gtk
from gcompris import gcompris_gettext as _
import profile_widget
class ProfileEdit(gtk.Window):
    """Top-level dialog for creating or editing a GCompris profile.

    Shows name/description entries plus a ProfileWidget for assigning
    groups; writes changes back through the widget and refreshes the
    caller's profile list on close.
    """

    def __init__(self, db_connect, db_cursor,
                 profile_id, profile_name, profile_description,
                 profile_list):
        """Build the dialog.

        db_connect/db_cursor -- open database handles, passed on to the
            embedded ProfileWidget
        profile_id -- id of the profile being edited
        profile_name/profile_description -- current values; an empty
            profile_name means "create a new profile"
        profile_list -- owning list view, refreshed via reload_profile()
        """
        # Create the toplevel window
        gtk.Window.__init__(self)
        self.profile_list = profile_list;
        self.set_title(_("Editing a Profile"))
        self.set_border_width(8)
        self.set_default_size(320, 350)
        # An empty/None name switches the dialog into "new profile" mode.
        if(profile_name):
            frame = gtk.Frame(_("Editing profile: ") + profile_name)
            new_profile = False
        else:
            frame = gtk.Frame(_("Editing a new profile"))
            new_profile = True
            profile_name =""
            profile_description = ""
        self.add(frame)
        vbox = gtk.VBox(False, 8)
        vbox.set_border_width(8)
        frame.add(vbox)
        # Label and Entry for the group and description
        table = gtk.Table(2, 2, homogeneous=False)
        table.set_border_width(0)
        table.set_row_spacings(0)
        table.set_col_spacings(20)
        vbox.pack_start(table, True, True, 0)
        label = gtk.Label(_('Profile:'))
        label.set_alignment(0, 0)
        table.attach(label, 0, 1, 0, 1, xoptions=gtk.SHRINK, yoptions=gtk.EXPAND)
        self.entry_profile = gtk.Entry()
        self.entry_profile.set_max_length(20)
        self.entry_profile.insert_text(profile_name, position=0)
        table.attach(self.entry_profile, 1, 2, 0, 1,
                     xoptions=gtk.SHRINK, yoptions=gtk.EXPAND)
        # FIXME: How to remove the selection
        # Label and Entry for the first name
        label = gtk.Label(_('Description:'))
        label.set_alignment(0, 0)
        table.attach(label, 0, 1, 1, 2, xoptions=gtk.SHRINK, yoptions=gtk.EXPAND)
        self.entry_description = gtk.Entry()
        self.entry_description.set_max_length(30)
        self.entry_description.insert_text(profile_description, position=0)
        table.attach(self.entry_description, 1, 2, 1, 2,
                     xoptions=gtk.SHRINK, yoptions=gtk.EXPAND)
        # Top message gives instructions
        label = gtk.Label(_('Assign all the groups belonging to this profile'))
        vbox.pack_start(label, False, False, 0)
        vbox.pack_start(gtk.HSeparator(), False, False, 0)
        # Lower area
        self.profile_widget = profile_widget.ProfileWidget(db_connect,
                                                           db_cursor, profile_id, new_profile)
        vbox.pack_start(self.profile_widget, True, True, 0)
        # Confirmation Buttons
        # --------------------
        vbox.pack_start(gtk.HSeparator(), False, False, 0)
        bbox = gtk.HBox(homogeneous=False, spacing=8)
        button = gtk.Button(stock='gtk-ok')
        bbox.pack_end(button, expand=False, fill=False, padding=0)
        button.connect("clicked", self.ok)
        button = gtk.Button(stock='gtk-close')
        bbox.pack_end(button, expand=False, fill=False, padding=0)
        button.connect("clicked", self.close)
        vbox.pack_start(bbox, False, False, 0)
        # Ready GO
        self.show_all()

    # Done, can quit this dialog
    #
    def close(self, button):
        """Close the dialog without saving (refreshes the list view)."""
        self.profile_list.reload_profile()
        self.destroy()

    # Done, can quit this dialog with saving
    #
    def ok(self, button):
        """Validate input, save via the profile widget and close."""
        # Tell the user he must provide enough information
        if(self.entry_profile.get_text().strip() == ""):
            dialog = gtk.MessageDialog(None,
                                       gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                                       gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
                                       _("You need to provide at least a name for your profile"))
            dialog.run()
            dialog.destroy()
            return
        self.profile_widget.ok(self.entry_profile.get_text().strip(),
                               self.entry_description.get_text())
        # Close the dialog window now
        self.profile_list.reload_profile()
        self.destroy()
| gpl-2.0 |
saitodisse/zulip | api/integrations/svn/zulip_svn_config.py | 124 | 2363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "svn-bot@example.com"  # bot account that posts commit notices
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"  # that account's API key
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the svn repository on the server
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for the "evil-master-plan"
# and "my-super-secret-repository" repos to
# * stream "commits"
# * topic "branch_name"
def commit_notice_destination(path, commit):
    """Map an svn repository path and commit id to a Zulip destination.

    Returns a dict with the target "stream" and "subject" (topic), or
    None when no notification should be sent for this repository.
    """
    repo = path.rsplit('/', 1)[-1]
    ignored_repos = ("evil-master-plan", "my-super-secret-repository")
    if repo in ignored_repos:
        # Return None for cases where you don't want a notice sent
        return None
    return dict(stream = "commits",
                subject = u"%s" % (repo,))
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None  # e.g. "/path/to/zulip/api" when not on sys.path
# Set this to your Zulip server's API URI
ZULIP_SITE = "https://api.zulip.com"
| apache-2.0 |
karthikvadla16/spark-tk | integration-tests/tests/test_frame_binary_classification_metrics.py | 12 | 4932 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc, rm, get_sandbox_path
import logging
logger = logging.getLogger(__name__)
# Tests binary classification with string values
def test_binary_classification_metrics_001(tc):
    # NOTE(review): every check below is written as assert(a, b, msg),
    # which asserts a non-empty 3-tuple (or 2-tuple) and therefore ALWAYS
    # passes -- none of these comparisons is actually evaluated.  The
    # intent is clearly "assert a == b, msg", but because the expected
    # values have never been exercised they must be re-validated before
    # rewriting (e.g. here both 'green' labels are predicted 'green',
    # which conflicts with the stated recall of 0.0).
    logger.info("create frame")
    rows = [["red", "red"],["blue", "green"],["green", "green"],["green", "green"]]
    schema = [('labels', str),('predictions', str)]
    frame = tc.frame.create(rows, schema)
    assert(frame.count(), 4, "frame should have 4 rows")
    assert(frame.column_names, ['labels', 'predictions'])
    logger.info("compute binary_classification_metrics()")
    cm = frame.binary_classification_metrics('labels', 'predictions', 'green', 1)
    assert(cm.f_measure, 0.0, "computed f_measure for this model should be equal to 0.0")
    assert(cm.recall, 0.0, "computed recall for this model should be equal to 0.0")
    assert(cm.accuracy, 0.5, "computed accuracy for this model should be equal to 0.5")
    assert(cm.precision, 0.0, "computed precision for this model should be equal to 0.0")
    confusion_matrix = cm.confusion_matrix.values.tolist()
    assert(confusion_matrix, [[0, 2], [0, 2]], "computed confusion_matrix for this models should be equal to [[0, 2], [0, 2]]")
# Tests binary classification with float values
def test_binary_classification_metrics_002(tc):
    # NOTE(review): same defect as test 001 -- assert(a, b, msg) asserts a
    # truthy tuple and always passes, so these expected values have never
    # actually been checked.  Rewrite as "assert a == b, msg" (with a
    # tolerance for the float metrics) only after re-validating them.
    logger.info("create frame")
    rows = [[0.0, 0.0],[1.5, 0.0],[0.0, 0.0],[1.5, 1.5]]
    schema = [('labels', float),('predictions', float)]
    frame = tc.frame.create(rows, schema)
    assert(frame.count(), 4, "frame should have 4 rows")
    assert(frame.column_names, ['labels', 'predictions'])
    logger.info("compute binary_classification_metrics()")
    cm = frame.binary_classification_metrics('labels', 'predictions', 1.5, 1)
    assert(cm.f_measure, 0.66666666666666663, "computed f_measure for this model should be equal to 0.66666666666666663")
    assert(cm.recall, 0.5, "computed recall for this model should be equal to 0.5")
    assert(cm.accuracy, 0.75, "computed accuracy for this model should be equal to 0.75")
    assert(cm.precision, 1.0, "computed precision for this model should be equal to 1.0")
    confusion_matrix = cm.confusion_matrix.values.tolist()
    assert(confusion_matrix, [[1, 1], [0, 2]], "computed confusion_matrix for this models should be equal to [[1, 1], [0, 2]]")
# Tests binary classification with data that includes missing values and having None as the positive label
def test_binary_classification_metrics_003(tc):
    # NOTE(review): same defect as tests 001/002 -- assert(a, b, msg)
    # asserts a truthy tuple and always passes; no comparison below is
    # actually evaluated.  Re-validate the expected values before
    # rewriting these as real assertions.
    logger.info("create frame")
    rows = [[0.0, 0.0],[1.5, None],[None, None],[1.5, 1.5]]
    schema = [('labels', float),('predictions', float)]
    frame = tc.frame.create(rows, schema)
    assert(frame.count(), 4, "frame should have 4 rows")
    assert(frame.column_names, ['labels', 'predictions'])
    logger.info("compute binary_classification_metrics()")
    cm = frame.binary_classification_metrics('labels', 'predictions', 1.5, 1)
    assert(cm.f_measure, 0.66666666666666663, "computed f_measure for this model should be equal to 0.66666666666666663")
    assert(cm.recall, 0.5, "computed recall for this model should be equal to 0.5")
    assert(cm.accuracy, 0.75, "computed accuracy for this model should be equal to 0.75")
    assert(cm.precision, 1.0, "computed precision for this model should be equal to 1.0")
    confusion_matrix = cm.confusion_matrix.values.tolist()
    assert(confusion_matrix, [[1, 1], [0, 2]], "computed confusion_matrix for this models should be equal to [[1, 1], [0, 2]]")
    logger.info("compute binary_classification_metrics() where the positive label is None.")
    cm = frame.binary_classification_metrics('labels', 'predictions', None)
    assert(cm.f_measure, 0.666666666667, "computed f_measure for this model should be equal to 0.666666666667")
    assert(cm.recall, 1.0, "computed recall for this model should be equal to 1.0")
    assert(cm.accuracy, 0.75, "computed accuracy for this model should be equal to 0.75")
    assert(cm.precision, 0.5, "computed precision for this model should be equal to 0.5")
    confusion_matrix = cm.confusion_matrix.values.tolist()
    assert(confusion_matrix, [[1, 0], [1, 2]], "computed confusion_matrix for this models should be equal to [[1, 0], [1, 2]]")
| apache-2.0 |
MyAOSP/external_chromium_org | third_party/tlslite/tlslite/utils/xmltools.py | 259 | 7358 | """Helper functions for XML.
This module has misc. helper functions for working with XML DOM nodes."""
import re
from compat import *
import os
# On CPython use the stdlib DOM/SAX parsers; on Jython (os.name == "java")
# fall back to the Java XML APIs.  Both branches define the same
# parseDocument(s) entry point.
if os.name != "java":
    from xml.dom import minidom
    from xml.sax import saxutils

    def parseDocument(s):
        # Parse an XML string into a minidom Document.
        return minidom.parseString(s)
else:
    from javax.xml.parsers import *
    import java
    builder = DocumentBuilderFactory.newInstance().newDocumentBuilder()

    def parseDocument(s):
        # Feed the string's bytes to the Java DOM parser.
        stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes())
        return builder.parse(stream)
def parseAndStripWhitespace(s):
    """Parse the XML string s and return its root element with all
    pretty-printing whitespace removed; any parse or whitespace problem
    is re-raised as SyntaxError."""
    try:
        element = parseDocument(s).documentElement
    except BaseException, e:  # NOTE: Python-2-only "except X, e" syntax
        raise SyntaxError(str(e))
    stripWhitespace(element)
    return element
#Goes through a DOM tree and removes whitespace besides child elements,
#as long as this whitespace is correctly tab-ified
def stripWhitespace(element, tab=0):
    """Recursively delete the pretty-printing text nodes under element,
    requiring the document to be indented with exactly *tab* tabs at
    this depth; raises SyntaxError on any deviation."""
    element.normalize()
    # Expected whitespace before the closing tag / before each child.
    lastSpacer = "\n" + ("\t"*tab)
    spacer = lastSpacer + "\t"
    #Zero children aren't allowed (i.e. <empty/>)
    #This makes writing output simpler, and matches Canonical XML
    if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython
        raise SyntaxError("Empty XML elements not allowed")
    #If there's a single child, it must be text context
    if element.childNodes.length==1:
        if element.firstChild.nodeType == element.firstChild.TEXT_NODE:
            #If it's an empty element, remove
            if element.firstChild.data == lastSpacer:
                element.removeChild(element.firstChild)
            return
        #If not text content, give an error
        elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE:
            raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
        else:
            raise SyntaxError("Unexpected node type in XML document")
    #Otherwise there's multiple child element
    child = element.firstChild
    while child:
        if child.nodeType == child.ELEMENT_NODE:
            stripWhitespace(child, tab+1)
            child = child.nextSibling
        elif child.nodeType == child.TEXT_NODE:
            # Text between children must be exactly the indent string;
            # the final run (before the closing tag) is one tab shorter.
            if child == element.lastChild:
                if child.data != lastSpacer:
                    raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            elif child.data != spacer:
                raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            # NOTE(review): "next" shadows the builtin of the same name.
            next = child.nextSibling
            element.removeChild(child)
            child = next
        else:
            raise SyntaxError("Unexpected node type in XML document")
def checkName(element, name):
    """Verify that element is an ELEMENT_NODE and, when name is given,
    that its tag name equals *name*; raises SyntaxError otherwise."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Missing element: '%s'" % name)
    if name is not None and element.tagName != name:
        raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName))
def getChild(element, index, name=None):
    """Return child number *index* of element (optionally verifying its
    tag name via checkName); raises SyntaxError if it does not exist."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    node = element.childNodes.item(index)
    if node is None:
        raise SyntaxError("Missing child: '%s'" % name)
    checkName(node, name)
    return node
def getChildIter(element, index):
    """Return a small iterator over element's children starting at
    position *index*: next() yields successive children (None when
    exhausted) and checkEnd() verifies that every child was consumed."""
    class ChildIter:
        def __init__(self, element, index):
            self.element = element
            self.index = index
        def next(self):
            nodes = self.element.childNodes
            if self.index >= len(nodes):
                return None
            node = nodes.item(self.index)
            self.index += 1
            return node
        def checkEnd(self):
            if self.index != len(self.element.childNodes):
                raise SyntaxError("Too many elements under: '%s'" % self.element.tagName)
    return ChildIter(element, index)
def getChildOrNone(element, index):
    """Return child number *index* of element, or None when out of range."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    return element.childNodes.item(index)
def getLastChild(element, index, name=None):
    """Like getChild, but additionally require that the child at *index*
    is the last child of element; raises SyntaxError otherwise."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getLastChild()")
    node = element.childNodes.item(index)
    if node is None:
        raise SyntaxError("Missing child: '%s'" % name)
    if node is not element.lastChild:
        raise SyntaxError("Too many elements under: '%s'" % element.tagName)
    checkName(node, name)
    return node
#Regular expressions for syntax-checking attribute and element content
# NOTE(review): these are plain (non-raw) strings, so "\Z", "\d" and "\."
# rely on Python passing unrecognized escapes through unchanged; raw
# strings would be cleaner -- confirm nothing depends on this before
# changing them.
nsRegEx = "http://trevp.net/cryptoID\Z"
cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z"
urlRegEx = "http(s)?://.{1,100}\Z"
sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z"
base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z"
certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z"
keyRegEx = "[A-Z]\Z"
keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z"
dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z"
shortStringRegEx = ".{1,100}\Z"
exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z"
notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1
booleanRegEx = "(true)|(false)"
def getReqAttribute(element, attrName, regEx=""):
    """Return and remove the required attribute *attrName*, validating
    its value against *regEx*; raises SyntaxError when missing/invalid."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getReqAttribute()")
    raw = element.getAttribute(attrName)
    if not raw:
        raise SyntaxError("Missing Attribute: " + attrName)
    if re.match(regEx, raw) is None:
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, raw))
    element.removeAttribute(attrName)
    return str(raw) #de-unicode it; this is needed for bsddb, for example
def getAttribute(element, attrName, regEx=""):
    """Return and remove attribute *attrName* if present (validated
    against *regEx*); return None when the attribute is absent."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getAttribute()")
    raw = element.getAttribute(attrName)
    if not raw:
        return None
    if re.match(regEx, raw) is None:
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, raw))
    element.removeAttribute(attrName)
    return str(raw) #de-unicode it; this is needed for bsddb, for example
def checkNoMoreAttributes(element):
    """Raise SyntaxError unless every attribute of *element* has been
    consumed (the getAttribute helpers remove attributes as they read them,
    so any remaining attribute is unexpected)."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in checkNoMoreAttributes()")
    if element.attributes.length:
        raise SyntaxError("Extra attributes on '%s'" % element.tagName)
def getText(element, regEx=""):
    """Return the text content of *element* as a plain string.

    :raises SyntaxError: if the element is empty, its first child is not a
        text node, or the text fails to match *regEx*
    """
    node = element.firstChild
    if node is None:
        raise SyntaxError("Empty element '%s'" % element.tagName)
    if node.nodeType != node.TEXT_NODE:
        raise SyntaxError("Non-text node: '%s'" % element.tagName)
    if not re.match(regEx, node.data):
        raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, node.data))
    # De-unicode for consumers (e.g. bsddb) that require byte strings.
    return str(node.data)
# Function for adding tabs to a string
def indent(s, steps, ch="\t"):
    """Prefix every line of *s* with ``steps`` copies of *ch*.

    A trailing newline is preserved without indenting the empty final line.
    Fixes over the original: the two branches were identical except for the
    trailing-newline cleanup (merged); an empty string no longer raises
    IndexError; and steps == 0 no longer truncates the whole string
    (``s[:-0]`` is the empty slice).
    """
    if not s:
        return s
    tabs = ch * steps
    had_trailing_newline = s.endswith("\n")
    s = tabs + s.replace("\n", "\n" + tabs)
    if had_trailing_newline and tabs:
        # Drop the indent that replace() added after the final newline.
        s = s[:-len(tabs)]
    return s
def escape(s):
    """XML-escape '&', '<' and '>' in *s* via xml.sax.saxutils.escape."""
    return saxutils.escape(s)
| bsd-3-clause |
pkuyym/Paddle | python/paddle/dataset/flowers.py | 3 | 7010 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module will download dataset from
http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html
and parse train/test set into paddle reader creators.
This set contains images of flowers belonging to 102 different categories.
The images were acquired by searching the web and taking pictures. There are a
minimum of 40 images for each category.
The database was used in:
Nilsback, M-E. and Zisserman, A. Automated flower classification over a large
number of classes.Proceedings of the Indian Conference on Computer Vision,
Graphics and Image Processing (2008)
http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}.
"""
import cPickle
import itertools
import functools
from common import download
import tarfile
import scipy.io as scio
from paddle.dataset.image import *
from paddle.reader import *
import os
import numpy as np
from multiprocessing import cpu_count
__all__ = ['train', 'test', 'valid']
DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz'
LABEL_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat'
SETID_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat'
DATA_MD5 = '33bfc11892f1e405ca193ae9a9f2a118'
LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'
SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'
# In official 'readme', tstid is the flag of test data
# and trnid is the flag of train data. But test data is more than train data.
# So we exchange the train data and test data.
TRAIN_FLAG = 'tstid'
TEST_FLAG = 'trnid'
VALID_FLAG = 'valid'
def default_mapper(is_train, sample):
    '''
    Map one (image-bytes, label) sample to the form needed by the model
    input layer: decode the bytes, apply simple_transform (256 -> 224 with
    the given channel means; is_train selects the training-time transform
    path), then flatten to a float32 vector.  The label passes through
    unchanged.
    '''
    raw_bytes, label = sample
    image = load_image_bytes(raw_bytes)
    image = simple_transform(
        image, 256, 224, is_train, mean=[103.94, 116.78, 123.68])
    return image.flatten().astype('float32'), label
# Pre-bound mappers: train_mapper runs default_mapper with is_train=True,
# test_mapper with is_train=False.
train_mapper = functools.partial(default_mapper, True)
test_mapper = functools.partial(default_mapper, False)
def reader_creator(data_file,
                   label_file,
                   setid_file,
                   dataset_name,
                   mapper,
                   buffered_size=1024,
                   use_xmap=True):
    '''
    1. read images from tar file and
        merge images into batch files in 102flowers.tgz_batch/
    2. get a reader to read sample from batch file

    :param data_file: downloaded data file
    :type data_file: string
    :param label_file: downloaded label file
    :type label_file: string
    :param setid_file: downloaded setid file containing information
                        about how to split dataset
    :type setid_file: string
    :param dataset_name: data set name (tstid|trnid|valid)
    :type dataset_name: string
    :param mapper: a function to map image bytes data to type
                    needed by model input layer
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: data reader
    :rtype: callable
    '''
    labels = scio.loadmat(label_file)['labels'][0]
    indexes = scio.loadmat(setid_file)[dataset_name][0]
    img2label = {}
    for i in indexes:
        img = "jpg/image_%05d.jpg" % i
        # setid indexes are 1-based; labels array is 0-based.
        img2label[img] = labels[i - 1]
    file_list = batch_images_from_tar(data_file, dataset_name, img2label)

    def reader():
        # Each line of file_list names one pickled batch file.
        # Fixes: close the list file via a context manager, avoid shadowing
        # the 'file' builtin, and drop the dead 'labels' local (the original
        # assigned batch['label'] to it but then re-read batch['label']).
        with open(file_list) as list_file:
            for batch_path in list_file:
                batch_path = batch_path.strip()
                with open(batch_path, 'r') as f:
                    batch = cPickle.load(f)
                data = batch['data']
                batch_labels = batch['label']
                # Labels in the .mat file are 1-based; yield them 0-based.
                for sample, label in itertools.izip(data, batch_labels):
                    yield sample, int(label) - 1

    if use_xmap:
        return xmap_readers(mapper, reader, cpu_count(), buffered_size)
    else:
        return map_readers(mapper, reader)
def train(mapper=train_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers training set reader.

    Each sample produced by the reader is image pixels in [0, 1] and a
    label in [1, 102], obtained from the original color image by resizing
    to 256*256, random-cropping to 224*224 and flattening.

    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: train data reader
    :rtype: callable
    '''
    data_path = download(DATA_URL, 'flowers', DATA_MD5)
    label_path = download(LABEL_URL, 'flowers', LABEL_MD5)
    setid_path = download(SETID_URL, 'flowers', SETID_MD5)
    return reader_creator(data_path, label_path, setid_path, TRAIN_FLAG,
                          mapper, buffered_size, use_xmap)
def test(mapper=test_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers test set reader.

    Each sample produced by the reader is image pixels in [0, 1] and a
    label in [1, 102], obtained from the original color image by resizing
    to 256*256, random-cropping to 224*224 and flattening.

    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: test data reader
    :rtype: callable
    '''
    data_path = download(DATA_URL, 'flowers', DATA_MD5)
    label_path = download(LABEL_URL, 'flowers', LABEL_MD5)
    setid_path = download(SETID_URL, 'flowers', SETID_MD5)
    return reader_creator(data_path, label_path, setid_path, TEST_FLAG,
                          mapper, buffered_size, use_xmap)
def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers validation set reader.

    Each sample produced by the reader is image pixels in [0, 1] and a
    label in [1, 102], obtained from the original color image by resizing
    to 256*256, random-cropping to 224*224 and flattening.

    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: test data reader
    :rtype: callable
    '''
    data_path = download(DATA_URL, 'flowers', DATA_MD5)
    label_path = download(LABEL_URL, 'flowers', LABEL_MD5)
    setid_path = download(SETID_URL, 'flowers', SETID_MD5)
    return reader_creator(data_path, label_path, setid_path, VALID_FLAG,
                          mapper, buffered_size, use_xmap)
def fetch():
    """Download all flowers dataset files (data, labels, set ids) without
    constructing a reader; useful for pre-populating the cache."""
    for url, md5 in ((DATA_URL, DATA_MD5),
                     (LABEL_URL, LABEL_MD5),
                     (SETID_URL, SETID_MD5)):
        download(url, 'flowers', md5)
| apache-2.0 |
mbernasocchi/QGIS | python/plugins/processing/algs/grass7/ext/v_distance.py | 45 | 2340 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_distance.py
-------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from qgis.core import QgsProcessingParameterDefinition
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Verify that the 'upload' and 'column' parameters have the same
    number of entries; v.distance requires one column per upload value."""
    uploads = alg.parameterAsEnums(parameters, 'upload', context)
    columns = alg.parameterAsFields(parameters, 'column', context)
    if len(columns) == len(uploads):
        return True, None
    return False, alg.tr(u"The number of columns and the number of upload parameters should be equal!")
def processCommand(alg, parameters, context, feedback):
    # Hide the from_output parameter so the base processCommand() does not
    # pass it to the GRASS command; it is handled in processOutputs().
    # We need to disable only from_output parameter
    fromOutput = alg.parameterDefinition('from_output')
    fromOutput.setFlags(fromOutput.flags() | QgsProcessingParameterDefinition.FlagHidden)
    alg.processCommand(parameters, context, feedback, False)
    # NOTE(review): this second call ORs FlagHidden in again, identically to
    # the first.  If the intent was to restore visibility afterwards it
    # should clear the flag (flags() & ~FlagHidden) -- confirm against the
    # other grass7/ext scripts before changing.
    fromOutput.setFlags(fromOutput.flags() | QgsProcessingParameterDefinition.FlagHidden)
def processOutputs(alg, parameters, context, feedback):
    """Export the v.distance result layer, plus a copy of the initial
    input layer for the from_output destination."""
    alg.vectorOutputType(parameters, context)
    alg.exportVectorLayerFromParameter('output', parameters, context)
    # for from_output, we export the initial layer
    out_path = alg.parameterAsOutputLayer(parameters, 'from_output', context)
    grass_layer = alg.exportedLayers['from']
    alg.exportVectorLayer(grass_layer, out_path)
| gpl-2.0 |
blueburningcoder/nupic | examples/opf/experiments/anomaly/temporal/hotgym/description.py | 6 | 16403 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 12,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
},
'anomalyParams': {
'mode': 'likelihood', # pure(=default) / weighted / likelihood
'slidingWindowSize': 5, # >=0 / None
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1, 5],
'predictedField': 'consumption',
'numRecords': 4000,
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    # NOTE: assert is stripped under 'python -O'; relies on debug runs to
    # catch a non-positive step count.
    assert (predictionSteps >= 1)
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',

    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
    #
    'dataset' : {
        u'info': u'test_hotgym',
        u'streams': [ { u'columns': [u'*'],
                        u'info': u'hotGym.csv',
                        u'last_record': config['numRecords'],
                        u'source': u'file://extra/hotgym/hotgym.csv'}],
        'aggregation': config['aggregationInfo'],
        u'version': 1},

    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs":{'predictedField': config['predictedField'],
                     'predictionSteps': config['predictionSteps']},

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    # (starts empty; populated by the multi-step metric loop further below)
    'metrics':[],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*aae.*'],
}
# Add multi-step prediction metrics: for every prediction horizon, register
# both the model's multi-step predictions and the 'trivial' baseline
# predictor, each scored under the aae and altMAPE error metrics.
for steps in config['predictionSteps']:
    for errorMetric in ('aae', 'altMAPE'):
        for metricName, inferenceElement in (
                ('multiStep', 'multiStepBestPredictions'),
                ('trivial', 'prediction')):
            control['metrics'].append(
                MetricSpec(field=config['predictedField'],
                           metric=metricName,
                           inferenceElement=inferenceElement,
                           params={'errorMetric': errorMetric,
                                   'window': 1000,
                                   'steps': steps}))
# Entry point consumed by the OPF experiment runner: bundles the model
# configuration and the control (data/metrics) specification.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| agpl-3.0 |
paulmartel/voltdb | third_party/cpp/googletest/googletest/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
    # Minimal assertion helper: print both values before failing so the test
    # log shows the mismatch.  (Python 2 print statements -- this script
    # predates print_function.)
    if expected != actual:
        print 'Expected: %s' % (expected,)
        print ' Actual: %s' % (actual,)
        raise AssertionError
def SetEnvVar(env_var, value):
    """Sets the env variable to 'value'; unsets it when 'value' is None."""
    if value is None:
        # pop with a default is a no-op when the variable is absent.
        environ.pop(env_var, None)
    else:
        environ[env_var] = value
def GetFlag(flag):
    """Runs gtest_env_var_test_ (with the optional flag argument) under the
    current 'environ' and returns the process output."""
    args = [COMMAND] if flag is None else [COMMAND, flag]
    return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
    """Checks that GTEST_<FLAG> drives the corresponding flag, and that
    clearing the variable restores the default value."""
    env_var = 'GTEST_' + flag.upper()
    for env_value, expected in ((test_val, test_val), (None, default_val)):
        SetEnvVar(env_var, env_value)
        AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
    # One test case per gtest flag: each TestFlag() call supplies the flag
    # name, a non-default value to set via GTEST_<FLAG>, and the expected
    # default when the variable is unset.

    def testEnvVarAffectsFlag(self):
        """Tests that environment variable should affect the corresponding flag."""
        TestFlag('break_on_failure', '1', '0')
        TestFlag('color', 'yes', 'auto')
        TestFlag('filter', 'FooTest.Bar', '*')
        TestFlag('output', 'xml:tmp/foo.xml', '')
        TestFlag('print_time', '0', '1')
        TestFlag('repeat', '999', '1')
        TestFlag('throw_on_failure', '1', '0')
        TestFlag('death_test_style', 'threadsafe', 'fast')
        TestFlag('catch_exceptions', '0', '1')
        # death_test_use_fork only exists on Linux builds.
        if IS_LINUX:
            TestFlag('death_test_use_fork', '1', '0')
            TestFlag('stack_trace_depth', '0', '100')
# Script entry point: delegates to the shared gtest test runner.
if __name__ == '__main__':
    gtest_test_utils.Main()
| agpl-3.0 |
willingc/oh-mainline | vendor/packages/Django/django/utils/importlib.py | 124 | 1228 | # Taken from Python 2.7 with permission from/by the original author.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        # The relative-import level is the number of leading dots.
        level = len(name) - len(name.lstrip('.'))
        name = _resolve_name(name[level:], package, level)
    __import__(name)
    # __import__ returns the top-level package; fetch the actual submodule.
    return sys.modules[name]
| agpl-3.0 |
MycChiu/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 96 | 8140 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
ds = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
    """Checks cls.param_shapes() against the shapes of actual samples.

    For each scalar distribution class and several sample shapes, builds
    random parameters of the shapes reported by param_shapes(sample_shape)
    and verifies that sample() then produces tensors of exactly that
    sample_shape, and that copy() preserves both sampling behavior and the
    constructor parameters.
    """
    classes = [
        ds.Normal,
        ds.Bernoulli,
        ds.Beta,
        ds.Chi2,
        ds.Exponential,
        ds.Gamma,
        ds.InverseGamma,
        ds.Laplace,
        ds.StudentT,
        ds.Uniform,
    ]
    sample_shapes = [(), (10,), (10, 20, 30)]
    with self.test_session():
        for cls in classes:
            for sample_shape in sample_shapes:
                # param_shapes maps parameter name -> required shape for
                # samples of sample_shape.
                param_shapes = cls.param_shapes(sample_shape)
                params = dict([(name, random_ops.random_normal(shape))
                               for name, shape in param_shapes.items()])
                dist = cls(**params)
                self.assertAllEqual(sample_shape,
                                    array_ops.shape(dist.sample()).eval())
                dist_copy = dist.copy()
                self.assertAllEqual(sample_shape,
                                    array_ops.shape(dist_copy.sample()).eval())
                self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
    """Checks that copy() reproduces constructor parameters exactly."""
    with self.test_session():
        # Note: we cannot easily test all distributions since each requires
        # different initialization arguments. We therefore spot test a few.
        normal = ds.Normal(loc=1., scale=2., validate_args=True)
        self.assertEqual(normal.parameters, normal.copy().parameters)
        wishart = ds.WishartFull(df=2, scale=[[1., 2], [2, 5]],
                                 validate_args=True)
        self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
    """Checks that copy(**overrides) changes only the overridden parameter."""
    with self.test_session():
        normal = ds.Normal(loc=1., scale=2., validate_args=True)
        unused_normal_copy = normal.copy(validate_args=False)
        base_params = normal.parameters.copy()
        copy_params = normal.copy(validate_args=False).parameters.copy()
        # validate_args must differ between base and copy ...
        self.assertNotEqual(
            base_params.pop("validate_args"), copy_params.pop("validate_args"))
        # ... while every other parameter is unchanged.
        self.assertEqual(base_params, copy_params)
def testIsScalar(self):
    """Exercises is_scalar_event/is_scalar_batch and _is_scalar_helper.

    Scalar-parameter Normal has scalar event and batch; list parameters
    give a non-scalar batch; MultivariateNormalDiag always has a
    non-scalar (vector) event.
    """
    with self.test_session():
        mu = 1.
        sigma = 2.
        normal = ds.Normal(mu, sigma, validate_args=True)
        self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
        self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
        normal = ds.Normal([mu], [sigma], validate_args=True)
        self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
        self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
        mvn = ds.MultivariateNormalDiag([mu], [sigma], validate_args=True)
        self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
        self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
        mvn = ds.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
        self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
        self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
        # We now test every codepath within the underlying is_scalar_helper
        # function.
        # Test case 1, 2: static shape known -> dynamic-shape callable must
        # never be invoked.
        x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
        # None would fire an exception were it actually executed.
        self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
        self.assertTrue(
            normal._is_scalar_helper(tensor_shape.TensorShape(None),
                                     lambda: array_ops.shape(x)))
        x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
        # None would fire an exception were it actually executed.
        self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
        self.assertFalse(
            normal._is_scalar_helper(tensor_shape.TensorShape(None),
                                     lambda: array_ops.shape(x)))
        # Test case 3: fully unknown static shape -> result is a Tensor
        # evaluated from the dynamic shape.
        x = array_ops.placeholder(dtype=dtypes.int32)
        is_scalar = normal._is_scalar_helper(x.get_shape(),
                                             lambda: array_ops.shape(x))
        self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
        self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
    """Return a minimal Distribution subclass with fixed static shapes.

    Used to test _set_sample_static_shape with controlled batch/event
    shapes (including partially-unknown ones) without the machinery of a
    real distribution.
    """
    class FakeDistribution(ds.Distribution):
        """Fake Distribution for testing _set_sample_static_shape."""

        def __init__(self, batch_shape=None, event_shape=None):
            # Static shapes are fixed at construction; None entries model
            # unknown dimensions.
            self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
            self._static_event_shape = tensor_shape.TensorShape(event_shape)
            super(FakeDistribution, self).__init__(
                dtype=dtypes.float32,
                reparameterization_type=distributions.NOT_REPARAMETERIZED,
                validate_args=True,
                allow_nan_stats=True,
                name="DummyDistribution")

        def _batch_shape(self):
            return self._static_batch_shape

        def _event_shape(self):
            return self._static_event_shape

    return FakeDistribution
def testSampleShapeHints(self):
    # Verifies that _set_sample_static_shape prepends the sample shape to
    # the (possibly partially-known) batch and event shapes, and degrades
    # to a fully-unknown shape when batch/event shapes are unknown.
    fake_distribution = self._GetFakeDistribution()
    with self.test_session():
        # Make a new session since we're playing with static shapes. [And below.]
        x = array_ops.placeholder(dtype=dtypes.float32)
        dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
        sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        # We use as_list since TensorShape comparison does not work correctly for
        # unknown values, ie, Dimension(None).
        self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
    with self.test_session():
        # Partially-known batch shape is preserved in place.
        x = array_ops.placeholder(dtype=dtypes.float32)
        dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
        sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
    with self.test_session():
        x = array_ops.placeholder(dtype=dtypes.float32)
        dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
        sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
    with self.test_session():
        # Fully-unknown batch/event shape => unknown result rank.
        x = array_ops.placeholder(dtype=dtypes.float32)
        dist = fake_distribution(batch_shape=None, event_shape=None)
        sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        self.assertTrue(y.get_shape().ndims is None)
    with self.test_session():
        # Unknown event shape alone is enough to lose all rank information.
        x = array_ops.placeholder(dtype=dtypes.float32)
        dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
        sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        self.assertTrue(y.get_shape().ndims is None)
# Standard TensorFlow test entry point: discovers and runs the test cases
# in this module.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
rs2/bokeh | examples/models/server/widgets.py | 3 | 2268 | from __future__ import print_function
from datetime import date
import random
random.seed(1)
from bokeh.client import push_session
from bokeh.io import curdoc
from bokeh.models.glyphs import Line, Circle
from bokeh.models import (
Plot, ColumnDataSource, DataRange1d,
LinearAxis, DatetimeAxis, Grid, HoverTool
)
from bokeh.models.widgets import (
Button, TableColumn, DataTable,
DateEditor, DateFormatter, IntEditor)
from bokeh.models.layouts import WidgetBox, Column
# Number of rows in the demo dataset.
N = 5

def make_data():
    """Return a fresh random dataset for the demo.

    The dict maps ``dates`` to N consecutive March 2014 dates and
    ``downloads`` to N random counts in [0, 100].
    """
    days = [date(2014, 3, day + 1) for day in range(N)]
    counts = [random.randint(0, 100) for _ in range(N)]
    return dict(dates=days, downloads=counts)
# Shared data source driving both the plot glyphs and the data table;
# click_handler() replaces its .data wholesale to refresh the page.
source = ColumnDataSource(make_data())
def make_plot():
    """Build the downloads-over-time plot wired to the shared data source.

    Returns a ``(plot, source)`` pair, where ``source`` is the module-level
    ColumnDataSource shared with the data table.
    """
    fig = Plot(x_range=DataRange1d(), y_range=DataRange1d(),
               plot_width=400, plot_height=400)
    fig.title.text = "Product downloads"
    # Both glyphs render the same columns from the shared source.
    fig.add_glyph(source, Line(x="dates", y="downloads", line_color="blue"))
    fig.add_glyph(source, Circle(x="dates", y="downloads", fill_color="red"))
    date_axis = DatetimeAxis()
    count_axis = LinearAxis()
    fig.add_layout(date_axis, 'below')
    fig.add_layout(count_axis, 'left')
    fig.add_layout(Grid(dimension=0, ticker=date_axis.ticker))
    fig.add_layout(Grid(dimension=1, ticker=count_axis.ticker))
    fig.add_tools(HoverTool(tooltips=dict(downloads="@downloads")))
    return fig, source
def click_handler():
    """Button callback: swap in a freshly randomized dataset."""
    source.data = make_data()
def make_layout():
    """Assemble the page: randomize button, plot, and an editable table."""
    plot, table_source = make_plot()
    date_col = TableColumn(field="dates", title="Date",
                           editor=DateEditor(), formatter=DateFormatter())
    downloads_col = TableColumn(field="downloads", title="Downloads",
                                editor=IntEditor())
    table = DataTable(source=table_source, columns=[date_col, downloads_col],
                      width=400, height=400, editable=True)
    randomize = Button(label="Randomize data", button_type="success")
    randomize.on_click(click_handler)
    return Column(children=[
        WidgetBox(children=[randomize], width=400),
        plot,
        table,
    ])
# Build the document once at import time so `bokeh serve` can pick it up.
layout = make_layout()
document = curdoc()
document.add_root(layout)

# When run as a plain script, push the document to a running Bokeh server
# and keep the session alive until the browser tab is closed.
if __name__ == "__main__":
    print("\npress ctrl-C to exit")
    session = push_session(document)
    session.show()
    session.loop_until_closed()
| bsd-3-clause |
40223101/w17test | static/Brython3.1.0-20150301-090019/Lib/fractions.py | 722 | 23203 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
    """Calculate the Greatest Common Divisor of a and b.

    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    """
    # Iterative Euclidean algorithm; Python's % already gives the
    # remainder the sign of the divisor, which produces the documented
    # sign behavior.
    x, y = a, b
    while y != 0:
        x, y = y, x % y
    return x
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
    """This class implements rational numbers.

    In the two-argument form of the constructor, Fraction(8, 6) will
    produce a rational number equivalent to 4/3. Both arguments must
    be Rational. The numerator defaults to 0 and the denominator
    defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.

    Fractions can also be constructed from:

      - numeric strings similar to those accepted by the
        float constructor (for example, '-2.3' or '1e10')

      - strings of the form '123/456'

      - float and Decimal instances

      - other Rational instances (including integers)
    """

    # Immutable value type: no per-instance __dict__.
    __slots__ = ('_numerator', '_denominator')

    # We're immutable, so use __new__ not __init__
    def __new__(cls, numerator=0, denominator=None):
        """Constructs a Rational.

        Takes a string like '3/2' or '1.5', another Rational instance, a
        numerator/denominator pair, or a float.

        Examples
        --------
        >>> Fraction(10, -8)
        Fraction(-5, 4)
        >>> Fraction(Fraction(1, 7), 5)
        Fraction(1, 35)
        >>> Fraction(Fraction(1, 7), Fraction(2, 3))
        Fraction(3, 14)
        >>> Fraction('314')
        Fraction(314, 1)
        >>> Fraction('-35/4')
        Fraction(-35, 4)
        >>> Fraction('3.1415') # conversion from numeric string
        Fraction(6283, 2000)
        >>> Fraction('-47e-2') # string may include a decimal exponent
        Fraction(-47, 100)
        >>> Fraction(1.47)  # direct construction from float (exact conversion)
        Fraction(6620291452234629, 4503599627370496)
        >>> Fraction(2.25)
        Fraction(9, 4)
        >>> Fraction(Decimal('1.47'))
        Fraction(147, 100)

        """
        self = super(Fraction, cls).__new__(cls)

        if denominator is None:
            # Single-argument forms: Rational, float, Decimal or string.
            if isinstance(numerator, numbers.Rational):
                self._numerator = numerator.numerator
                self._denominator = numerator.denominator
                return self

            elif isinstance(numerator, float):
                # Exact conversion from float
                value = Fraction.from_float(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, Decimal):
                value = Fraction.from_decimal(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, str):
                # Handle construction from strings.
                m = _RATIONAL_FORMAT.match(numerator)
                if m is None:
                    raise ValueError('Invalid literal for Fraction: %r' %
                                     numerator)
                numerator = int(m.group('num') or '0')
                denom = m.group('denom')
                if denom:
                    denominator = int(denom)
                else:
                    denominator = 1
                    decimal = m.group('decimal')
                    if decimal:
                        # Shift the decimal digits into the numerator.
                        scale = 10**len(decimal)
                        numerator = numerator * scale + int(decimal)
                        denominator *= scale
                    exp = m.group('exp')
                    if exp:
                        exp = int(exp)
                        if exp >= 0:
                            numerator *= 10**exp
                        else:
                            denominator *= 10**-exp
                if m.group('sign') == '-':
                    numerator = -numerator

            else:
                raise TypeError("argument should be a string "
                                "or a Rational instance")

        elif (isinstance(numerator, numbers.Rational) and
              isinstance(denominator, numbers.Rational)):
            # Fraction(a/b, c/d) == Fraction(a*d, b*c).
            numerator, denominator = (
                numerator.numerator * denominator.denominator,
                denominator.numerator * numerator.denominator
            )
        else:
            raise TypeError("both arguments should be "
                            "Rational instances")

        if denominator == 0:
            raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
        # Normalize: gcd carries the sign of the denominator, so the
        # stored denominator always ends up positive.
        g = gcd(numerator, denominator)
        self._numerator = numerator // g
        self._denominator = denominator // g
        return self

    @classmethod
    def from_float(cls, f):
        """Converts a finite float to a rational number, exactly.

        Beware that Fraction.from_float(0.3) != Fraction(3, 10).

        """
        if isinstance(f, numbers.Integral):
            return cls(f)
        elif not isinstance(f, float):
            raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
                            (cls.__name__, f, type(f).__name__))
        if math.isnan(f):
            raise ValueError("Cannot convert %r to %s." % (f, cls.__name__))
        if math.isinf(f):
            raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__))
        return cls(*f.as_integer_ratio())

    @classmethod
    def from_decimal(cls, dec):
        """Converts a finite Decimal instance to a rational number, exactly."""
        from decimal import Decimal
        if isinstance(dec, numbers.Integral):
            dec = Decimal(int(dec))
        elif not isinstance(dec, Decimal):
            raise TypeError(
                "%s.from_decimal() only takes Decimals, not %r (%s)" %
                (cls.__name__, dec, type(dec).__name__))
        if dec.is_infinite():
            raise OverflowError(
                "Cannot convert %s to %s." % (dec, cls.__name__))
        if dec.is_nan():
            raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__))
        sign, digits, exp = dec.as_tuple()
        digits = int(''.join(map(str, digits)))
        if sign:
            digits = -digits
        if exp >= 0:
            return cls(digits * 10 ** exp)
        else:
            return cls(digits, 10 ** -exp)

    def limit_denominator(self, max_denominator=1000000):
        """Closest Fraction to self with denominator at most max_denominator.

        >>> Fraction('3.141592653589793').limit_denominator(10)
        Fraction(22, 7)
        >>> Fraction('3.141592653589793').limit_denominator(100)
        Fraction(311, 99)
        >>> Fraction(4321, 8765).limit_denominator(10000)
        Fraction(4321, 8765)

        """
        # Algorithm notes: For any real number x, define a *best upper
        # approximation* to x to be a rational number p/q such that:
        #
        #   (1) p/q >= x, and
        #   (2) if p/q > r/s >= x then s > q, for any rational r/s.
        #
        # Define *best lower approximation* similarly.  Then it can be
        # proved that a rational number is a best upper or lower
        # approximation to x if, and only if, it is a convergent or
        # semiconvergent of the (unique shortest) continued fraction
        # associated to x.
        #
        # To find a best rational approximation with denominator <= M,
        # we find the best upper and lower approximations with
        # denominator <= M and take whichever of these is closer to x.
        # In the event of a tie, the bound with smaller denominator is
        # chosen.  If both denominators are equal (which can happen
        # only when max_denominator == 1 and self is midway between
        # two integers) the lower bound---i.e., the floor of self, is
        # taken.
        if max_denominator < 1:
            raise ValueError("max_denominator should be at least 1")
        if self._denominator <= max_denominator:
            return Fraction(self)

        # Continued-fraction expansion of self, tracking the last two
        # convergents (p0/q0 and p1/q1).
        p0, q0, p1, q1 = 0, 1, 1, 0
        n, d = self._numerator, self._denominator
        while True:
            a = n//d
            q2 = q0+a*q1
            if q2 > max_denominator:
                break
            p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
            n, d = d, n-a*d

        k = (max_denominator-q0)//q1
        bound1 = Fraction(p0+k*p1, q0+k*q1)
        bound2 = Fraction(p1, q1)
        if abs(bound2 - self) <= abs(bound1-self):
            return bound2
        else:
            return bound1

    @property
    def numerator(a):
        return a._numerator

    @property
    def denominator(a):
        return a._denominator

    def __repr__(self):
        """repr(self)"""
        return ('Fraction(%s, %s)' % (self._numerator, self._denominator))

    def __str__(self):
        """str(self)"""
        if self._denominator == 1:
            return str(self._numerator)
        else:
            return '%s/%s' % (self._numerator, self._denominator)

    def _operator_fallbacks(monomorphic_operator, fallback_operator):
        """Generates forward and reverse operators given a purely-rational
        operator and a function from the operator module.

        Use this like:
            __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)

        In general, we want mixed-mode operations either to call an
        implementation whose author knew about the types of both
        arguments, or to convert both to the nearest built-in type and do
        the operation there.

        The generated __op__ handles int/Fraction exactly, falls back to
        float or complex arithmetic for those known types, and otherwise
        returns NotImplemented so the other operand's __rop__ gets a
        chance (it may know about Fraction even when we do not know about
        it -- this covers Fraction subclasses and smarter third-party
        types).

        The generated __rop__ is the last resort: at that point nothing
        is left to defer to, so it converts any Rational operand to
        Fraction and any other Real/Complex operand to float/complex
        rather than give up, since returning NotImplemented there would
        surface as a TypeError for the user.
        """
        def forward(a, b):
            if isinstance(b, (int, Fraction)):
                return monomorphic_operator(a, b)
            elif isinstance(b, float):
                return fallback_operator(float(a), b)
            elif isinstance(b, complex):
                return fallback_operator(complex(a), b)
            else:
                return NotImplemented
        forward.__name__ = '__' + fallback_operator.__name__ + '__'
        forward.__doc__ = monomorphic_operator.__doc__

        def reverse(b, a):
            if isinstance(a, numbers.Rational):
                # Includes ints.
                return monomorphic_operator(a, b)
            elif isinstance(a, numbers.Real):
                return fallback_operator(float(a), float(b))
            elif isinstance(a, numbers.Complex):
                return fallback_operator(complex(a), complex(b))
            else:
                return NotImplemented
        reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
        reverse.__doc__ = monomorphic_operator.__doc__

        return forward, reverse

    def _add(a, b):
        """a + b"""
        return Fraction(a.numerator * b.denominator +
                        b.numerator * a.denominator,
                        a.denominator * b.denominator)

    __add__, __radd__ = _operator_fallbacks(_add, operator.add)

    def _sub(a, b):
        """a - b"""
        return Fraction(a.numerator * b.denominator -
                        b.numerator * a.denominator,
                        a.denominator * b.denominator)

    __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)

    def _mul(a, b):
        """a * b"""
        return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)

    __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)

    def _div(a, b):
        """a / b"""
        return Fraction(a.numerator * b.denominator,
                        a.denominator * b.numerator)

    __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)

    def __floordiv__(a, b):
        """a // b"""
        return math.floor(a / b)

    def __rfloordiv__(b, a):
        """a // b"""
        return math.floor(a / b)

    def __mod__(a, b):
        """a % b"""
        div = a // b
        return a - b * div

    def __rmod__(b, a):
        """a % b"""
        div = a // b
        return a - b * div

    def __pow__(a, b):
        """a ** b

        If b is not an integer, the result will be a float or complex
        since roots are generally irrational. If b is an integer, the
        result will be rational.

        """
        if isinstance(b, numbers.Rational):
            if b.denominator == 1:
                power = b.numerator
                if power >= 0:
                    return Fraction(a._numerator ** power,
                                    a._denominator ** power)
                else:
                    # Negative integer power: invert, keeping the result
                    # normalized via the constructor.
                    return Fraction(a._denominator ** -power,
                                    a._numerator ** -power)
            else:
                # A fractional power will generally produce an
                # irrational number.
                return float(a) ** float(b)
        else:
            return float(a) ** b

    def __rpow__(b, a):
        """a ** b"""
        if b._denominator == 1 and b._numerator >= 0:
            # If a is an int, keep it that way if possible.
            return a ** b._numerator

        if isinstance(a, numbers.Rational):
            return Fraction(a.numerator, a.denominator) ** b

        if b._denominator == 1:
            return a ** b._numerator

        return a ** float(b)

    def __pos__(a):
        """+a: Coerces a subclass instance to Fraction"""
        return Fraction(a._numerator, a._denominator)

    def __neg__(a):
        """-a"""
        return Fraction(-a._numerator, a._denominator)

    def __abs__(a):
        """abs(a)"""
        return Fraction(abs(a._numerator), a._denominator)

    def __trunc__(a):
        """trunc(a)"""
        if a._numerator < 0:
            return -(-a._numerator // a._denominator)
        else:
            return a._numerator // a._denominator

    def __floor__(a):
        """Will be math.floor(a) in 3.0."""
        return a.numerator // a.denominator

    def __ceil__(a):
        """Will be math.ceil(a) in 3.0."""
        # The negations cleverly convince floordiv to return the ceiling.
        return -(-a.numerator // a.denominator)

    def __round__(self, ndigits=None):
        """Will be round(self, ndigits) in 3.0.

        Rounds half toward even.
        """
        if ndigits is None:
            floor, remainder = divmod(self.numerator, self.denominator)
            if remainder * 2 < self.denominator:
                return floor
            elif remainder * 2 > self.denominator:
                return floor + 1
            # Deal with the half case:
            elif floor % 2 == 0:
                return floor
            else:
                return floor + 1
        shift = 10**abs(ndigits)
        # See _operator_fallbacks.forward to check that the results of
        # these operations will always be Fraction and therefore have
        # round().
        if ndigits > 0:
            return Fraction(round(self * shift), shift)
        else:
            return Fraction(round(self / shift) * shift)

    def __hash__(self):
        """hash(self)"""

        # XXX since this method is expensive, consider caching the result

        # In order to make sure that the hash of a Fraction agrees
        # with the hash of a numerically equal integer, float or
        # Decimal instance, we follow the rules for numeric hashes
        # outlined in the documentation.  (See library docs, 'Built-in
        # Types').

        # dinv is the inverse of self._denominator modulo the prime
        # _PyHASH_MODULUS, or 0 if self._denominator is divisible by
        # _PyHASH_MODULUS.  (By Fermat's little theorem, d**(P-2) is the
        # modular inverse of d when P is prime and d is not divisible by P.)
        dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
        if not dinv:
            hash_ = _PyHASH_INF
        else:
            hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
        result = hash_ if self >= 0 else -hash_
        # -1 is reserved as an error signal at the C level.
        return -2 if result == -1 else result

    def __eq__(a, b):
        """a == b"""
        if isinstance(b, numbers.Rational):
            return (a._numerator == b.numerator and
                    a._denominator == b.denominator)
        if isinstance(b, numbers.Complex) and b.imag == 0:
            b = b.real
        if isinstance(b, float):
            if math.isnan(b) or math.isinf(b):
                # comparisons with an infinity or nan should behave in
                # the same way for any finite a, so treat a as zero.
                return 0.0 == b
            else:
                # Exact comparison: convert the float, not the Fraction.
                return a == a.from_float(b)
        else:
            # Since a doesn't know how to compare with b, let's give b
            # a chance to compare itself with a.
            return NotImplemented

    def _richcmp(self, other, op):
        """Helper for comparison operators, for internal use only.

        Implement comparison between a Rational instance `self`, and
        either another Rational instance or a float `other`.  If
        `other` is not a Rational instance or a float, return
        NotImplemented. `op` should be one of the six standard
        comparison operators.

        """
        # convert other to a Rational instance where reasonable.
        if isinstance(other, numbers.Rational):
            # Cross-multiply to compare exactly without division.
            return op(self._numerator * other.denominator,
                      self._denominator * other.numerator)
        if isinstance(other, float):
            if math.isnan(other) or math.isinf(other):
                return op(0.0, other)
            else:
                return op(self, self.from_float(other))
        else:
            return NotImplemented

    def __lt__(a, b):
        """a < b"""
        return a._richcmp(b, operator.lt)

    def __gt__(a, b):
        """a > b"""
        return a._richcmp(b, operator.gt)

    def __le__(a, b):
        """a <= b"""
        return a._richcmp(b, operator.le)

    def __ge__(a, b):
        """a >= b"""
        return a._richcmp(b, operator.ge)

    def __bool__(a):
        """a != 0"""
        return a._numerator != 0

    # support for pickling, copy, and deepcopy

    def __reduce__(self):
        return (self.__class__, (str(self),))

    def __copy__(self):
        if type(self) == Fraction:
            return self     # I'm immutable; therefore I am my own clone
        return self.__class__(self._numerator, self._denominator)

    def __deepcopy__(self, memo):
        if type(self) == Fraction:
            return self     # My components are also immutable
        return self.__class__(self._numerator, self._denominator)
| gpl-3.0 |
vismartltd/edx-platform | lms/djangoapps/shoppingcart/pdf.py | 103 | 18493 | """
Template for PDF Receipt/Invoice Generation
"""
from PIL import Image
import logging
from reportlab.lib import colors
from django.conf import settings
from django.utils.translation import ugettext as _
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import mm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Paragraph
from reportlab.platypus.tables import Table, TableStyle
from microsite_configuration import microsite
from xmodule.modulestore.django import ModuleI18nService
log = logging.getLogger("PDF Generation")
class NumberedCanvas(Canvas):  # pylint: disable=abstract-method
    """
    Canvas child class with auto page-numbering.

    Pages are buffered as snapshots of the canvas state; the total page
    count is therefore known when ``save()`` replays them, allowing
    "Page x of y" numbering.
    """
    def __init__(self, *args, **kwargs):
        """
        __init__
        """
        Canvas.__init__(self, *args, **kwargs)
        # One state snapshot per completed page, replayed in save().
        self._saved_page_states = []

    def insert_page_break(self):
        """
        Starts a new page.
        """
        # Snapshot the current page's state instead of emitting it now.
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()

    def current_page_count(self):
        """
        Returns the page count in the current pdf document.
        """
        # Completed (buffered) pages plus the page currently in progress.
        return len(self._saved_page_states) + 1

    def save(self):
        """
        Adds page numbering to each page (page x of y)
        """
        num_pages = len(self._saved_page_states)
        for state in self._saved_page_states:
            # Restore the buffered page state before emitting it.
            self.__dict__.update(state)
            # Single-page documents are left unnumbered.
            if num_pages > 1:
                self.draw_page_number(num_pages)
            Canvas.showPage(self)
        Canvas.save(self)

    def draw_page_number(self, page_count):
        """
        Draws the String "Page x of y" at the bottom right of the document.
        """
        self.setFontSize(7)
        self.drawRightString(
            200 * mm,
            12 * mm,
            _("Page {page_number} of {page_count}").format(page_number=self._pageNumber, page_count=page_count)
        )
class PDFInvoice(object):
"""
PDF Generation Class
"""
def __init__(self, items_data, item_id, date, is_invoice, total_cost, payment_received, balance):
    """
    Accepts the following positional arguments

    items_data - A list having the following items for each row.
        item_description - String
        quantity - Integer
        list_price - float
        discount - float
        item_total - float
    id - String
    date - datetime
    is_invoice - boolean - True (for invoice) or False (for Receipt)
    total_cost - float
    payment_received - float
    balance - float
    """
    # From settings (microsite values override the Django settings defaults)
    self.currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    self.logo_path = microsite.get_value("PDF_RECEIPT_LOGO_PATH", settings.PDF_RECEIPT_LOGO_PATH)
    self.cobrand_logo_path = microsite.get_value(
        "PDF_RECEIPT_COBRAND_LOGO_PATH", settings.PDF_RECEIPT_COBRAND_LOGO_PATH
    )
    self.tax_label = microsite.get_value("PDF_RECEIPT_TAX_ID_LABEL", settings.PDF_RECEIPT_TAX_ID_LABEL)
    self.tax_id = microsite.get_value("PDF_RECEIPT_TAX_ID", settings.PDF_RECEIPT_TAX_ID)
    self.footer_text = microsite.get_value("PDF_RECEIPT_FOOTER_TEXT", settings.PDF_RECEIPT_FOOTER_TEXT)
    self.disclaimer_text = microsite.get_value("PDF_RECEIPT_DISCLAIMER_TEXT", settings.PDF_RECEIPT_DISCLAIMER_TEXT)
    self.billing_address_text = microsite.get_value(
        "PDF_RECEIPT_BILLING_ADDRESS", settings.PDF_RECEIPT_BILLING_ADDRESS
    )
    self.terms_conditions_text = microsite.get_value(
        "PDF_RECEIPT_TERMS_AND_CONDITIONS", settings.PDF_RECEIPT_TERMS_AND_CONDITIONS
    )
    # Logo heights are configured in millimetres; `* mm` converts to points.
    self.brand_logo_height = microsite.get_value(
        "PDF_RECEIPT_LOGO_HEIGHT_MM", settings.PDF_RECEIPT_LOGO_HEIGHT_MM
    ) * mm
    self.cobrand_logo_height = microsite.get_value(
        "PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM", settings.PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM
    ) * mm
    # From Context
    self.items_data = items_data
    self.item_id = item_id
    self.date = ModuleI18nService().strftime(date, 'SHORT_DATE')
    self.is_invoice = is_invoice
    # Monetary amounts are pre-formatted here; downstream code only draws strings.
    self.total_cost = '{currency}{amount:.2f}'.format(currency=self.currency, amount=total_cost)
    self.payment_received = '{currency}{amount:.2f}'.format(currency=self.currency, amount=payment_received)
    self.balance = '{currency}{amount:.2f}'.format(currency=self.currency, amount=balance)
    # initialize the pdf variables
    self.margin = 15 * mm
    self.page_width = letter[0]
    self.page_height = letter[1]
    self.min_clearance = 3 * mm
    # These are computed in generate_pdf() once the logos have been drawn.
    self.second_page_available_height = ''
    self.second_page_start_y_pos = ''
    self.first_page_available_height = ''
    self.pdf = None
def is_on_first_page(self):
"""
Returns True if it's the first page of the pdf, False otherwise.
"""
return self.pdf.current_page_count() == 1
def generate_pdf(self, file_buffer):
    """
    Takes in a buffer and puts the generated pdf into that buffer.
    """
    self.pdf = NumberedCanvas(file_buffer, pagesize=letter)

    self.draw_border()
    y_pos = self.draw_logos()
    # Continuation pages start right below the logos; remember both the
    # starting y position and the usable height for later page breaks.
    self.second_page_available_height = y_pos - self.margin - self.min_clearance
    self.second_page_start_y_pos = y_pos

    y_pos = self.draw_title(y_pos)
    self.first_page_available_height = y_pos - self.margin - self.min_clearance

    # Each draw_* call returns the next free y position.
    y_pos = self.draw_course_info(y_pos)
    y_pos = self.draw_totals(y_pos)
    self.draw_footer(y_pos)

    self.pdf.insert_page_break()
    self.pdf.save()
def draw_border(self):
    """
    Draws a big border around the page leaving a margin of 15 mm on each side.
    """
    inner_width = self.page_width - 2 * self.margin
    inner_height = self.page_height - 2 * self.margin
    self.pdf.setStrokeColorRGB(0.5, 0.5, 0.5)
    self.pdf.setLineWidth(0.353 * mm)
    self.pdf.rect(self.margin, self.margin, inner_width, inner_height,
                  stroke=True, fill=False)
@staticmethod
def load_image(img_path):
    """
    Loads an image given a path. An absolute path is assumed.
    If the path points to an image file, it loads and returns the Image object, None otherwise.
    """
    try:
        img = Image.open(img_path)
    except IOError as ex:  # `except X, e` is Python-2-only; `as` works on 2.6+ and 3.x
        log.exception('Pdf unable to open the image file: %s', str(ex))
        img = None

    return img
def draw_logos(self):
    """
    Draws logos.

    Returns the y position just below the logo row, for the next element.
    """
    horizontal_padding_from_border = self.margin + 9 * mm
    vertical_padding_from_border = 11 * mm
    # Both logos sit on the same baseline; reserve room for the taller one.
    img_y_pos = self.page_height - (
        self.margin + vertical_padding_from_border + max(self.cobrand_logo_height, self.brand_logo_height)
    )

    # Left-Aligned cobrand logo
    if self.cobrand_logo_path:
        cobrand_img = self.load_image(self.cobrand_logo_path)
        if cobrand_img:
            # Scale width to preserve the image's aspect ratio at the configured height.
            img_width = float(cobrand_img.size[0]) / (float(cobrand_img.size[1]) / self.cobrand_logo_height)
            self.pdf.drawImage(cobrand_img.filename, horizontal_padding_from_border, img_y_pos, img_width,
                               self.cobrand_logo_height, mask='auto')

    # Right aligned brand logo
    if self.logo_path:
        logo_img = self.load_image(self.logo_path)
        if logo_img:
            img_width = float(logo_img.size[0]) / (float(logo_img.size[1]) / self.brand_logo_height)
            self.pdf.drawImage(
                logo_img.filename,
                self.page_width - (horizontal_padding_from_border + img_width),
                img_y_pos,
                img_width,
                self.brand_logo_height,
                mask='auto'
            )

    return img_y_pos - self.min_clearance
def draw_title(self, y_pos):
    """
    Draws the title, order/receipt ID and the date.

    Returns the y position below the drawn text.
    """
    if self.is_invoice:
        title = (_('Invoice'))
        id_label = (_('Invoice'))
    else:
        title = (_('Receipt'))
        id_label = (_('Order'))

    # Draw Title "RECEIPT" OR "INVOICE"
    vertical_padding = 5 * mm
    horizontal_padding_from_border = self.margin + 9 * mm
    font_size = 21
    self.pdf.setFontSize(font_size)
    self.pdf.drawString(horizontal_padding_from_border, y_pos - vertical_padding - font_size / 2, title)
    y_pos = y_pos - vertical_padding - font_size / 2 - self.min_clearance

    # The detail lines are indented slightly further and use a smaller font.
    horizontal_padding_from_border = self.margin + 11 * mm
    font_size = 12
    self.pdf.setFontSize(font_size)
    y_pos = y_pos - font_size / 2 - vertical_padding

    # Draw Order/Invoice No.
    self.pdf.drawString(horizontal_padding_from_border, y_pos,
                        _(u'{id_label} # {item_id}').format(id_label=id_label, item_id=self.item_id))
    y_pos = y_pos - font_size / 2 - vertical_padding

    # Draw Date
    self.pdf.drawString(
        horizontal_padding_from_border, y_pos, _(u'Date: {date}').format(date=self.date)
    )

    return y_pos - self.min_clearance
def draw_course_info(self, y_pos):
    """
    Draws the main table containing the data items.

    Handles pagination: if the table does not fit in the space left on
    the current page it is split across as many pages as needed.
    Returns the y position below the last drawn table fragment.
    """
    # Header row; empty first/last cells act as horizontal padding columns.
    course_items_data = [
        ['', (_('Description')), (_('Quantity')), (_('List Price\nper item')), (_('Discount\nper item')),
         (_('Amount')), '']
    ]
    for row_item in self.items_data:
        course_items_data.append([
            '',
            Paragraph(row_item['item_description'], getSampleStyleSheet()['Normal']),
            row_item['quantity'],
            '{currency}{list_price:.2f}'.format(list_price=row_item['list_price'], currency=self.currency),
            '{currency}{discount:.2f}'.format(discount=row_item['discount'], currency=self.currency),
            '{currency}{item_total:.2f}'.format(item_total=row_item['item_total'], currency=self.currency),
            ''
        ])

    padding_width = 7 * mm
    desc_col_width = 60 * mm
    qty_col_width = 26 * mm
    list_price_col_width = 21 * mm
    discount_col_width = 21 * mm
    amount_col_width = 40 * mm
    course_items_table = Table(
        course_items_data,
        [
            padding_width,
            desc_col_width,
            qty_col_width,
            list_price_col_width,
            discount_col_width,
            amount_col_width,
            padding_width
        ],
        splitByRow=1,
        repeatRows=1
    )

    course_items_table.setStyle(TableStyle([
        #List Price, Discount, Amount data items
        ('ALIGN', (3, 1), (5, -1), 'RIGHT'),

        # Amount header
        ('ALIGN', (5, 0), (5, 0), 'RIGHT'),

        # Amount column (header + data items)
        ('RIGHTPADDING', (5, 0), (5, -1), 7 * mm),

        # Quantity, List Price, Discount header
        ('ALIGN', (2, 0), (4, 0), 'CENTER'),

        # Description header
        ('ALIGN', (1, 0), (1, -1), 'LEFT'),

        # Quantity data items
        ('ALIGN', (2, 1), (2, -1), 'CENTER'),

        # Lines below the header and at the end of the table.
        ('LINEBELOW', (0, 0), (-1, 0), 1.00, '#cccccc'),
        ('LINEBELOW', (0, -1), (-1, -1), 1.00, '#cccccc'),

        # Innergrid around the data rows.
        ('INNERGRID', (1, 1), (-2, -1), 0.50, '#cccccc'),

        # Entire table
        ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
        ('TOPPADDING', (0, 0), (-1, -1), 2 * mm),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 2 * mm),
        ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
    ]))
    rendered_width, rendered_height = course_items_table.wrap(0, 0)
    table_left_padding = (self.page_width - rendered_width) / 2

    split_tables = course_items_table.split(0, self.first_page_available_height)
    if len(split_tables) > 1:
        # The entire Table won't fit in the available space and requires splitting.
        # Draw the part that can fit, start a new page
        # and repeat the process with the rest of the table.
        split_table = split_tables[0]
        __, rendered_height = split_table.wrap(0, 0)
        split_table.drawOn(self.pdf, table_left_padding, y_pos - rendered_height)

        self.prepare_new_page()
        split_tables = split_tables[1].split(0, self.second_page_available_height)
        while len(split_tables) > 1:
            split_table = split_tables[0]
            __, rendered_height = split_table.wrap(0, 0)
            split_table.drawOn(self.pdf, table_left_padding, self.second_page_start_y_pos - rendered_height)

            self.prepare_new_page()
            split_tables = split_tables[1].split(0, self.second_page_available_height)

        # Draw the final fragment, which fits on the current page.
        split_table = split_tables[0]
        __, rendered_height = split_table.wrap(0, 0)
        split_table.drawOn(self.pdf, table_left_padding, self.second_page_start_y_pos - rendered_height)
    else:
        # Table will fit without the need for splitting.
        course_items_table.drawOn(self.pdf, table_left_padding, y_pos - rendered_height)

    # On continuation pages the table started at the page top, not at y_pos.
    if not self.is_on_first_page():
        y_pos = self.second_page_start_y_pos

    return y_pos - rendered_height - self.min_clearance
def prepare_new_page(self):
    """Insert a page break and redraw the per-page chrome.

    Starts a fresh page, draws the page border and the logos, and returns
    the vertical position (from draw_logos) where content may resume.
    """
    self.pdf.insert_page_break()
    self.draw_border()
    return self.draw_logos()
def draw_totals(self, y_pos):
    """
    Draws the boxes containing the totals and the tax id.

    `y_pos` is the current vertical drawing cursor; returns the new cursor
    position below the rendered table minus `min_clearance`.  If the table
    does not fit in the remaining space, a new page is started first.
    """
    # Label/amount rows; the amounts were computed before this call.
    totals_data = [
        [(_('Total')), self.total_cost],
        [(_('Payment Received')), self.payment_received],
        [(_('Balance')), self.balance]
    ]
    if self.is_invoice:
        # only print TaxID if we are generating an Invoice
        totals_data.append(
            ['', '{tax_label}: {tax_id}'.format(tax_label=self.tax_label, tax_id=self.tax_id)]
        )
    # Fixed geometry: 8mm row height, 40mm column width for both columns.
    heights = 8 * mm
    totals_table = Table(totals_data, 40 * mm, heights)
    styles = [
        # Styling for the totals table.
        ('ALIGN', (0, 0), (-1, -1), 'RIGHT'),
        ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
        ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
        # Styling for the Amounts cells
        # NOTE: since we are not printing the TaxID for Credit Card
        # based receipts, we need to change the cell range for
        # these formatting rules
        ('RIGHTPADDING', (-1, 0), (-1, 2), 7 * mm),
        ('GRID', (-1, 0), (-1, 2), 3.0, colors.white),
        ('BACKGROUND', (-1, 0), (-1, 2), '#EEEEEE'),
    ]
    totals_table.setStyle(TableStyle(styles))
    # wrap() computes the rendered size; only the height is needed here.
    __, rendered_height = totals_table.wrap(0, 0)
    # Horizontal offset of the totals box from the left margin.
    left_padding = 97 * mm
    if y_pos - (self.margin + self.min_clearance) <= rendered_height:
        # if space left on page is smaller than the rendered height, render the table on the next page.
        self.prepare_new_page()
        totals_table.drawOn(self.pdf, self.margin + left_padding, self.second_page_start_y_pos - rendered_height)
        return self.second_page_start_y_pos - rendered_height - self.min_clearance
    else:
        totals_table.drawOn(self.pdf, self.margin + left_padding, y_pos - rendered_height)
        return y_pos - rendered_height - self.min_clearance
def draw_footer(self, y_pos):
    """
    Draws the footer.

    The footer (footer text, billing address, disclaimer and -- for
    invoices -- terms and conditions) is pinned near the bottom of the
    page; `y_pos` is only used to decide whether a page break is needed
    so the footer does not collide with content drawn above it.
    """
    para_style = getSampleStyleSheet()['Normal']
    para_style.fontSize = 8
    # The *_text fields are plain text; convert newlines to <br/> so
    # Paragraph preserves the line breaks.
    footer_para = Paragraph(self.footer_text.replace("\n", "<br/>"), para_style)
    disclaimer_para = Paragraph(self.disclaimer_text.replace("\n", "<br/>"), para_style)
    billing_address_para = Paragraph(self.billing_address_text.replace("\n", "<br/>"), para_style)
    # Two-column layout: column 0 carries section headers, column 1 bodies.
    footer_data = [
        ['', footer_para],
        [(_('Billing Address')), ''],
        ['', billing_address_para],
        [(_('Disclaimer')), ''],
        ['', disclaimer_para]
    ]
    footer_style = [
        # Styling for the entire footer table.
        ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
        ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
        ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
        ('FONTSIZE', (0, 0), (-1, -1), 9),
        # NOTE(review): this second TEXTCOLOR overrides the black one set
        # above, so the whole footer renders in grey -- confirm intended.
        ('TEXTCOLOR', (0, 0), (-1, -1), '#AAAAAA'),
        # Billing Address Header styling
        ('LEFTPADDING', (0, 1), (0, 1), 5 * mm),
        # Disclaimer Header styling
        ('LEFTPADDING', (0, 3), (0, 3), 5 * mm),
        ('TOPPADDING', (0, 3), (0, 3), 2 * mm),
        # Footer Body styling
        # ('BACKGROUND', (1, 0), (1, 0), '#EEEEEE'),
        # Billing Address Body styling
        ('BACKGROUND', (1, 2), (1, 2), '#EEEEEE'),
        # Disclaimer Body styling
        ('BACKGROUND', (1, 4), (1, 4), '#EEEEEE'),
    ]
    if self.is_invoice:
        # Invoices additionally get a terms-and-conditions section
        # occupying rows 5 (header) and 6 (body).
        terms_conditions_para = Paragraph(self.terms_conditions_text.replace("\n", "<br/>"), para_style)
        footer_data.append([(_('TERMS AND CONDITIONS')), ''])
        footer_data.append(['', terms_conditions_para])
        # TERMS AND CONDITIONS header styling
        footer_style.append(('LEFTPADDING', (0, 5), (0, 5), 5 * mm))
        footer_style.append(('TOPPADDING', (0, 5), (0, 5), 2 * mm))
        # TERMS AND CONDITIONS body styling
        footer_style.append(('BACKGROUND', (1, 6), (1, 6), '#EEEEEE'))
    footer_table = Table(footer_data, [5 * mm, 176 * mm])
    footer_table.setStyle(TableStyle(footer_style))
    __, rendered_height = footer_table.wrap(0, 0)
    # Page-break first if the footer would overlap earlier content.
    if y_pos - (self.margin + self.min_clearance) <= rendered_height:
        self.prepare_new_page()
    # Footer is drawn 5mm above the bottom margin regardless of y_pos.
    footer_table.drawOn(self.pdf, self.margin, self.margin + 5 * mm)
| agpl-3.0 |
omefire/bitcoin | qa/rpc-tests/walletbackup.py | 132 | 7263 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(BitcoinTestFramework):
    """Exercise wallet backup/restore via backupwallet, dumpwallet and
    importwallet on a 4-node regtest network (see module docstring)."""

    def setup_chain(self):
        """Create a clean 4-node chain directory under tmpdir."""
        logging.info("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    # This mirrors how the network was setup in the bash test
    def setup_network(self, split=False):
        """Start the nodes: 0-2 are spenders, 3 is the miner hub."""
        # nodes 1, 2,3 are spenders, let's give them a keypool=100
        extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
        self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
        # Star topology around the miner (node 3), plus a 2<->0 link.
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split=False
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random 0.1-1.0 coin amount."""
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """One round of random sends among nodes 0-2, then mine a block."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()
        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)
        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and reconnect the topology."""
        self.nodes[0] = start_node(0, self.options.tmpdir)
        self.nodes[1] = start_node(1, self.options.tmpdir)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Shut down the three spender nodes (miner keeps running)."""
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 1)
        stop_node(self.nodes[2], 2)

    def erase_three(self):
        """Delete the spender nodes' wallet.dat files."""
        os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

    def run_test(self):
        """Full scenario: fund wallets, back up, transact more, then
        restore from wallet.dat backups and again from dumpwallet dumps,
        checking balances survive both restore paths."""
        logging.info("Generating initial blockchain")
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        # 103 blocks total: each spender's single block reward is now mature.
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        logging.info("More transactions")
        # These post-backup transactions must still be recovered on restore,
        # because the keypool (100 keys) was backed up with the wallet.
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        # Balances must survive a wallet.dat restore (node2 also re-syncs
        # its chain from scratch).
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        #start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Fresh wallets know nothing until the dumps are imported.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
# Standard test entry point: run the full backup/restore scenario.
if __name__ == '__main__':
    WalletBackupTest().main()
| mit |
yatinag/selfcure | lib/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
# True when running under Python 2; selects which set of shims is defined.
PY2 = sys.version_info[0] == 2

# No-op wrapper used where Python 3 needs no decoration.
_identity = lambda x: x


if not PY2:
    # Python 3: native str is text; dict iteration helpers wrap the views.
    text_type = str
    string_types = (str,)
    integer_types = (int, )

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    from io import StringIO

    def reraise(tp, value, tb=None):
        # Re-raise *value* with traceback *tb* (Python 3 syntax).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    # str already is text on Python 3, so no __unicode__ bridging needed.
    implements_to_string = _identity

else:
    # Python 2: text is unicode; use the dict iter* methods directly.
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    from cStringIO import StringIO

    # The three-argument raise is a syntax error on Python 3, so it must be
    # hidden inside exec() to keep this module importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def implements_to_string(cls):
        # Class decorator: derive __str__ (bytes) from __unicode__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class with metaclass *meta*, portable across Py2/Py3."""
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary class created below;
            # real subclasses are built with the actual metaclass and bases.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
| apache-2.0 |
liyy7/scrapy | scrapy/extensions/httpcache.py | 102 | 16232 | from __future__ import print_function
import os
import gzip
from six.moves import cPickle as pickle
from importlib import import_module
from time import time
from weakref import WeakKeyDictionary
from email.utils import mktime_tz, parsedate_tz
from w3lib.http import headers_raw_to_dict, headers_dict_to_raw
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy.utils.httpobj import urlparse_cached
class DummyPolicy(object):
    """Naive cache policy: cache everything except configured schemes/codes,
    and treat every cached response as forever fresh and valid."""

    def __init__(self, settings):
        self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
        self.ignore_http_codes = [
            int(code) for code in settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')
        ]

    def should_cache_request(self, request):
        # Cache any request whose URL scheme is not explicitly ignored.
        scheme = urlparse_cached(request).scheme
        return scheme not in self.ignore_schemes

    def should_cache_response(self, response, request):
        # Cache any response whose status code is not explicitly ignored.
        return response.status not in self.ignore_http_codes

    def is_cached_response_fresh(self, response, request):
        # A cached response never expires under this policy.
        return True

    def is_cached_response_valid(self, cachedresponse, response, request):
        # A cached response is always considered valid under this policy.
        return True
class RFC2616Policy(object):
    """Cache policy following RFC 2616 (HTTP/1.1) caching semantics:
    honors Cache-Control directives, Expires, validators
    (Last-Modified/ETag) and freshness/age computations modeled on
    Firefox's implementation."""

    MAXAGE = 3600 * 24 * 365  # one year

    def __init__(self, settings):
        self.always_store = settings.getbool('HTTPCACHE_ALWAYS_STORE')
        self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
        self.ignore_response_cache_controls = settings.getlist('HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS')
        # Memoized Cache-Control parses keyed weakly on the request/response
        # objects, so cached entries die together with those objects.
        self._cc_parsed = WeakKeyDictionary()

    def _parse_cachecontrol(self, r):
        """Return the parsed Cache-Control directives of *r*, memoized."""
        if r not in self._cc_parsed:
            cch = r.headers.get('Cache-Control', '')
            parsed = parse_cachecontrol(cch)
            if isinstance(r, Response):
                # Strip response directives the user configured us to ignore.
                for key in self.ignore_response_cache_controls:
                    parsed.pop(key, None)
            self._cc_parsed[r] = parsed
        return self._cc_parsed[r]

    def should_cache_request(self, request):
        """Return True if this request may use / populate the cache."""
        if urlparse_cached(request).scheme in self.ignore_schemes:
            return False
        cc = self._parse_cachecontrol(request)
        # obey user-agent directive "Cache-Control: no-store"
        if 'no-store' in cc:
            return False
        # Any other is eligible for caching
        return True

    def should_cache_response(self, response, request):
        """Return True if the response is allowed to be stored."""
        # What is cacheable - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec14.9.1
        # Response cacheability - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
        # Status code 206 is not included because cache can not deal with partial contents
        cc = self._parse_cachecontrol(response)
        # obey directive "Cache-Control: no-store"
        if 'no-store' in cc:
            return False
        # Never cache 304 (Not Modified) responses
        elif response.status == 304:
            return False
        # Cache unconditionally if configured to do so
        elif self.always_store:
            return True
        # Any hint on response expiration is good
        elif 'max-age' in cc or 'Expires' in response.headers:
            return True
        # Firefox fallbacks this statuses to one year expiration if none is set
        elif response.status in (300, 301, 308):
            return True
        # Other statuses without expiration requires at least one validator
        elif response.status in (200, 203, 401):
            return 'Last-Modified' in response.headers or 'ETag' in response.headers
        # Any other is probably not eligible for caching
        # Makes no sense to cache responses that does not contain expiration
        # info and can not be revalidated
        else:
            return False

    def is_cached_response_fresh(self, cachedresponse, request):
        """Return True if *cachedresponse* can be served without revalidation.

        Side effect: when the response is stale, conditional validators
        (If-Modified-Since / If-None-Match) are attached to *request* so
        the origin server can answer 304.
        """
        cc = self._parse_cachecontrol(cachedresponse)
        ccreq = self._parse_cachecontrol(request)
        if 'no-cache' in cc or 'no-cache' in ccreq:
            return False

        now = time()
        freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now)
        currentage = self._compute_current_age(cachedresponse, request, now)

        # The request's own max-age can only tighten the freshness window.
        reqmaxage = self._get_max_age(ccreq)
        if reqmaxage is not None:
            freshnesslifetime = min(freshnesslifetime, reqmaxage)

        if currentage < freshnesslifetime:
            return True

        if 'max-stale' in ccreq and 'must-revalidate' not in cc:
            # From RFC2616: "Indicates that the client is willing to
            # accept a response that has exceeded its expiration time.
            # If max-stale is assigned a value, then the client is
            # willing to accept a response that has exceeded its
            # expiration time by no more than the specified number of
            # seconds. If no value is assigned to max-stale, then the
            # client is willing to accept a stale response of any age."
            staleage = ccreq['max-stale']
            if staleage is None:
                return True

            try:
                if currentage < freshnesslifetime + max(0, int(staleage)):
                    return True
            except ValueError:
                pass

        # Cached response is stale, try to set validators if any
        self._set_conditional_validators(request, cachedresponse)
        return False

    def is_cached_response_valid(self, cachedresponse, response, request):
        """Decide whether to keep serving the cached copy after revalidation."""
        # Use the cached response if the new response is a server error,
        # as long as the old response didn't specify must-revalidate.
        if response.status >= 500:
            cc = self._parse_cachecontrol(cachedresponse)
            if 'must-revalidate' not in cc:
                return True

        # Use the cached response if the server says it hasn't changed.
        return response.status == 304

    def _set_conditional_validators(self, request, cachedresponse):
        """Copy the cached validators onto *request* as conditional headers."""
        if 'Last-Modified' in cachedresponse.headers:
            request.headers['If-Modified-Since'] = cachedresponse.headers['Last-Modified']

        if 'ETag' in cachedresponse.headers:
            request.headers['If-None-Match'] = cachedresponse.headers['ETag']

    def _get_max_age(self, cc):
        """Return the max-age directive as a non-negative int, or None."""
        try:
            return max(0, int(cc['max-age']))
        except (KeyError, ValueError):
            return None

    def _compute_freshness_lifetime(self, response, request, now):
        """Return how long (in seconds) the response counts as fresh."""
        # Reference nsHttpResponseHead::ComputeFreshnessLifetime
        # http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#410
        cc = self._parse_cachecontrol(response)
        maxage = self._get_max_age(cc)
        if maxage is not None:
            return maxage

        # Parse date header or synthesize it if none exists
        date = rfc1123_to_epoch(response.headers.get('Date')) or now

        # Try HTTP/1.0 Expires header
        if 'Expires' in response.headers:
            expires = rfc1123_to_epoch(response.headers['Expires'])
            # When parsing Expires header fails RFC 2616 section 14.21 says we
            # should treat this as an expiration time in the past.
            return max(0, expires - date) if expires else 0

        # Fallback to heuristic using last-modified header
        # This is not in RFC but on Firefox caching implementation
        lastmodified = rfc1123_to_epoch(response.headers.get('Last-Modified'))
        if lastmodified and lastmodified <= date:
            return (date - lastmodified) / 10

        # This request can be cached indefinitely
        if response.status in (300, 301, 308):
            return self.MAXAGE

        # Insufficient information to compute fresshness lifetime
        return 0

    def _compute_current_age(self, response, request, now):
        """Return the response's current age in seconds."""
        # Reference nsHttpResponseHead::ComputeCurrentAge
        # http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#366
        currentage = 0
        # If Date header is not set we assume it is a fast connection, and
        # clock is in sync with the server
        date = rfc1123_to_epoch(response.headers.get('Date')) or now
        if now > date:
            currentage = now - date

        if 'Age' in response.headers:
            try:
                age = int(response.headers['Age'])
                currentage = max(currentage, age)
            except ValueError:
                pass

        return currentage
class DbmCacheStorage(object):
    """HTTP cache backend that stores serialized responses in one DBM
    database file per spider, with per-entry timestamps for expiration."""

    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.dbmodule = import_module(settings['HTTPCACHE_DBM_MODULE'])
        self.db = None

    def open_spider(self, spider):
        # One database file per spider, created on demand ('c' flag).
        dbpath = os.path.join(self.cachedir, '%s.db' % spider.name)
        self.db = self.dbmodule.open(dbpath, 'c')

    def close_spider(self, spider):
        self.db.close()

    def retrieve_response(self, spider, request):
        """Return the cached Response for *request*, or None on a miss."""
        data = self._read_data(spider, request)
        if data is None:
            return  # not cached
        headers = Headers(data['headers'])
        respcls = responsetypes.from_args(headers=headers, url=data['url'])
        return respcls(url=data['url'], headers=headers,
                       status=data['status'], body=data['body'])

    def store_response(self, spider, request, response):
        """Serialize *response* under the request fingerprint, timestamped."""
        key = self._request_key(request)
        serialized = pickle.dumps({
            'status': response.status,
            'url': response.url,
            'headers': dict(response.headers),
            'body': response.body,
        }, protocol=2)
        self.db['%s_data' % key] = serialized
        self.db['%s_time' % key] = str(time())

    def _read_data(self, spider, request):
        """Return the stored dict for *request*, or None if absent/expired."""
        key = self._request_key(request)
        tkey = '%s_time' % key
        if tkey not in self.db:
            return  # not found
        ts = self.db[tkey]
        if 0 < self.expiration_secs < time() - float(ts):
            return  # expired
        return pickle.loads(self.db['%s_data' % key])

    def _request_key(self, request):
        return request_fingerprint(request)
class FilesystemCacheStorage(object):
    """HTTP cache backend storing each response as a directory of files
    (meta, pickled_meta, request/response headers and bodies), optionally
    gzip-compressed."""

    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'])
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.use_gzip = settings.getbool('HTTPCACHE_GZIP')
        # All cache file accesses go through gzip.open when enabled.
        self._open = gzip.open if self.use_gzip else open

    def open_spider(self, spider):
        pass

    def close_spider(self, spider):
        pass

    def retrieve_response(self, spider, request):
        """Return response if present in cache, or None otherwise."""
        metadata = self._read_meta(spider, request)
        if metadata is None:
            return  # not cached
        rpath = self._get_request_path(spider, request)
        with self._open(os.path.join(rpath, 'response_body'), 'rb') as f:
            body = f.read()
        with self._open(os.path.join(rpath, 'response_headers'), 'rb') as f:
            rawheaders = f.read()
        url = metadata.get('response_url')
        status = metadata['status']
        headers = Headers(headers_raw_to_dict(rawheaders))
        respcls = responsetypes.from_args(headers=headers, url=url)
        response = respcls(url=url, headers=headers, status=status, body=body)
        return response

    def store_response(self, spider, request, response):
        """Store the given response in the cache."""
        rpath = self._get_request_path(spider, request)
        if not os.path.exists(rpath):
            os.makedirs(rpath)
        metadata = {
            'url': request.url,
            'method': request.method,
            'status': response.status,
            'response_url': response.url,
            'timestamp': time(),
        }
        # 'meta' is a human-readable copy; 'pickled_meta' is the one read back.
        with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
            f.write(repr(metadata))
        with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
            pickle.dump(metadata, f, protocol=2)
        with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(response.headers))
        with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
            f.write(response.body)
        with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(request.headers))
        with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
            f.write(request.body)

    def _get_request_path(self, spider, request):
        """Cache dir layout: <cachedir>/<spider>/<fp[0:2]>/<fingerprint>."""
        key = request_fingerprint(request)
        return os.path.join(self.cachedir, spider.name, key[0:2], key)

    def _read_meta(self, spider, request):
        """Return the pickled metadata dict, or None if missing/expired.

        Expiration is judged from the cache directory's mtime, not the
        stored 'timestamp' field.
        """
        rpath = self._get_request_path(spider, request)
        metapath = os.path.join(rpath, 'pickled_meta')
        if not os.path.exists(metapath):
            return  # not found
        mtime = os.stat(rpath).st_mtime
        if 0 < self.expiration_secs < time() - mtime:
            return  # expired
        with self._open(metapath, 'rb') as f:
            return pickle.load(f)
class LeveldbCacheStorage(object):
    """HTTP cache backend that stores serialized responses in one LevelDB
    database per spider, writing data and timestamp atomically."""

    def __init__(self, settings):
        import leveldb
        self._leveldb = leveldb
        self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.db = None

    def open_spider(self, spider):
        dbpath = os.path.join(self.cachedir, '%s.leveldb' % spider.name)
        self.db = self._leveldb.LevelDB(dbpath)

    def close_spider(self, spider):
        # Do compactation each time to save space and also recreate files to
        # avoid them being removed in storages with timestamp-based autoremoval.
        self.db.CompactRange()
        del self.db

    def retrieve_response(self, spider, request):
        """Return the cached Response for *request*, or None on a miss."""
        data = self._read_data(spider, request)
        if data is None:
            return  # not cached
        headers = Headers(data['headers'])
        respcls = responsetypes.from_args(headers=headers, url=data['url'])
        return respcls(url=data['url'], headers=headers,
                       status=data['status'], body=data['body'])

    def store_response(self, spider, request, response):
        """Write the serialized response and its timestamp in one batch."""
        key = self._request_key(request)
        serialized = pickle.dumps({
            'status': response.status,
            'url': response.url,
            'headers': dict(response.headers),
            'body': response.body,
        }, protocol=2)
        batch = self._leveldb.WriteBatch()
        batch.Put('%s_data' % key, serialized)
        batch.Put('%s_time' % key, str(time()))
        self.db.Write(batch)

    def _read_data(self, spider, request):
        """Return the stored dict for *request*, or None if absent/expired."""
        key = self._request_key(request)
        try:
            ts = self.db.Get('%s_time' % key)
        except KeyError:
            return  # not found or invalid entry
        if 0 < self.expiration_secs < time() - float(ts):
            return  # expired
        try:
            data = self.db.Get('%s_data' % key)
        except KeyError:
            return  # invalid entry
        return pickle.loads(data)

    def _request_key(self, request):
        return request_fingerprint(request)
def parse_cachecontrol(header):
    """Parse a Cache-Control header value into a directive dict.

    Directive names are lowercased; directives without a value map to None.
    http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9

    >>> parse_cachecontrol('public, max-age=3600') == {'public': None,
    ...                                                'max-age': '3600'}
    True
    >>> parse_cachecontrol('') == {}
    True
    """
    parsed = {}
    for part in header.split(','):
        name, eq, value = part.strip().partition('=')
        if not name:
            continue
        parsed[name.lower()] = value if eq else None
    return parsed
def rfc1123_to_epoch(date_str):
    """Convert an RFC 1123 date string (e.g. an HTTP Date header value)
    to a Unix timestamp, or return None if it is missing or unparseable.
    """
    try:
        return mktime_tz(parsedate_tz(date_str))
    except (TypeError, AttributeError, ValueError, OverflowError):
        # parsedate_tz returns None for unparseable text (-> TypeError in
        # mktime_tz) and raises TypeError/AttributeError for non-string
        # input; out-of-range dates raise ValueError/OverflowError.  The
        # previous bare `except Exception` silently swallowed every other
        # error too -- anything else should propagate.
        return None
| bsd-3-clause |
cschenck/SmoothParticleNets | tests/test_particleprojection.py | 1 | 9566 | import os
import sys
# Add path to python source to path.
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "python"))
import SmoothParticleNets as spn
import itertools
import numpy as np
import torch
import torch.autograd
from gradcheck import gradcheck
from test_convsdf import quaternionMult, quaternionConjugate
try:
import pytest_args
except ImportError:
print("Make sure to compile SmoothParticleNets before running tests.")
raise
def pyproject(camera_fl, camera_size, filter_std, filter_scale, locs, camera_pose,
              camera_rot, depth_mask=None, dtype=np.float32):
    """Reference NumPy implementation of the particle projection.

    Projects each 3D particle in *locs* (batch_size x N x 3, world frame)
    through a pinhole camera with focal length *camera_fl* and image size
    *camera_size* = (width, height), splatting a truncated Gaussian blob
    (std *filter_std* pixels, scaled by *filter_scale*) at its projected
    pixel location.  Particles behind the camera, outside the 2*std
    support, or occluded by *depth_mask* are skipped.  Returns a
    (batch_size, height, width) array.
    """
    batch_size = locs.shape[0]
    N = locs.shape[1]
    ret = np.zeros((batch_size, camera_size[1], camera_size[0]), dtype=dtype)
    if depth_mask is None:
        # No occluder: treat every pixel as infinitely far away.
        depth_mask = np.ones((batch_size, camera_size[1], camera_size[0]),
                             dtype=dtype)*np.finfo(np.float32).max
    for b in range(batch_size):
        for n in range(N):
            r = locs[b, n, :]
            # Transform the particle into the camera frame: translate, then
            # rotate by the camera quaternion via q^-1 * t * q.
            t = r - camera_pose[b, ...]
            t = np.array([t[0], t[1], t[2], 0.0], dtype=dtype)
            t = np.array(quaternionMult(quaternionConjugate(camera_rot[b, :]),
                                        quaternionMult(t, camera_rot[b, :])), dtype=dtype)
            if t[2] <= 0:
                # Behind the camera (+Z is the viewing direction).
                continue
            # Pinhole projection to pixel coordinates (origin at image center).
            p = np.array([t[0]*camera_fl/t[2] + camera_size[0]/2.0,
                          t[1]*camera_fl/t[2] + camera_size[1]/2.0],
                         dtype=dtype)
            # Support radius (2 std) and Gaussian peak factor.
            s = np.ceil(filter_std*2)
            f = filter_scale/(filter_std*np.sqrt(2*np.pi))
            for i in np.arange(max(0, p[0] - s), min(camera_size[0], p[0] + s + 1), 1):
                for j in np.arange(max(0, p[1] - s), min(camera_size[1], p[1] + s + 1), 1):
                    if depth_mask[b, int(j), int(i)] < t[2]:
                        # Occluded: the mask surface is closer than the particle.
                        continue
                    # Measure distance from the pixel center.
                    xi = int(i) + 0.5
                    yj = int(j) + 0.5
                    d2 = (xi - p[0])**2 + (yj - p[1])**2
                    if d2 > s*s:
                        # Outside the circular truncation radius.
                        continue
                    v = f*np.exp(-d2/(2.0*filter_std*filter_std))
                    ret[b, int(j), int(i)] += v
    return ret
def test_particleprojection(cpu=True, cuda=True):
    """Run the ParticleProjection test suite on the selected backends."""
    if cpu:
        print("Testing CPU implementation of ParticleProjection...")
        eval_particleprojection(cuda=False)
        print("CPU implementation passed!")
        print("")
    if not cuda:
        return
    if not pytest_args.with_cuda:
        print("Not compiled with CUDA, skipping CUDA test.")
        return
    print("Testing CUDA implementation of ParticleProjection...")
    eval_particleprojection(cuda=True)
    print("CUDA implementation passed!")
def eval_particleprojection(cuda=False):
    """Check spn.ParticleProjection against the NumPy reference
    implementation (pyproject) and gradcheck its gradients w.r.t. the
    particle locations."""
    np.random.seed(1)
    BATCH_SIZE = 2
    N = 5
    CAMERA_FOV = 45.0/180.0*np.pi
    CAMERA_SIZE = (120, 90)
    # CAMERA_SIZE = (1024, 768)
    # Focal length chosen so the horizontal field of view equals CAMERA_FOV.
    CAMERA_FL = CAMERA_SIZE[0]/2/(CAMERA_FOV/2.0)
    FILTER_STD = 5
    FILTER_SCALE = 1.0/0.06
    # Random camera positions, each oriented to look at the origin.
    CAMERA_POSE = 5.0*(np.random.rand(BATCH_SIZE, 3).astype(np.float32) - 0.5)
    CAMERA_TARGET = np.array([(0.0, 0.0, 0.0)]*BATCH_SIZE, dtype=np.float32)
    CAMERA_ROT = np.zeros((BATCH_SIZE, 4), dtype=np.float32)
    for b in range(BATCH_SIZE):
        CAMERA_ROT[b, :] = pointAt(CAMERA_POSE[b, :], np.array([0, 0, 0], dtype=np.float32))
    # Random particles in a 2x2x2 cube around the origin.
    locs = 2.0*(np.random.rand(BATCH_SIZE, N, 3).astype(np.float32) - 0.5)
    # Depth mask: everything visible by default ...
    depth_mask = np.ones((BATCH_SIZE, CAMERA_SIZE[1], CAMERA_SIZE[0]),
                         dtype=np.float32)*np.finfo(np.float32).max
    # ... except a centered rectangle in batch 0 filled with a bilinear
    # depth gradient (corners ul/ur/ll/lr) to exercise occlusion handling.
    ir = (int(CAMERA_SIZE[0]/2 - CAMERA_SIZE[0]*0.2), int(CAMERA_SIZE[0]/2 + CAMERA_SIZE[0]*0.2) + 1)
    jr = (int(CAMERA_SIZE[1]/2 - CAMERA_SIZE[1]*0.2), int(CAMERA_SIZE[1]/2 + CAMERA_SIZE[1]*0.2) + 1)
    ul = 0.0
    lr = 10.0
    ur = 5.0
    ll = 3.5
    for i in range(ir[0], ir[1]):
        for j in range(jr[0], jr[1]):
            ii = 1.0*(i - ir[0])/(ir[1] - ir[0])
            jj = 1.0*(j - jr[0])/(jr[1] - jr[0])
            l = ul*(1 - jj) + ll*jj
            r = ur*(1 - jj) + lr*jj
            depth_mask[0, j, i] = l*(1 - ii) + r*ii

    # Helpers to move tensors on/off the GPU depending on the test mode.
    def use_cuda(x):
        if cuda:
            return x.cuda()
        else:
            return x
    def undo_cuda(x):
        if cuda:
            return x.cpu()
        else:
            return x
    def np2var(t):
        return torch.autograd.Variable(use_cuda(torch.from_numpy(t)), requires_grad=False)

    # Only the particle locations need gradients for gradcheck below.
    locs_t = torch.autograd.Variable(use_cuda(torch.FloatTensor(locs)), requires_grad=True)
    depth_mask_t = torch.autograd.Variable(use_cuda(torch.FloatTensor(depth_mask)), requires_grad=False)
    camera_pose_t = torch.autograd.Variable(use_cuda(torch.FloatTensor(CAMERA_POSE)),
                                            requires_grad=False)
    camera_rot_t = torch.autograd.Variable(use_cuda(torch.FloatTensor(CAMERA_ROT)),
                                           requires_grad=False)

    particleProjection = spn.ParticleProjection(CAMERA_FL, CAMERA_SIZE, FILTER_STD, FILTER_SCALE)

    # Uncomment to inspect ground truth vs output interactively.
    # particleViewer([
    #         lambda p, r: pyproject(CAMERA_FL, CAMERA_SIZE, FILTER_STD, FILTER_SCALE, locs,
    #             p, r, depth_mask),
    #         lambda p, r: undo_cuda(particleProjection(locs_t, np2var(p), np2var(r),
    #             depth_mask_t)).data.numpy(),
    #     ], BATCH_SIZE, 5, ["Ground Truth", "Output"])
    # return

    # Forward check: layer output must match the NumPy reference.
    ground_truth = pyproject(CAMERA_FL, CAMERA_SIZE, FILTER_STD, FILTER_SCALE, locs,
                             CAMERA_POSE, CAMERA_ROT, depth_mask)
    pred_t = particleProjection(locs_t, camera_pose_t, camera_rot_t, depth_mask_t)
    pred = undo_cuda(pred_t).data.numpy()
    # visualizeOutput([ground_truth, pred, -(pred - ground_truth)],
    #     ["Ground Truth", "Prediction", "Difference"])
    np.testing.assert_array_almost_equal(pred, ground_truth, decimal=3)

    # Use pyconvsp to allow for double precision when computing numeric grads.
    def func_numerical(l):
        ll = undo_cuda(l).data.numpy()
        return torch.autograd.Variable(use_cuda(torch.from_numpy(pyproject(CAMERA_FL, CAMERA_SIZE,
            FILTER_STD, FILTER_SCALE, ll, CAMERA_POSE, CAMERA_ROT, dtype=np.float64))),
            requires_grad=False)
    def func_analytical(l):
        # Gradcheck runs without the depth mask to keep the gradient smooth.
        return particleProjection(l, camera_pose_t, camera_rot_t)
    assert gradcheck(func_analytical, (locs_t,), eps=1e-6, atol=1e-3, rtol=1e-2,
                     func_numerical=func_numerical, use_double=True)
def quaternionFromMatrix(matrix):
    """Return the quaternion [x, y, z, w] for the 3x3 rotation *matrix*.

    Uses the symmetric-eigenvector method: the quaternion is the
    eigenvector of a 4x4 symmetric matrix K (built from the rotation
    matrix entries) that corresponds to its largest eigenvalue.
    """
    m = matrix
    K = np.array([
        [m[0, 0] - m[1, 1] - m[2, 2], 0.0, 0.0, 0.0],
        [m[0, 1] + m[1, 0], m[1, 1] - m[0, 0] - m[2, 2], 0.0, 0.0],
        [m[0, 2] + m[2, 0], m[1, 2] + m[2, 1],
         m[2, 2] - m[0, 0] - m[1, 1], 0.0],
        [m[2, 1] - m[1, 2], m[0, 2] - m[2, 0], m[1, 0] - m[0, 1],
         m[0, 0] + m[1, 1] + m[2, 2]],
    ]) / 3.0
    # eigh uses the lower triangle of K; the quaternion (w, x, y, z) is the
    # eigenvector of the largest eigenvalue, reordered via [3, 0, 1, 2].
    eigvals, eigvecs = np.linalg.eigh(K)
    q = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    # Canonicalize the sign so the scalar part is non-negative.
    if q[0] < 0.0:
        q = -q
    return [q[1], q[2], q[3], q[0]]
def pointAt(pose, target):
    """Return a camera quaternion [x, y, z, w] looking from *pose* at *target*.

    Convention: +Z=out of camera, +Y=Down, +X=right.
    """
    forward = target - pose
    forward = forward / np.sqrt(np.sum(forward**2))
    down = np.array([0, -1, 0], dtype=np.float32)
    # Build an orthonormal right/down/forward frame around the view axis.
    right = np.cross(down, forward)
    right = right / np.sqrt(np.sum(right**2))
    down = np.cross(forward, right)
    return quaternionFromMatrix(np.array([right, down, forward]).transpose())
def visualizeOutput(outputs, titles=None):
    """Display each output batch as a vertically tiled image in a cv2 window.

    *outputs* is a list of (batch, height, width) float arrays; each gets
    its own window (named after *titles*, or its index) with the batch
    images stacked vertically, separated by 5-pixel white bands.  Returns
    the key code from cv2.waitKey(0).
    """
    import cv2
    if titles is None:
        # BUG FIX: this previously read `len(funcs)` -- a name that only
        # exists in particleViewer -- raising NameError whenever titles
        # was omitted.  The default titles must match `outputs`.
        titles = [str(i) for i in range(len(outputs))]
    for ret, title in zip(outputs, titles):
        # White canvas tall enough for all batch images plus separators.
        img = np.ones(((ret.shape[1] + 5)*ret.shape[0], ret.shape[2]), dtype=np.float32)
        for b in range(ret.shape[0]):
            i = b*(ret.shape[1] + 5)
            j = (b + 1)*(ret.shape[1] + 5) - 5
            img[i:j, :] = ret[b, ...]
        cv2.imshow(title, img)
    return cv2.waitKey(0)
# Utility function for visualizing particles.
def particleViewer(funcs, batch_size, radius, titles=None):
    """Interactive viewer: orbit a camera of distance *radius* around the
    origin with the arrow keys, rendering every function in *funcs*
    (each called with (camera_pose, camera_rot) arrays of length
    *batch_size*) in its own window.  Exits on Escape.
    """
    # cv2.waitKey key codes for the viewer controls.
    # NOTE(review): these values look like X11 key codes with modifier
    # bits set -- confirm they match the local OpenCV build.
    ESCAPE = 1048603
    LEFT = 1113937
    RIGHT = 1113939
    UP = 1113938
    DOWN = 1113940
    # Horizontal (azimuth) and vertical (elevation) orbit angles, radians.
    hangle = 0.0
    vangle = 0.0
    if titles is None:
        titles = [str(i) for i in range(len(funcs))]
    k = None
    while k != ESCAPE:
        # Spherical-to-Cartesian conversion for the orbiting camera.
        y = radius*np.sin(vangle)
        r = radius*np.cos(vangle)
        x = r*np.cos(hangle)
        z = r*np.sin(hangle)
        pose = [x, y, z]
        # All batch entries share the same pose, always looking at the origin.
        camera_pose = np.array([pose]*batch_size, dtype=np.float32)
        camera_rot = np.array([pointAt(pose, np.array([0, 0, 0], dtype=np.float32))]*batch_size,
                              dtype=np.float32)
        k = visualizeOutput([func(camera_pose, camera_rot) for func in funcs], titles)
        # Arrow keys pan the orbit by 1 degree per press.
        if k == LEFT:
            hangle += 1.0/180.0*np.pi
        if k == RIGHT:
            hangle -= 1.0/180.0*np.pi
        if k == UP:
            vangle += 1.0/180.0*np.pi
        if k == DOWN:
            vangle -= 1.0/180.0*np.pi
        # Clamp elevation to avoid flipping over the poles.
        if vangle > np.pi/2:
            vangle = np.pi/2
        elif vangle < -np.pi/2:
            vangle = -np.pi/2
if __name__ == '__main__':
    import argparse

    # Each backend gets a --<flag> / --no-<flag> pair; both default to enabled.
    parser = argparse.ArgumentParser()
    for flag in ('cpu', 'cuda'):
        parser.add_argument('--' + flag, dest=flag, action='store_true', default=True)
        parser.add_argument('--no-' + flag, dest=flag, action='store_false')
    args = parser.parse_args()
    test_particleprojection(cpu=args.cpu, cuda=args.cuda)
from wtforms.compat import with_metaclass, iteritems, itervalues
# Public names exported by ``from wtforms.form import *``.
__all__ = (
    'BaseForm',
    'Form',
)
class BaseForm(object):
    """
    Base Form Class.  Provides core behaviour like field construction,
    validation, and data and error proxying.
    """

    def __init__(self, fields, prefix=''):
        """
        :param fields:
            A dict or sequence of 2-tuples of partially-constructed fields.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        """
        # Normalize the prefix so it always ends with a separator character,
        # e.g. 'login' becomes 'login-'.
        if prefix and prefix[-1] not in '-_;:/.':
            prefix += '-'
        self._prefix = prefix
        self._errors = None  # lazily-built cache for the `errors` property
        self._fields = {}
        # Accept both dict-like and sequence-of-pairs input.
        if hasattr(fields, 'iteritems'):
            fields = fields.iteritems()
        elif hasattr(fields, 'items'):
            # Python 3.x
            fields = fields.items()
        translations = self._get_translations()
        # Bind each partially-constructed (unbound) field to this form.
        for name, unbound_field in fields:
            field = unbound_field.bind(form=self, name=name, prefix=prefix, translations=translations)
            self._fields[name] = field

    def __iter__(self):
        """ Iterate form fields in arbitrary order """
        return iter(itervalues(self._fields))

    def __contains__(self, name):
        """ Returns `True` if the named field is a member of this form. """
        return (name in self._fields)

    def __getitem__(self, name):
        """ Dict-style access to this form's fields."""
        return self._fields[name]

    def __setitem__(self, name, value):
        """ Bind a field to this form. """
        self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)

    def __delitem__(self, name):
        """ Remove a field from this form. """
        del self._fields[name]

    def _get_translations(self):
        """
        Override in subclasses to provide alternate translations factory.

        Must return an object that provides gettext() and ngettext() methods.
        """
        return None

    def populate_obj(self, obj):
        """
        Populates the attributes of the passed `obj` with data from the form's
        fields.

        :note: This is a destructive operation; Any attribute with the same name
               as a field will be overridden. Use with caution.
        """
        for name, field in iteritems(self._fields):
            field.populate_obj(obj, name)

    def process(self, formdata=None, obj=None, **kwargs):
        """
        Take form, object data, and keyword arg input and have the fields
        process them.

        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST` or
            equivalent.
        :param obj:
            If `formdata` is empty or not provided, this object is checked for
            attributes matching form field names, which will be used for field
            values.
        :param `**kwargs`:
            If `formdata` is empty or not provided and `obj` does not contain
            an attribute named the same as a field, form will assign the value
            of a matching keyword argument to the field, if one exists.
        """
        if formdata is not None and not hasattr(formdata, 'getlist'):
            if hasattr(formdata, 'getall'):
                # Webob multidicts expose `getall` instead of `getlist`;
                # adapt them to the common interface the fields expect.
                formdata = WebobInputWrapper(formdata)
            else:
                raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
        # Initial-data precedence per field: obj attribute, then kwargs,
        # then formdata alone.  (The stray trailing comma below is harmless.)
        for name, field, in iteritems(self._fields):
            if obj is not None and hasattr(obj, name):
                field.process(formdata, getattr(obj, name))
            elif name in kwargs:
                field.process(formdata, kwargs[name])
            else:
                field.process(formdata)

    def validate(self, extra_validators=None):
        """
        Validates the form by calling `validate` on each field.

        :param extra_validators:
            If provided, is a dict mapping field names to a sequence of
            callables which will be passed as extra validators to the field's
            `validate` method.

        Returns `True` if no errors occur.
        """
        self._errors = None  # invalidate the cached error dict
        success = True
        # Validate every field, even after a failure, so all errors are collected.
        for name, field in iteritems(self._fields):
            if extra_validators is not None and name in extra_validators:
                extra = extra_validators[name]
            else:
                extra = tuple()
            if not field.validate(self, extra):
                success = False
        return success

    @property
    def data(self):
        # Mapping of field name -> coerced field data.
        return dict((name, f.data) for name, f in iteritems(self._fields))

    @property
    def errors(self):
        # Built lazily; only fields that actually have errors are included.
        if self._errors is None:
            self._errors = dict((name, f.errors) for name, f in iteritems(self._fields) if f.errors)
        return self._errors
class FormMeta(type):
    """
    The metaclass for `Form` and any subclasses of `Form`.

    `FormMeta`'s responsibility is to create the `_unbound_fields` list, which
    is a list of `UnboundField` instances sorted by their order of
    instantiation.  The list is created at the first instantiation of the form.
    If any fields are added/removed from the form, the list is cleared to be
    re-generated on the next instantiation.

    Any properties which begin with an underscore or are not `UnboundField`
    instances are ignored by the metaclass.
    """
    def __init__(cls, name, bases, attrs):
        type.__init__(cls, name, bases, attrs)
        # Computed lazily on first instantiation; None means "needs rebuild".
        cls._unbound_fields = None

    def __call__(cls, *args, **kwargs):
        """
        Construct a new `Form` instance, creating `_unbound_fields` on the
        class if it is empty.
        """
        if cls._unbound_fields is None:
            fields = []
            for name in dir(cls):
                if not name.startswith('_'):
                    unbound_field = getattr(cls, name)
                    # UnboundField instances are marked with a `_formfield` attribute.
                    if hasattr(unbound_field, '_formfield'):
                        fields.append((name, unbound_field))
            # We keep the name as the second element of the sort
            # to ensure a stable sort.
            fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
            cls._unbound_fields = fields
        return type.__call__(cls, *args, **kwargs)

    def __setattr__(cls, name, value):
        """
        Add an attribute to the class, clearing `_unbound_fields` if needed.
        """
        if not name.startswith('_') and hasattr(value, '_formfield'):
            cls._unbound_fields = None
        type.__setattr__(cls, name, value)

    def __delattr__(cls, name):
        """
        Remove an attribute from the class, clearing `_unbound_fields` if
        needed.
        """
        if not name.startswith('_'):
            cls._unbound_fields = None
        type.__delattr__(cls, name)
class Form(with_metaclass(FormMeta, BaseForm)):
    """
    Declarative Form base class.  Extends BaseForm's core behaviour allowing
    fields to be defined on Form subclasses as class attributes.

    In addition, form and instance input data are taken at construction time
    and passed to `process()`.
    """
    def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
        """
        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST` or
            equivalent. formdata should be some sort of request-data wrapper which
            can get multiple parameters from the form input, and values are unicode
            strings, e.g. a Werkzeug/Django/WebOb MultiDict
        :param obj:
            If `formdata` is empty or not provided, this object is checked for
            attributes matching form field names, which will be used for field
            values.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        :param `**kwargs`:
            If `formdata` is empty or not provided and `obj` does not contain
            an attribute named the same as a field, form will assign the value
            of a matching keyword argument to the field, if one exists.
        """
        # `_unbound_fields` is maintained by FormMeta in definition order.
        super(Form, self).__init__(self._unbound_fields, prefix=prefix)
        for name, field in iteritems(self._fields):
            # Set all the fields to attributes so that they obscure the class
            # attributes with the same names.
            setattr(self, name, field)
        self.process(formdata, obj, **kwargs)

    def __iter__(self):
        """ Iterate form fields in their order of definition on the form. """
        for name, _ in self._unbound_fields:
            if name in self._fields:
                yield self._fields[name]

    def __setitem__(self, name, value):
        # Declarative forms are fixed at class-definition time.
        raise TypeError('Fields may not be added to Form instances, only classes.')

    def __delitem__(self, name):
        del self._fields[name]
        # Mask the class-level UnboundField attribute with the same name.
        setattr(self, name, None)

    def __delattr__(self, name):
        if name in self._fields:
            self.__delitem__(name)
        else:
            # This is done for idempotency, if we have a name which is a field,
            # we want to mask it by setting the value to None.
            unbound_field = getattr(self.__class__, name, None)
            if unbound_field is not None and hasattr(unbound_field, '_formfield'):
                setattr(self, name, None)
            else:
                super(Form, self).__delattr__(name)

    def validate(self):
        """
        Validates the form by calling `validate` on each field, passing any
        extra `Form.validate_<fieldname>` validators to the field validator.
        """
        extra = {}
        for name in self._fields:
            inline = getattr(self.__class__, 'validate_%s' % name, None)
            if inline is not None:
                extra[name] = [inline]
        return super(Form, self).validate(extra)
class WebobInputWrapper(object):
    """
    Wrap a webob MultiDict for use as passing as `formdata` to Field.

    Webob's multidict exposes `getall` where the other supported frameworks
    (cgi.FieldStorage, Django's QueryDict, Werkzeug's MultiDict) expose
    `getlist`.  Rather than emulate every multidict method -- a likely
    source of subtle bugs -- this adapter forwards only the tiny subset of
    the API that WTForms actually relies on.
    """

    def __init__(self, multidict):
        self._multidict = multidict

    def __iter__(self):
        return iter(self._multidict)

    def __len__(self):
        return len(self._multidict)

    def __contains__(self, name):
        return name in self._multidict

    def getlist(self, name):
        # Translate the common `getlist` call into webob's `getall`.
        return self._multidict.getall(name)
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
gparray.py:
Contains three classes representing configuration information of a
Greenplum array:
GpArray - The primary interface - collection of all GpDB within an array
GpDB - represents configuration information for a single dbid
Segment - collection of all GpDB with the same content id
"""
# ============================================================================
from datetime import date
import copy
import traceback
from gppylib.utils import checkNotNone, checkIsInt
from gppylib import gplog
from gppylib.db import dbconn
from gppylib.gpversion import GpVersion
from gppylib.commands.unix import *
SYSTEM_FILESPACE = 3052  # oid of the system filespace

logger = gplog.get_default_logger()

# Name of the directory (under a segment template) that holds copied filespaces.
DESTINATION_FILE_SPACES_DIRECTORY = "fs_directory"

# Segment role codes as stored in gp_segment_configuration.
ROLE_PRIMARY = 'p'
ROLE_MIRROR = 'm'
VALID_ROLES = [ROLE_PRIMARY, ROLE_MIRROR]

# Map gp_segment_configuration role values to values from gp_primarymirror.
ROLE_TO_MODE_MAP = {}
SEG_MODE_PRIMARY = "PrimarySegment"
SEG_MODE_MIRROR = "MirrorSegment"
ROLE_TO_MODE_MAP[ROLE_PRIMARY] = SEG_MODE_PRIMARY
ROLE_TO_MODE_MAP[ROLE_MIRROR] = SEG_MODE_MIRROR

# Segment status codes.
STATUS_UP = 'u'
STATUS_DOWN = 'd'
VALID_STATUS = [STATUS_UP, STATUS_DOWN]

# Segment mirroring mode codes.
MODE_NOT_INITIALIZED = ''               # no mirroring
MODE_CHANGELOGGING = 'c'                # filerep logging
MODE_SYNCHRONIZED = 's'                 # filerep synchronized
MODE_RESYNCHRONIZATION = 'r'            # filerep resynchronizing (maps to "InResync" below)

# Map gp_segment_configuration mode values to values returned from gp_primarymirror.
MODE_TO_DATA_STATE_MAP = {}
SEG_DATA_STATE_NOT_INITIALIZED = "NotInitialized"
SEG_DATA_STATE_IN_CHANGE_TRACKING = "InChangeTracking"
SEG_DATA_STATE_SYNCHRONIZED = "InSync"
SEG_DATA_STATE_IN_RESYNC = "InResync"
MODE_TO_DATA_STATE_MAP[MODE_NOT_INITIALIZED] = SEG_DATA_STATE_NOT_INITIALIZED
MODE_TO_DATA_STATE_MAP[MODE_CHANGELOGGING] = SEG_DATA_STATE_IN_CHANGE_TRACKING
MODE_TO_DATA_STATE_MAP[MODE_SYNCHRONIZED] = SEG_DATA_STATE_SYNCHRONIZED
MODE_TO_DATA_STATE_MAP[MODE_RESYNCHRONIZATION] = SEG_DATA_STATE_IN_RESYNC

# SegmentState values returned from gp_primarymirror.
SEGMENT_STATE_NOT_INITIALIZED = "NotInitialized"
SEGMENT_STATE_INITIALIZATION = "Initialization"
SEGMENT_STATE_IN_CHANGE_TRACKING_TRANSITION = "InChangeTrackingTransition"
SEGMENT_STATE_IN_RESYNCTRANSITION = "InResyncTransition"
SEGMENT_STATE_IN_SYNC_TRANSITION = "InSyncTransition"
SEGMENT_STATE_READY = "Ready"
SEGMENT_STATE_CHANGE_TRACKING_DISABLED = "ChangeTrackingDisabled"
SEGMENT_STATE_FAULT = "Fault"
SEGMENT_STATE_SHUTDOWN_BACKENDS = "ShutdownBackends"
SEGMENT_STATE_SHUTDOWN = "Shutdown"
SEGMENT_STATE_IMMEDIATE_SHUTDOWN = "ImmediateShutdown"

# Modes a live segment may legitimately report (MODE_NOT_INITIALIZED excluded).
VALID_MODE = [
    MODE_SYNCHRONIZED,
    MODE_CHANGELOGGING,
    MODE_RESYNCHRONIZATION,
]
# Human-readable labels for the mode codes (see getDataModeLabel).
MODE_LABELS = {
    MODE_CHANGELOGGING: "Change Tracking",
    MODE_SYNCHRONIZED: "Synchronized",
    MODE_RESYNCHRONIZATION: "Resynchronizing"
}

# These are all the valid states primary/mirror pairs can
# be in.  Any configuration other than this will cause the
# FTS Prober to bring down the master postmaster until the
# configuration is corrected.  Here, primary and mirror refer
# to the segments current role, not the preferred_role.
#
# The format of the tuples are:
#    (<primary status>, <prmary mode>, <mirror status>, <mirror_mode>)
VALID_SEGMENT_STATES = [
    (STATUS_UP, MODE_CHANGELOGGING, STATUS_DOWN, MODE_SYNCHRONIZED),
    (STATUS_UP, MODE_CHANGELOGGING, STATUS_DOWN, MODE_RESYNCHRONIZATION),
    (STATUS_UP, MODE_RESYNCHRONIZATION, STATUS_UP, MODE_RESYNCHRONIZATION),
    (STATUS_UP, MODE_SYNCHRONIZED, STATUS_UP, MODE_SYNCHRONIZED)
]
def getDataModeLabel(mode):
    """Return the display label (e.g. "Synchronized") for a segment mode code.

    Raises KeyError for codes not present in MODE_LABELS.
    """
    return MODE_LABELS[mode]
# Fault strategy codes.
FAULT_STRATEGY_NONE = 'n'                # mirrorless systems
FAULT_STRATEGY_FILE_REPLICATION = 'f'    # valid for versions 4.0+
FAULT_STRATEGY_SAN = 's'                 # valid for versions 4.0+
FAULT_STRATEGY_READONLY = 'r'            # valid for versions 3.x
FAULT_STRATEGY_CONTINUE = 'c'            # valid for versions 3.x
# Human-readable labels for the strategy codes (see getFaultStrategyLabel).
FAULT_STRATEGY_LABELS = {
    FAULT_STRATEGY_NONE: "none",
    FAULT_STRATEGY_FILE_REPLICATION: "physical mirroring",
    FAULT_STRATEGY_SAN: "SAN failover",
    FAULT_STRATEGY_READONLY: "readonly",
    FAULT_STRATEGY_CONTINUE: "continue",
}
VALID_FAULT_STRATEGY = FAULT_STRATEGY_LABELS.keys()

# Content id reserved for the master.
MASTER_CONTENT_ID = -1
def getFaultStrategyLabel(strategy):
    """Return the display label (e.g. "physical mirroring") for a fault strategy code.

    Raises KeyError for codes not present in FAULT_STRATEGY_LABELS.
    """
    return FAULT_STRATEGY_LABELS[strategy]
class InvalidSegmentConfiguration(Exception):
    """Signals that a GpArray read from gp_segment_configuration, or about
    to be written back to it, does not describe a legal cluster layout."""

    def __init__(self, array):
        # Keep the offending array around so callers can inspect it.
        self.array = array

    def __str__(self):
        return "Invalid GpArray: %s" % self.array
# ============================================================================
# ============================================================================
class GpDB:
    """
    GpDB class representing configuration information for a single dbid
    within a Greenplum Array.
    """

    # --------------------------------------------------------------------
    def __init__(self, content, preferred_role, dbid, role, mode, status,
                 hostname, address, port, datadir, replicationPort):
        # Todo: replace all these fields with private alternatives:
        # e.g. '_content' instead of 'content'.
        #
        # Other code should go through class interfaces for access, this
        # will allow easier modifications in the future.
        self.content = content
        self.preferred_role = preferred_role
        self.dbid = dbid
        self.role = role
        self.mode = mode
        self.status = status
        self.hostname = hostname
        self.address = address
        self.port = port
        self.datadir = datadir
        self.replicationPort = replicationPort

        # Filespace mappings for this segment; the data directory doubles
        # as the location of the system filespace.
        # Todo: Handle self.datadir more cleanly
        # Todo: Handle initialization more cleanly
        self.__filespaces = { SYSTEM_FILESPACE: datadir }

        # Pending filespace creation (set by gpfilespace while it builds one)
        self.__pending_filespace = None

        # Catalog directory for each database in this segment
        self.catdirs = None

        # Todo: Remove old dead code
        self.valid = (status == 'u')

    # --------------------------------------------------------------------
    def __str__(self):
        """
        Construct a printable string representation of a GpDB
        """
        return "%s:%s:content=%s:dbid=%s:mode=%s:status=%s" % (
            self.hostname,
            self.datadir,
            self.content,
            self.dbid,
            self.mode,
            self.status
            )

    #
    # Note that this is not an ideal comparison -- it uses the string
    # representation for comparison.  (Python 2 only: __cmp__ is ignored
    # by Python 3.)
    #
    def __cmp__(self, other):
        left = repr(self)
        right = repr(other)
        if left < right: return -1
        elif left > right: return 1
        else: return 0

    #
    # Moved here from system/configurationImplGpdb.py
    #
    def equalIgnoringModeAndStatusAndReplicationPort(self, other):
        """
        Return true if none of the "core" attributes (e.g. filespace)
        of two segments differ, false otherwise.

        This method is used by updateSystemConfig() to know when a catalog
        change will cause removing and re-adding a mirror segment.
        """
        firstMode = self.getSegmentMode()
        firstStatus = self.getSegmentStatus()
        firstReplicationPort = self.getSegmentReplicationPort()
        try:
            # make the elements we don't want to compare match and see if
            # they are then equal
            self.setSegmentMode(other.getSegmentMode())
            self.setSegmentStatus(other.getSegmentStatus())
            self.setSegmentReplicationPort(other.getSegmentReplicationPort())

            return self == other
        finally:
            #
            # restore mode and status after comparison
            #
            self.setSegmentMode(firstMode)
            self.setSegmentStatus(firstStatus)
            self.setSegmentReplicationPort(firstReplicationPort)

    # --------------------------------------------------------------------
    def __repr__(self):
        """
        Construct a string representation of class, must be sufficient
        information to call initFromString on the result and deterministic
        so it can be used for __cmp__ comparison
        """
        # Note: this doesn't currently handle "pending filespaces", but
        # this is not currently required since gpfilespace is the only code
        # that generates pending filespaces and it never serializes a gparray
        # object.
        fsOids = [oid for oid in self.__filespaces]
        fsOids.sort()  # sort for determinism
        filespaces = []
        for fsoid in fsOids:
            if fsoid not in [SYSTEM_FILESPACE]:
                filespaces.append("%d:%s" % (fsoid, self.__filespaces[fsoid]))

        # BUG FIX: when catdirs is empty the last field must be the empty
        # string.  The previous "else []" rendered the literal "[]" through
        # %s, which broke the repr/initFromString round trip.
        return '%d|%d|%s|%s|%s|%s|%s|%s|%d|%s|%s|%s|%s' % (
            self.dbid,
            self.content,
            self.role,
            self.preferred_role,
            self.mode,
            self.status,
            self.hostname,
            self.address,
            self.port,
            self.replicationPort,
            self.datadir,
            ','.join(filespaces),  # this is rather ugly
            ','.join(self.catdirs) if self.catdirs else ''
            )

    # --------------------------------------------------------------------
    @staticmethod
    def initFromString(s):
        """
        Factory method, initializes a GpDB object from string representation.
          - Used when importing from file format.
          - TODO: Should be compatable with repr() formatting.
        """
        tup = s.strip().split('|')

        # Old format: 8 fields
        # Todo: remove the need for this, or rework it to be cleaner
        if len(tup) == 8:
            # This describes the gp_configuration catalog (pre 3.4)
            content = int(tup[0])
            definedprimary = tup[1]
            dbid = int(tup[2])
            isprimary = tup[3]
            valid = tup[4]
            address = tup[5]
            port = int(tup[6])
            datadir = tup[7]

            # Calculate new fields from old ones
            #
            # Note: this should be kept in sync with the code in
            # GpArray.InitFromCatalog() code for initializing old catalog
            # formats.
            #
            # NOTE(review): definedprimary/isprimary/valid are raw strings
            # here, so ANY non-empty value (including 'f' or '0') is truthy
            # in the conditionals below -- confirm the expected serialized
            # values before relying on these branches.
            preferred_role = ROLE_PRIMARY if definedprimary else ROLE_MIRROR
            role = ROLE_PRIMARY if isprimary else ROLE_MIRROR
            hostname = None
            mode = MODE_SYNCHRONIZED  # ???
            status = STATUS_UP if valid else STATUS_DOWN
            replicationPort = None
            filespaces = ""
            catdirs = ""

        # Catalog 3.4 format: 12 fields
        elif len(tup) == 12:
            # This describes the gp_segment_configuration catalog (3.4)
            dbid = int(tup[0])
            content = int(tup[1])
            role = tup[2]
            preferred_role = tup[3]
            mode = tup[4]
            status = tup[5]
            hostname = tup[6]
            address = tup[7]
            port = int(tup[8])
            replicationPort = tup[9]
            datadir = tup[10]  # from the pg_filespace_entry table
            filespaces = tup[11]
            catdirs = ""

        # Catalog 4.0+: 13 fields
        elif len(tup) == 13:
            # This describes the gp_segment_configuration catalog (3.4+)
            dbid = int(tup[0])
            content = int(tup[1])
            role = tup[2]
            preferred_role = tup[3]
            mode = tup[4]
            status = tup[5]
            hostname = tup[6]
            address = tup[7]
            port = int(tup[8])
            replicationPort = tup[9]
            datadir = tup[10]  # from the pg_filespace_entry table
            filespaces = tup[11]
            catdirs = tup[12]
        else:
            raise Exception("GpDB unknown input format: %s" % s)

        # Initialize segment without filespace information
        gpdb = GpDB(content = content,
                    preferred_role = preferred_role,
                    dbid = dbid,
                    role = role,
                    mode = mode,
                    status = status,
                    hostname = hostname,
                    address = address,
                    port = port,
                    datadir = datadir,
                    replicationPort = replicationPort)

        # Add in filespace information, if present
        for fs in filespaces.split(","):
            if fs == "":
                continue
            (fsoid, fselocation) = fs.split(":")
            gpdb.addSegmentFilespace(fsoid, fselocation)

        # Add Catalog Dir, if present
        gpdb.catdirs = []
        for d in catdirs.split(","):
            if d == "":
                continue
            gpdb.catdirs.append(d)

        # Return the completed segment
        return gpdb

    # --------------------------------------------------------------------
    @staticmethod
    def getDataDirPrefix(datadir):
        """Return ``datadir`` with its last path component stripped."""
        retValue = ""
        retValue = datadir[:datadir.rfind('/')]
        return retValue

    # --------------------------------------------------------------------
    @staticmethod
    def getFileSpaceDirsWithNewSuffix(fileSpaceDictionary, suffix, includeSystemFilespace = True):
        """
        This method will take a dictionary of file spaces and return the same
        dictionary with the last path component replaced by the new suffix.
        """
        retValue = {}

        for entry in fileSpaceDictionary:
            if entry == SYSTEM_FILESPACE and includeSystemFilespace == False:
                continue
            newDir = GpDB.getDataDirPrefix(fileSpaceDictionary[entry])
            newDir = newDir + "/" + suffix
            retValue[entry] = newDir
        return retValue

    # --------------------------------------------------------------------
    @staticmethod
    def replaceFileSpaceContentID(fileSpaceDictionary, oldContent, newContent):
        """Rewrite each filespace path, replacing the trailing old content id
        with the new one."""
        retValue = {}

        for entry in fileSpaceDictionary:
            tempDir = fileSpaceDictionary[entry]
            tempDir = tempDir[:tempDir.rfind(str(oldContent))]
            tempDir += ('%d' % newContent)
            retValue[entry] = tempDir
        return retValue

    # --------------------------------------------------------------------
    def copy(self):
        """
        Creates a copy of the segment, shallow for everything except the
        filespaces map
        """
        res = copy.copy(self)
        res.__filespaces = copy.copy(self.__filespaces)
        return res

    # --------------------------------------------------------------------
    def createTemplate(self, dstDir):
        """
        Create a template given the information in this GpDB.
        """
        # Make sure we have enough room in the dstDir to fit the segment and
        # its filespaces.
        #
        # NOTE(review): the DiskUsage command below is named "srcDir" but
        # measures dstDir -- confirm whether it should measure the segment's
        # source data directory instead.
        duCmd = DiskUsage( name = "srcDir"
                         , directory = dstDir
                         )
        duCmd.run(validateAfter=True)
        requiredSize = duCmd.get_bytes_used()

        name = "segcopy filespace get_size"
        for oid in self.__filespaces:
            if oid == SYSTEM_FILESPACE:
                continue
            dir = self.__filespaces[oid]
            duCmd = DiskUsage(name, dir)
            duCmd.run(validateAfter=True)
            size = duCmd.get_bytes_used()
            requiredSize = requiredSize + size

        dstBytesAvail = DiskFree.get_size_local(name = "Check for available free space for segment template", directory = dstDir)
        if dstBytesAvail <= requiredSize:
            raise Exception("Not enough space on directory: '%s'.  Currently %d bytes free but need %d bytes." % (dstDir, int(dstBytesAvail), int(requiredSize)))

        logger.info("Starting copy of segment dbid %d to location %s" % (int(self.getSegmentDbId()), dstDir))

        cpCmd = LocalDirCopy("Copy system data directory", self.getSegmentDataDirectory(), dstDir)
        cpCmd.run(validateAfter = True)
        res = cpCmd.get_results()

        if len(self.__filespaces) > 1:
            """ Make directory to hold file spaces """
            fullPathFsDir = dstDir + "/" + DESTINATION_FILE_SPACES_DIRECTORY
            cmd = FileDirExists( name = "check for existence of template filespace directory"
                               , directory = fullPathFsDir
                               )
            cmd.run(validateAfter = True)
            MakeDirectory.local("gpexpand make directory to hold file spaces", fullPathFsDir)
            for oid in self.__filespaces:
                MakeDirectory.local("gpexpand make directory to hold file space oid: " + str(oid), fullPathFsDir)
                dir = self.__filespaces[oid]
                destDir = fullPathFsDir + "/" + str(oid)
                MakeDirectory.local("gpexpand make directory to hold file space: " + destDir, destDir)
                name = "GpSegCopy %s to %s" % (dir, destDir)
                cpCmd = LocalDirCopy(name, dir, destDir)
                cpCmd.run(validateAfter = True)
                res = cpCmd.get_results()

        # Remove the gp_dbid file from the data dir
        RemoveFiles.local('Remove gp_dbid file', os.path.normpath(dstDir + '/gp_dbid'))
        logger.info("Cleaning up catalog for schema only copy on destination")
        # We need 700 permissions or postgres won't start
        Chmod.local('set template permissions', dstDir, '0700')

    # --------------------------------------------------------------------
    # Six simple helper functions to identify what role a segment plays:
    #  + QD (Query Dispatcher)
    #     + master
    #     + standby master
    #  + QE (Query Executor)
    #     + primary
    #     + mirror
    # --------------------------------------------------------------------
    def isSegmentQD(self):
        return self.content < 0

    def isSegmentMaster(self, current_role=False):
        role = self.role if current_role else self.preferred_role
        return self.content < 0 and role == ROLE_PRIMARY

    def isSegmentStandby(self, current_role=False):
        role = self.role if current_role else self.preferred_role
        return self.content < 0 and role == ROLE_MIRROR

    def isSegmentQE(self):
        return self.content >= 0

    def isSegmentPrimary(self, current_role=False):
        role = self.role if current_role else self.preferred_role
        return self.content >= 0 and role == ROLE_PRIMARY

    def isSegmentMirror(self, current_role=False):
        role = self.role if current_role else self.preferred_role
        return self.content >= 0 and role == ROLE_MIRROR

    def isSegmentUp(self):
        return self.status == STATUS_UP

    def isSegmentDown(self):
        return self.status == STATUS_DOWN

    def isSegmentModeInChangeLogging(self):
        return self.mode == MODE_CHANGELOGGING

    def isSegmentModeSynchronized(self):
        return self.mode == MODE_SYNCHRONIZED

    def isSegmentModeInResynchronization(self):
        return self.mode == MODE_RESYNCHRONIZATION

    # --------------------------------------------------------------------
    # getters
    # --------------------------------------------------------------------
    def getSegmentDbId(self):
        return checkNotNone("dbId", self.dbid)

    def getSegmentContentId(self):
        return checkNotNone("contentId", self.content)

    def getSegmentRole(self):
        return checkNotNone("role", self.role)

    def getSegmentPreferredRole(self):
        return checkNotNone("preferredRole", self.preferred_role)

    def getSegmentMode(self):
        return checkNotNone("mode", self.mode)

    def getSegmentStatus(self):
        return checkNotNone("status", self.status)

    def getSegmentPort(self):
        """
        Returns the listening port for the postmaster for this segment.

        Note: With file replication the postmaster will not be active for
        mirrors so nothing will be listening on this port, instead the
        "replicationPort" is used for primary-mirror communication.
        """
        return checkNotNone("port", self.port)

    def getSegmentReplicationPort(self):
        """
        Returns the replicationPort for the segment, this is the port used for
        communication between the primary and mirror for file replication.

        Note: is Nullable (so can return None)
        """
        return self.replicationPort

    def getSegmentHostName(self):
        """
        Returns the actual `hostname` for the host

        Note: use getSegmentAddress for the network address to use
        """
        return self.hostname

    def getSegmentAddress(self):
        """
        Returns the network address to use to contact the segment (i.e. the
        NIC address).
        """
        return self.address

    def getSegmentDataDirectory(self):
        """
        Return the primary datadirectory location for the segment.

        Note: the datadirectory is just one of the filespace locations
        associated with the segment, calling code should be carefull not
        to assume that this is the only directory location for this segment.

        Todo: evaluate callers of this function to see if they should really
        be dealing with a list of filespaces.
        """
        return checkNotNone("dataDirectory", self.datadir)

    def getSegmentFilespaces(self):
        """
        Returns the filespace dictionary of oid->path pairs
        """
        return self.__filespaces

    # --------------------------------------------------------------------
    # setters
    # --------------------------------------------------------------------
    def setSegmentDbId(self, dbId):
        checkNotNone("dbId", dbId)
        self.dbid = dbId

    def setSegmentContentId(self, contentId):
        checkNotNone("contentId", contentId)
        if contentId < -1:
            raise Exception("Invalid content id %s" % contentId)
        self.content = contentId

    def setSegmentRole(self, role):
        checkNotNone("role", role)
        if role not in VALID_ROLES:
            raise Exception("Invalid role '%s'" % role)
        self.role = role

    def setSegmentPreferredRole(self, preferredRole):
        checkNotNone("preferredRole", preferredRole)
        if preferredRole not in VALID_ROLES:
            raise Exception("Invalid preferredRole '%s'" % preferredRole)
        self.preferred_role = preferredRole

    def setSegmentMode(self, mode):
        checkNotNone("mode", mode)
        if mode not in VALID_MODE:
            raise Exception("Invalid mode '%s'" % mode)
        self.mode = mode

    def setSegmentStatus(self, status):
        checkNotNone("status", status)
        if status not in VALID_STATUS:
            raise Exception("Invalid status '%s'" % status)
        self.status = status

    def setSegmentPort(self, port):
        checkNotNone("port", port)
        checkIsInt("port", port)
        self.port = port

    def setSegmentReplicationPort(self, replicationPort):
        # None is allowed -- don't check nonNone
        if replicationPort is not None:
            checkIsInt("replicationPort", replicationPort)
        self.replicationPort = replicationPort

    def setSegmentHostName(self, hostName):
        # None is allowed -- don't check
        self.hostname = hostName

    def setSegmentAddress(self, address):
        # None is allowed -- don't check
        self.address = address

    def setSegmentDataDirectory(self, dataDirectory):
        checkNotNone("dataDirectory", dataDirectory)
        self.datadir = dataDirectory

    def addSegmentFilespace(self, oid, path):
        """
        Add a filespace path for this segment.

        Throws:
            Exception - if a path has already been specified for this segment.
        """
        # gpfilespace adds a special filespace with oid=None to indicate
        # the filespace that it is currently building, since the filespace
        # does not yet exist there is no valid value that could be used.
        if oid is None:
            if self.__pending_filespace:
                raise Exception("Duplicate filespace path for dbid %d" %
                                self.dbid)
            self.__pending_filespace = path
            return

        # oids should always be integer values > 0
        oid = int(oid)
        assert(oid > 0)

        # The more usual case just sets the filespace in the filespace
        # dictionary
        if oid in self.__filespaces:
            raise Exception("Duplicate filespace path for "
                            "dbid %d filespace %d" % (self.dbid, oid))

        self.__filespaces[oid] = path

    def getSegmentPendingFilespace(self):
        """
        Returns the pending filespace location for this segment
        (called by gpfilespace)
        """
        return self.__pending_filespace
# ============================================================================
class Segment:
    """
    All of the SegmentDBs sharing one contentID: today at most a primary
    SegDB plus a single mirror SegDB, though the structure allows several
    mirrors in the future.

    Note: This class seems to complicate the implementation of gparray,
    without adding much value.  Perhaps it should be removed.
    """
    # Class-level defaults; __init__ gives each instance its own mirror list.
    primaryDB = None
    mirrorDBs = None

    def __init__(self):
        self.mirrorDBs = []

    def __str__(self):
        mirrors = ','.join(str(segdb) for segdb in self.mirrorDBs)
        return "(Primary: %s, Mirrors: [%s])" % (str(self.primaryDB), mirrors)

    def addPrimary(self, segDB):
        """Record segDB as this content's primary."""
        self.primaryDB = segDB

    def addMirror(self, segDB):
        """Append segDB to this content's mirror list."""
        self.mirrorDBs.append(segDB)

    def get_dbs(self):
        """Return the primary (if any) followed by all mirrors."""
        dbs = []
        if self.primaryDB is not None:  # MPP-10886 don't add None to result list
            dbs.append(self.primaryDB)
        dbs.extend(self.mirrorDBs)
        return dbs

    def get_hosts(self):
        """Hostnames of the primary and every mirror, primary first."""
        return [self.primaryDB.hostname] + [m.hostname for m in self.mirrorDBs]

    def is_segment_pair_valid(self):
        """Validates that the primary/mirror pair are in a valid state"""
        primary_state = (self.primaryDB.getSegmentStatus(),
                         self.primaryDB.getSegmentMode())
        for mirror_db in self.mirrorDBs:
            combined = primary_state + (mirror_db.getSegmentStatus(),
                                        mirror_db.getSegmentMode())
            if combined not in VALID_SEGMENT_STATES:
                return False
        return True

    def get_active_primary(self):
        """Return whichever member currently acts as primary, or None."""
        if self.primaryDB.isSegmentPrimary(current_role=True):
            return self.primaryDB
        for mirror in self.mirrorDBs:
            if mirror.isSegmentPrimary(current_role=True):
                return mirror

    def get_primary_dbid(self):
        return self.primaryDB.getSegmentDbId()
# --------------------------------------------------------------------
# --------------------------------------------------------------------
class SegmentRow():
    """
    Plain record describing one new segment db to be created
    (produced by createSegmentRows / createSegmentRowsFromSegmentList).
    """
    def __init__(self, content, isprimary, dbid, host, address, port, fulldir, prPort, fileSpaceDictionary = None):
        self.content = content
        self.isprimary = isprimary
        self.dbid = dbid
        self.host = host
        self.address = address
        self.port = port
        self.fulldir = fulldir
        self.prPort = prPort
        self.fileSpaceDictionary = fileSpaceDictionary

    def __str__(self):
        retVal = "" + \
        "content = " + str(self.content) + "\n" + \
        "isprimary =" + str(self.isprimary) + "\n" + \
        "dbid = " + str(self.dbid) + "\n" + \
        "host = " + str(self.host) + "\n" + \
        "address = " + str(self.address) + "\n" + \
        "port = " + str(self.port) + "\n" + \
        "fulldir = " + str(self.fulldir) + "\n" + \
        "prPort = " + str(self.prPort) + "\n" + \
        "fileSpaceDictionary = " + str(self.fileSpaceDictionary) + "\n" + "\n"
        # Bug fix: the string was built but never returned, so str(row)
        # raised "TypeError: __str__ returned non-string (type NoneType)".
        return retVal
def createSegmentRows( hostlist
                     , interface_list
                     , primary_list
                     , primary_portbase
                     , mirror_type
                     , mirror_list
                     , mirror_portbase
                     , dir_prefix
                     , primary_replication_portbase
                     , mirror_replication_portbase
                     , primary_fs_list = None
                     , mirror_fs_list = None
                     ):
    """
    This method will return a list of SegmentRow objects that represent new segments on each host.
    The "hostlist" parameter contains both existing hosts as well as any new hosts that are
    a result of expansion.

    Primaries are laid out first: one per entry in primary_list on every host.
    Mirrors are then placed according to mirror_type:
      None / 'none' - no mirrors
      'spread'      - a host's mirrors are spread over the following hosts
      'grouped'     - all of a host's mirrors go to the next host
    Raises Exception for an unknown mirror_type or an impossible layout.
    """
    rows = []
    dbid = 0
    content = 0

    # --- primaries ---
    for host in hostlist:
        isprimary = 't'
        port = primary_portbase
        prPort = primary_replication_portbase
        index = 0
        for pdir in primary_list:
            fulldir = "%s/%s%d" % (pdir, dir_prefix, content)
            if len(interface_list) > 0:
                interfaceNumber = interface_list[index % len(interface_list)]
                address = host + '-' + str(interfaceNumber)
            else:
                address = host
            fsDict = {}
            # idiom fix: identity test against None instead of '!= None'
            if primary_fs_list is not None and len(primary_fs_list) > index:
                fsDict = primary_fs_list[index]
            fullFsDict = {}
            for oid in fsDict:
                fullFsDict[oid] = "%s/%s%d" % (fsDict[oid], dir_prefix, content)
            rows.append( SegmentRow( content = content
                                   , isprimary = isprimary
                                   , dbid = dbid
                                   , host = host
                                   , address = address
                                   , port = port
                                   , fulldir = fulldir
                                   , prPort = prPort
                                   , fileSpaceDictionary = fullFsDict
                                   ) )
            port += 1
            if prPort is not None:
                prPort += 1
            content += 1
            dbid += 1
            index = index + 1

    #mirrors
    if mirror_type is None or mirror_type == 'none':
        return rows
    elif mirror_type.lower().strip() == 'spread':
        #TODO: must be sure to put mirrors on a different subnet than primary.
        # this is a general problem for GPDB these days. perhaps we should
        # add something to gpdetective to be able to detect this and fix it.
        # best to have the interface mapping stuff 1st.
        content = 0
        isprimary = 'f'
        num_hosts = len(hostlist)
        num_dirs = len(primary_list)
        if num_hosts <= num_dirs:
            raise Exception("Not enough hosts for spread mirroring. You must have more hosts than primary segments per host")
        mirror_port = {}
        mirror_replication_port = {}
        mirror_host_offset = 1
        last_mirror_offset = 1
        for host in hostlist:
            mirror_host_offset = last_mirror_offset + 1
            last_mirror_offset += 1
            index = 0
            for mdir in mirror_list:
                fulldir = "%s/%s%d" % (mdir, dir_prefix, content)
                fsDict = {}
                if mirror_fs_list is not None and len(mirror_fs_list) > index:
                    fsDict = mirror_fs_list[index]
                fullFsDict = {}
                for oid in fsDict:
                    fullFsDict[oid] = "%s/%s%d" % (fsDict[oid], dir_prefix, content)
                mirror_host = hostlist[mirror_host_offset % num_hosts]
                if mirror_host == host:
                    # never place a mirror on its own primary's host
                    mirror_host_offset += 1
                    mirror_host = hostlist[mirror_host_offset % num_hosts]
                if len(interface_list) > 0:
                    interfaceNumber = interface_list[mirror_host_offset % len(interface_list)]
                    address = mirror_host + '-' + str(interfaceNumber)
                else:
                    address = mirror_host
                # compat fix: dict.has_key() was removed in Python 3; use 'in'
                if mirror_host not in mirror_port:
                    mirror_port[mirror_host] = mirror_portbase
                if mirror_host not in mirror_replication_port:
                    mirror_replication_port[mirror_host] = mirror_replication_portbase
                rows.append( SegmentRow( content = content
                                       , isprimary = isprimary
                                       , dbid = dbid
                                       , host = mirror_host
                                       , address = address
                                       , port = mirror_port[mirror_host]
                                       , fulldir = fulldir
                                       , prPort = mirror_replication_port[mirror_host]
                                       , fileSpaceDictionary = fullFsDict
                                       ) )
                mirror_port[mirror_host] += 1
                mirror_replication_port[mirror_host] += 1
                content += 1
                dbid += 1
                mirror_host_offset += 1
                index = index + 1
    elif mirror_type.lower().strip() == 'grouped':
        content = 0
        num_hosts = len(hostlist)
        if num_hosts < 2:
            raise Exception("Not enough hosts for grouped mirroring. You must have at least 2")
        #we'll pick our mirror host to be 1 host "ahead" of the primary.
        mirror_host_offset = 1
        isprimary = 'f'
        for host in hostlist:
            mirror_host = hostlist[mirror_host_offset % num_hosts]
            mirror_host_offset += 1
            port = mirror_portbase
            mrPort = mirror_replication_portbase
            index = 0
            for mdir in mirror_list:
                fulldir = "%s/%s%d" % (mdir, dir_prefix, content)
                if len(interface_list) > 0:
                    interfaceNumber = interface_list[(index + 1) % len(interface_list)]
                    address = mirror_host + '-' + str(interfaceNumber)
                else:
                    address = mirror_host
                fsDict = {}
                if mirror_fs_list is not None and len(mirror_fs_list) > index:
                    fsDict = mirror_fs_list[index]
                fullFsDict = {}
                for oid in fsDict:
                    fullFsDict[oid] = "%s/%s%d" % (fsDict[oid], dir_prefix, content)
                rows.append( SegmentRow( content = content
                                       , isprimary = isprimary
                                       , dbid = dbid
                                       , host = mirror_host
                                       , address = address
                                       , port = port
                                       , fulldir = fulldir
                                       , prPort = mrPort
                                       , fileSpaceDictionary = fullFsDict
                                       ) )
                port += 1
                mrPort += 1
                content += 1
                dbid += 1
                index = index + 1
    else:
        raise Exception("Invalid mirror type specified: %s" % mirror_type)

    return rows
#========================================================================
def createSegmentRowsFromSegmentList( newHostlist
                                    , interface_list
                                    , primary_segment_list
                                    , primary_portbase
                                    , mirror_type
                                    , mirror_segment_list
                                    , mirror_portbase
                                    , dir_prefix
                                    , primary_replication_portbase
                                    , mirror_replication_portbase
                                    ):
    """
    This method will return a list of SegmentRow objects that represent an expansion of existing
    segments on new hosts.

    Layout follows createSegmentRows(), but data directories and filespace
    locations are derived from the existing segments in primary_segment_list /
    mirror_segment_list rather than from plain directory lists.
    """
    rows = []
    dbid = 0
    content = 0
    interfaceDict = {}

    # --- primaries ---
    for host in newHostlist:
        isprimary = 't'
        port = primary_portbase
        prPort = primary_replication_portbase
        index = 0
        for pSeg in primary_segment_list:
            if len(interface_list) > 0:
                interfaceNumber = interface_list[index % len(interface_list)]
                address = host + '-' + str(interfaceNumber)
                # remember which interface this content used, for mirror placement
                interfaceDict[content] = index % len(interface_list)
            else:
                address = host
            newFulldir = "%s/%s%d" % (GpDB.getDataDirPrefix(pSeg.getSegmentDataDirectory()), dir_prefix, content)
            newFileSpaceDictionary = GpDB.getFileSpaceDirsWithNewSuffix(pSeg.getSegmentFilespaces(), dir_prefix + str(content), includeSystemFilespace = False)
            rows.append( SegmentRow( content = content
                                   , isprimary = isprimary
                                   , dbid = dbid
                                   , host = host
                                   , address = address
                                   , port = port
                                   , fulldir = newFulldir
                                   , prPort = prPort
                                   , fileSpaceDictionary = newFileSpaceDictionary
                                   ) )
            port += 1
            # idiom fix: identity test against None instead of '!= None'
            if prPort is not None:
                prPort += 1
            content += 1
            dbid += 1
            index += 1

    #mirrors
    if mirror_type is None or mirror_type == 'none':
        return rows
    elif mirror_type.lower().strip() == 'spread':
        content = 0
        isprimary = 'f'
        num_hosts = len(newHostlist)
        num_dirs = len(primary_segment_list)
        if num_hosts <= num_dirs:
            raise Exception("Not enough hosts for spread mirroring. You must have more hosts than primary segments per host")
        mirror_port = {}
        mirror_replication_port = {}
        mirror_host_offset = 1
        last_mirror_offset = 0
        for host in newHostlist:
            mirror_host_offset = last_mirror_offset + 1
            last_mirror_offset += 1
            for mSeg in mirror_segment_list:
                newFulldir = "%s/%s%d" % (GpDB.getDataDirPrefix(mSeg.getSegmentDataDirectory()), dir_prefix, content)
                newFileSpaceDictionary = GpDB.getFileSpaceDirsWithNewSuffix(mSeg.getSegmentFilespaces(), dir_prefix + str(content), includeSystemFilespace = False)
                mirror_host = newHostlist[mirror_host_offset % num_hosts]
                if mirror_host == host:
                    # never place a mirror on its own primary's host
                    mirror_host_offset += 1
                    mirror_host = newHostlist[mirror_host_offset % num_hosts]
                if len(interface_list) > 0:
                    interfaceNumber = interface_list[(interfaceDict[content] + 1) % len(interface_list)]
                    address = mirror_host + '-' + str(interfaceNumber)
                else:
                    address = mirror_host
                # compat fix: dict.has_key() was removed in Python 3; use 'in'
                if mirror_host not in mirror_port:
                    mirror_port[mirror_host] = mirror_portbase
                if mirror_host not in mirror_replication_port:
                    mirror_replication_port[mirror_host] = mirror_replication_portbase
                rows.append( SegmentRow( content = content
                                       , isprimary = isprimary
                                       , dbid = dbid
                                       , host = mirror_host
                                       , address = address
                                       , port = mirror_port[mirror_host]
                                       , fulldir = newFulldir
                                       , prPort = mirror_replication_port[mirror_host]
                                       , fileSpaceDictionary = newFileSpaceDictionary
                                       ) )
                mirror_port[mirror_host] += 1
                mirror_replication_port[mirror_host] += 1
                content += 1
                dbid += 1
                mirror_host_offset += 1
    elif mirror_type.lower().strip() == 'grouped':
        content = 0
        num_hosts = len(newHostlist)
        if num_hosts < 2:
            raise Exception("Not enough hosts for grouped mirroring. You must have at least 2")
        #we'll pick our mirror host to be 1 host "ahead" of the primary.
        mirror_host_offset = 1
        isprimary = 'f'
        for host in newHostlist:
            mirror_host = newHostlist[mirror_host_offset % num_hosts]
            mirror_host_offset += 1
            port = mirror_portbase
            mrPort = mirror_replication_portbase
            index = 0
            for mSeg in mirror_segment_list:
                if len(interface_list) > 0:
                    interfaceNumber = interface_list[(interfaceDict[content] + 1) % len(interface_list)]
                    address = mirror_host + '-' + str(interfaceNumber)
                else:
                    address = mirror_host
                newFulldir = "%s/%s%d" % (GpDB.getDataDirPrefix(mSeg.getSegmentDataDirectory()), dir_prefix, content)
                newFileSpaceDictionary = GpDB.getFileSpaceDirsWithNewSuffix(mSeg.getSegmentFilespaces(), dir_prefix + str(content), includeSystemFilespace = False)
                rows.append( SegmentRow( content = content
                                       , isprimary = isprimary
                                       , dbid = dbid
                                       , host = mirror_host
                                       , address = address
                                       , port = port
                                       , fulldir = newFulldir
                                       , prPort = mrPort
                                       , fileSpaceDictionary = newFileSpaceDictionary
                                       ) )
                port += 1
                mrPort += 1
                content += 1
                dbid += 1
                index = index + 1
    else:
        raise Exception("Invalid mirror type specified: %s" % mirror_type)

    return rows
#========================================================================
def parseTrueFalse(value):
    """Map 't'/'f' (case-insensitive) to True/False; raise on anything else."""
    normalized = value.lower()
    if normalized == 't':
        return True
    if normalized == 'f':
        return False
    raise Exception('Invalid true/false value')
# TODO: Destroy this (MPP-7686)
# Now that "hostname" is a distinct field in gp_segment_configuration
# attempting to derive hostnames from interface names should be eliminated.
#
def get_host_interface(full_hostname):
    """
    Split a name of the form '<host>-<n>' into (host, interface-number-string).
    If there is no trailing numeric '-<n>' component (or nothing before it),
    return (full_hostname, None).
    """
    host_part, sep, inf_num = full_hostname.rpartition('-')
    if host_part == '' or not inf_num.isdigit():
        return (full_hostname, None)
    return (host_part, inf_num)
class GpFilespaceObj:
    """
    Lightweight record of one filespace row as stored in pg_filespace.
    """
    def __init__(self, oid, name):
        # catalog values are kept private; read them via the accessors below
        self.__oid = oid
        self.__name = name

    def getOid(self):
        """Return the pg_filespace oid of this filespace."""
        return self.__oid

    def getName(self):
        """Return the filespace name."""
        return self.__name

    def isSystemFilespace(self):
        """True when this filespace is the built-in system filespace."""
        return self.__oid == SYSTEM_FILESPACE
# ============================================================================
class GpArray:
"""
GpArray is a python class that describes a Greenplum array.
A Greenplum array consists of:
master - The primary QD for the array
standby master - The mirror QD for the array [optional]
segment array - an array of segments within the cluster
Each segment is either a single GpDB object, or a primary/mirror pair.
It can be initialized either from a database connection, in which case
it discovers the configuration information by examining the catalog, or
via a configuration file.
"""
# --------------------------------------------------------------------
def __init__(self, segments, segmentsAsLoadedFromDb=None, strategyLoadedFromDb=None):
"""
segmentsInDb is used only be the configurationImpl* providers; it is used to track the state of the
segments in the database
TODO:
"""
self.master = None
self.standbyMaster = None
self.segments = []
self.expansionSegments=[]
self.numPrimarySegments = 0
self.recoveredSegmentDbids = []
self.__version = None
self.__segmentsAsLoadedFromDb = segmentsAsLoadedFromDb
self.__strategyLoadedFromDb = strategyLoadedFromDb
self.__strategy = FAULT_STRATEGY_NONE
self.san_mount_by_dbid = {}
self.san_mounts = {}
self.setFilespaces([])
for segdb in segments:
# Handle QD nodes
if segdb.isSegmentMaster(True):
if self.master != None:
logger.error("multiple master dbs defined")
raise Exception("GpArray - multiple master dbs defined")
self.master = segdb
elif segdb.isSegmentStandby(True):
if self.standbyMaster != None:
logger.error("multiple standby master dbs defined")
raise Exception("GpArray - multiple standby master dbs defined")
self.standbyMaster = segdb
# Handle regular segments
elif segdb.isSegmentQE():
self.addSegmentDb(segdb)
else:
# Not a master, standbymaster, primary, or mirror?
# shouldn't even be possible.
logger.error("FATAL - invalid dbs defined")
raise Exception("Error: GpArray() - invalid dbs defined")
# Make sure we have a master db
if self.master is None:
logger.error("FATAL - no master dbs defined!")
raise Exception("Error: GpArray() - no master dbs defined")
def __str__(self):
return "Master: %s\nStandby: %s\nSegments: %s" % (str(self.master),
str(self.standbyMaster) if self.standbyMaster else 'Not Configured',
"\n".join([str(seg) for seg in self.segments]))
def addSegmentDb(self, segdb):
content = segdb.getSegmentContentId()
while len(self.segments) <= content:
self.segments.insert(content, Segment())
seg = self.segments[content]
if segdb.isSegmentPrimary(True):
seg.addPrimary(segdb)
self.numPrimarySegments += 1
else:
seg.addMirror(segdb)
# --------------------------------------------------------------------
def isStandardArray(self):
"""
This method will check various aspects of the array to see if it looks like a standard
setup. It returns two values:
True or False depending on if the array looks like a standard array.
If message if the array does not look like a standard array.
"""
try:
# Do all the segments contain the same number of primary and mirrors.
firstNumPrimaries = 0
firstNumMirrors = 0
firstHost = ""
first = True
dbList = self.getDbList(includeExpansionSegs = True)
gpdbByHost = self.getSegmentsByHostName(dbList)
for host in gpdbByHost:
gpdbList = gpdbByHost[host]
if len(gpdbList) == 1 and gpdbList[0].isSegmentQD() == True:
# This host has one master segment and nothing else
continue
if len(gpdbList) == 2 and gpdbList[0].isSegmentQD() and gpdbList[1].isSegmentQD():
# This host has the master segment and its mirror and nothing else
continue
numPrimaries = 0
numMirrors = 0
for gpdb in gpdbList:
if gpdb.isSegmentQD() == True:
continue
if gpdb.isSegmentPrimary() == True:
numPrimaries = numPrimaries + 1
else:
numMirrors = numMirrors + 1
if first == True:
firstNumPrimaries = numPrimaries
firstNumMirrors = numMirrors
firstHost = host
first = False
if numPrimaries != firstNumPrimaries:
raise Exception("The number of primary segments is not consistent across all nodes: %s != %s." % (host, firstHost))
elif numMirrors != firstNumMirrors:
raise Exception("The number of mirror segments is not consistent across all nodes. %s != %s." % (host, firstHost))
# Make sure the address all have the same suffix "-<n>" (like -1, -2, -3...)
firstSuffixList = []
first = True
suffixList = []
for host in gpdbByHost:
gpdbList = gpdbByHost[host]
for gpdb in gpdbList:
if gpdb.isSegmentMaster() == True:
continue
address = gpdb.getSegmentAddress()
if address == host:
if len(suffixList) == 0:
continue
else:
raise Exception("The address value for %s is the same as the host name, but other addresses on the host are not." % address)
suffix = address.split('-')[-1]
if suffix.isdigit() == False:
raise Exception("The address value for %s does not correspond to a standard address." % address)
suffixList.append(suffix)
suffixList.sort()
if first == True:
firstSuffixList = suffixList
first = False
if suffixList != firstSuffixList:
raise Exception("The address list for %s doesn't not have the same pattern as %s." % (str(suffixList), str(firstSuffixList)))
except Exception, e:
# Assume any exception implies a non-standard array
return False, str(e)
return True, ""
# --------------------------------------------------------------------
    @staticmethod
    def initFromCatalog(dbURL, utility=False):
        """
        Factory method, initializes a GpArray from provided database URL

        dbURL is handed to dbconn.connect(); utility selects utility-mode
        connections.  Reads version, fault strategy, segment configuration,
        SAN configuration and filespaces from the catalog, adapting the
        queries to pre-4.0 catalogs.
        """
        conn = dbconn.connect(dbURL, utility)

        # Get the version from the database:
        version_str = None
        for row in dbconn.execSQL(conn, "SELECT version()"):
            version_str = row[0]
        version = GpVersion(version_str)

        if version.getVersionRelease() in ("3.0", "3.1", "3.2", "3.3"):
            # In older releases we get the fault strategy using the
            # gp_fault_action guc.
            strategy_rows = dbconn.execSQL(conn, "show gp_fault_action")

            # Note: Mode may not be "right", certainly 4.0 concepts of mirroring
            # mode do not apply to 3.x, so it depends on how the scripts are
            # making use of mode. For now it is initialized to synchronized.
            #
            # Note: hostname is initialized to null since the catalog does not
            # contain this information. Initializing a hostcache using the
            # resulting gparray will automatically fill in a value for hostname.
            #
            # Note: this should be kept in sync with the code in
            # GpDB.InitFromString() code for initializing old catalog formats.
            config_rows = dbconn.execSQL(conn, '''
            SELECT dbid, content,
            case when isprimary then 'p' else 'm' end as role,
            case when definedprimary then 'p' else 'm' end as preferred_role,
            's' as mode,
            case when valid then 'u' else 'd' end as status,
            null as hostname,
            hostname as address,
            port,
            null as replication_port,
            %s as fsoid,
            datadir as fselocation
            FROM pg_catalog.gp_configuration
            ORDER BY content, preferred_role DESC
            ''' % str(SYSTEM_FILESPACE))

            # No SAN support in these older releases.
            san_segs_rows = []
            san_rows = []

            # no filespace support in older releases.
            filespaceArr = []
        else:
            strategy_rows = dbconn.execSQL(conn, '''
            SELECT fault_strategy FROM gp_fault_strategy
            ''')

            # One row per (segment, filespace): a segment with several
            # filespaces appears on several consecutive rows (see merge below).
            config_rows = dbconn.execSQL(conn, '''
            SELECT dbid, content, role, preferred_role, mode, status,
            hostname, address, port, replication_port, fs.oid,
            fselocation
            FROM pg_catalog.gp_segment_configuration
            JOIN pg_catalog.pg_filespace_entry on (dbid = fsedbid)
            JOIN pg_catalog.pg_filespace fs on (fsefsoid = fs.oid)
            ORDER BY content, preferred_role DESC, fs.oid
            ''')

            # unnest() yields one row per mount id; consumed by setSanConfig()
            san_segs_rows = dbconn.execSQL(conn, '''
            SELECT dbid, content, status, unnest(san_mounts)
            FROM pg_catalog.gp_segment_configuration
            WHERE content >= 0
            ORDER BY content, dbid
            ''')

            san_rows = dbconn.execSQL(conn, '''
            SELECT mountid, active_host, san_type,
            primary_host, primary_mountpoint, primary_device,
            mirror_host, mirror_mountpoint, mirror_device
            FROM pg_catalog.gp_san_configuration
            ORDER BY mountid
            ''')

            filespaceRows = dbconn.execSQL(conn, '''
            SELECT oid, fsname
            FROM pg_filespace
            ORDER BY fsname;
            ''')
            filespaceArr = [GpFilespaceObj(fsRow[0], fsRow[1]) for fsRow in filespaceRows]

        # Todo: add checks that all segments should have the same filespaces?
        recoveredSegmentDbids = []
        segments = []
        seg = None
        for row in config_rows:

            # Extract fields from the row
            (dbid, content, role, preferred_role, mode, status, hostname,
             address, port, replicationPort, fsoid, fslocation) = row

            # If we have segments which have recovered, record them.
            if preferred_role != role and content >= 0:
                if mode == MODE_SYNCHRONIZED and status == STATUS_UP:
                    recoveredSegmentDbids.append(dbid)

            # The query returns all the filespaces for a segment on separate
            # rows. If this row is the same dbid as the previous row simply
            # add this filespace to the existing list, otherwise create a
            # new segment.
            if seg and seg.getSegmentDbId() == dbid:
                seg.addSegmentFilespace(fsoid, fslocation)
            else:
                seg = GpDB(content, preferred_role, dbid, role, mode, status,
                           hostname, address, port, fslocation, replicationPort)
                segments.append(seg)

        # Map each database's catalog location per dbid, derived from the
        # database's tablespace and that tablespace's filespace entry.
        datcatloc = dbconn.execSQL(conn, '''
        select fsloc.dbid, fsloc.fselocation || '/' ||
        case when db.dattablespace = 1663
        then 'base'
        else db.dattablespace::text
        end || '/'||db.oid as catloc
        from pg_Database db, pg_tablespace ts,
        (SELECT dbid, fs.oid, fselocation
        FROM pg_catalog.gp_segment_configuration
        JOIN pg_catalog.pg_filespace_entry on (dbid = fsedbid)
        JOIN pg_catalog.pg_filespace fs on (fsefsoid = fs.oid)) fsloc
        where db.dattablespace = ts.oid
        and ts.spcfsoid = fsloc.oid''')
        conn.close()

        # group catalog locations by dbid (NOTE: has_key is Python-2-only)
        catlocmap = {}
        for row in datcatloc:
            if catlocmap.has_key(row[0]):
                catlocmap[row[0]].append(row[1])
            else:
                catlocmap[row[0]] = [row[1]]
        for seg in segments:
            seg.catdirs = catlocmap[seg.dbid]

        # keep a pristine copy of the segments as loaded from the database
        origSegments = [seg.copy() for seg in segments]

        if strategy_rows.rowcount == 0:
            raise Exception("Database does not contain gp_fault_strategy entry")
        if strategy_rows.rowcount > 1:
            raise Exception("Database has too many gp_fault_strategy entries")
        strategy = strategy_rows.fetchone()[0]

        array = GpArray(segments, origSegments, strategy)
        array.__version = version
        array.recoveredSegmentDbids = recoveredSegmentDbids
        array.setFaultStrategy(strategy)
        array.setSanConfig(san_rows, san_segs_rows)
        array.setFilespaces(filespaceArr)

        return array
# --------------------------------------------------------------------
@staticmethod
def initFromFile(filename):
"""
Factory method: creates a GpArray from an input file
(called by gpexpand.)
Note: Currently this is only used by the gpexpand rollback facility,
and by gpsuspend utility,
there is currently NO expectation that this file format is saved
on disk in any long term fashion.
Format changes of the file are acceptable until this assumption is
changed, but initFromFile and dumpToFile must be kept in parity.
"""
segdbs=[]
fp = open(filename, 'r')
for line in fp:
segdbs.append(GpDB.initFromString(line))
fp.close()
return GpArray(segdbs)
# --------------------------------------------------------------------
def is_array_valid(self):
"""Checks that each primary/mirror pair is in a valid state"""
for seg in self.segments:
if not seg.is_segment_pair_valid():
return False
return True
# --------------------------------------------------------------------
def dumpToFile(self, filename):
"""
Dumps a GpArray to a file (called by gpexpand)
Note: See notes above for initFromFile()
"""
fp = open(filename, 'w')
for gpdb in self.getDbList():
fp.write(repr(gpdb) + '\n')
fp.close()
# --------------------------------------------------------------------
def setSanConfig(self, san_row_list, san_segs_list):
"""
Sets up the san-config.
san_row_list is essentially the contents of gp_san_config (which may be empty)
san_segs_list is the content of the san_mounts field of gp_segment_configuration
(also potentially empty), unnested.
These are raw results sets. We build two maps:
Map1: from dbid to a list of san-mounts.
Map2: from mount-id to the san_config attributes.
The code below has to match the SQL inside initFromCatalog()
"""
# First collect the "unnested" mount-ids into a list.
dbid_map = {}
for row in san_segs_list:
(dbid, content, status, mountid) = row
if dbid_map.has_key(dbid):
(status, content, mount_list) = dbid_map[dbid]
dbid_map[dbid] = (status, content, mount_list.append(mountid))
else:
dbid_map[dbid] = (status, content, [mountid])
# dbid_map now contains a flat mapping from dbid -> (status, list of mountpoints)
san_map = {}
for row in san_row_list:
(mountid, active, type, p_host, p_mp, p_dev, m_host, m_mp, m_dev) = row
san_map[mountid] = {'active':active, 'type':type,
'primaryhost':p_host, 'primarymountpoint':p_mp, 'primarydevice':p_dev,
'mirrorhost':m_host, 'mirrormountpoint':m_mp, 'mirrordevice':m_dev
}
self.san_mount_by_dbid = dbid_map
self.san_mounts = san_map
# --------------------------------------------------------------------
def getSanConfigMaps(self):
return (self.san_mounts, self.san_mount_by_dbid)
# --------------------------------------------------------------------
def setFaultStrategy(self, strategy):
"""
Sets the fault strategy of the array.
The input strategy should either be a valid fault strategy code:
['n', 'f', ...]
Or it should be a valid fault strategy label:
['none', 'physical mirroring', ...]
The reason that we need to accept both forms of input is that fault
strategy is modeled differently in the catalog depending on the catalog
version.
In 3.x fault strategy is stored using the label via the gp_fault_action
guc.
In 4.0 fault strategy is stored using the code via the gp_fault_strategy
table.
"""
checkNotNone("strategy", strategy)
# Try to lookup the strategy as a label
for (key, value) in FAULT_STRATEGY_LABELS.iteritems():
if value == strategy:
strategy = key
break
if strategy not in VALID_FAULT_STRATEGY:
raise Exception("Invalid fault strategy '%s'" % strategy)
self.__strategy = strategy
# --------------------------------------------------------------------
def getFaultStrategy(self):
"""
Will return a string matching one of the FAULT_STRATEGY_* constants
"""
if self.__strategy not in VALID_FAULT_STRATEGY:
raise Exception("Fault strategy is not set correctly: '%s'" %
self.__strategy)
return self.__strategy
def setFilespaces(self, filespaceArr):
"""
@param filespaceArr of GpFilespaceObj objects
"""
self.__filespaceArr = [fs for fs in filespaceArr]
def getFilespaces(self, includeSystemFilespace=True):
"""
@return a newly allocated list of GpFilespaceObj objects, will have been sorted by filespace name
"""
if includeSystemFilespace:
return [fs for fs in self.__filespaceArr]
else:
return [fs for fs in self.__filespaceArr if not fs.isSystemFilespace()]
# --------------------------------------------------------------
def getFileSpaceName(self, filespaceOid):
retValue = None
if self.__filespaceArr != None:
for entry in self.__filespaceArr:
if entry.getOid() == filespaceOid:
retValue = entry.getName()
break
return retValue
# --------------------------------------------------------------
def getFileSpaceOid(self, filespaceName):
retValue = None
if self.__filespaceArr != None:
for entry in self.__filespaceArr:
if entry.getName() == filespaceName:
retValue = entry.getOid()
break
return retValue
# --------------------------------------------------------------------
def getDbList(self, includeExpansionSegs=False):
"""
Return a list of all GpDb objects that make up the array
"""
dbs=[]
dbs.append(self.master)
if self.standbyMaster:
dbs.append(self.standbyMaster)
if includeExpansionSegs:
dbs.extend(self.getSegDbList(True))
else:
dbs.extend(self.getSegDbList())
return dbs
# --------------------------------------------------------------------
def getHostList(self, includeExpansionSegs = False):
"""
Return a list of all Hosts that make up the array
"""
hostList = []
hostList.append(self.master.getSegmentHostName())
if self.standbyMaster:
hostList.append(self.standbyMaster.getSegmentHostName())
dbList = self.getDbList(includeExpansionSegs = includeExpansionSegs)
for db in dbList:
if db.getSegmentHostName() in hostList:
continue
else:
hostList.append(db.getSegmentHostName())
return hostList
def getDbIdToPeerMap(self):
"""
Returns a map that maps a dbid to the peer segment for that dbid
"""
contentIdToSegments = {}
for seg in self.getSegDbList():
arr = contentIdToSegments.get(seg.getSegmentContentId())
if arr is None:
arr = []
contentIdToSegments[seg.getSegmentContentId()] = arr
arr.append(seg)
result = {}
for contentId, arr in contentIdToSegments.iteritems():
if len(arr) == 1:
pass
elif len(arr) != 2:
raise Exception("Content %s has more than two segments"% contentId)
else:
result[arr[0].getSegmentDbId()] = arr[1]
result[arr[1].getSegmentDbId()] = arr[0]
return result
# --------------------------------------------------------------------
def getSegDbList(self, includeExpansionSegs=False):
"""Return a list of all GpDb objects for all segments in the array"""
dbs=[]
for seg in self.segments:
dbs.extend(seg.get_dbs())
if includeExpansionSegs:
for seg in self.expansionSegments:
dbs.extend(seg.get_dbs())
return dbs
# --------------------------------------------------------------------
def getSegmentList(self, includeExpansionSegs=False):
"""Return a list of all GpDb objects for all segments in the array"""
dbs=[]
dbs.extend(self.segments)
if includeExpansionSegs:
dbs.extend(self.expansionSegments)
return dbs
# --------------------------------------------------------------------
def getSegDbMap(self):
"""
Return a map of all GpDb objects that make up the array.
"""
dbsMap = {}
for db in self.getSegDbList():
dbsMap[db.getSegmentDbId()] = db
return dbsMap
# --------------------------------------------------------------------
def getExpansionSegDbList(self):
"""Returns a list of all GpDb objects that make up the new segments
of an expansion"""
dbs=[]
for seg in self.expansionSegments:
dbs.extend(seg.get_dbs())
return dbs
# --------------------------------------------------------------------
def getSegmentContainingDb(self, db):
for seg in self.segments:
for segDb in seg.get_dbs():
if db.getSegmentDbId() == segDb.getSegmentDbId():
return seg
return None
# --------------------------------------------------------------------
def getExpansionSegmentContainingDb(self, db):
for seg in self.expansionSegments:
for segDb in seg.get_dbs():
if db.getSegmentDbId() == segDb.getSegmentDbId():
return seg
return None
# --------------------------------------------------------------------
def get_invalid_segdbs(self):
dbs=[]
for seg in self.segments:
segdb = seg.primaryDB
if not segdb.valid:
dbs.append(segdb)
for db in seg.mirrorDBs:
if not db.valid:
dbs.append(db)
return dbs
# --------------------------------------------------------------------
def get_synchronized_segdbs(self):
dbs=[]
for seg in self.segments:
segdb = seg.primaryDB
if segdb.mode == MODE_SYNCHRONIZED:
dbs.append(segdb)
for segdb in seg.mirrorDBs:
if segdb.mode == MODE_SYNCHRONIZED:
dbs.append(segdb)
return dbs
# --------------------------------------------------------------------
def get_unbalanced_segdbs(self):
dbs=[]
for seg in self.segments:
for segdb in seg.get_dbs():
if segdb.preferred_role != segdb.role:
dbs.append(segdb)
return dbs
# --------------------------------------------------------------------
def get_unbalanced_primary_segdbs(self):
dbs = [seg for seg in self.get_unbalanced_segdbs() if seg.role == ROLE_PRIMARY]
return dbs
# --------------------------------------------------------------------
def get_inactive_mirrors_segdbs(self):
if self.__strategy != FAULT_STRATEGY_SAN:
return []
dbs=[]
for seg in self.segments:
segdb = seg.primaryDB
for db in seg.mirrorDBs:
dbs.append(db)
return dbs
# --------------------------------------------------------------------
def get_valid_segdbs(self):
dbs=[]
for seg in self.segments:
db = seg.primaryDB
if db.valid:
dbs.append(db)
for db in seg.mirrorDBs:
if db.valid:
dbs.append(db)
return dbs
# --------------------------------------------------------------------
def get_hostlist(self, includeMaster=True):
hosts=[]
if includeMaster:
hosts.append(self.master.hostname)
if self.standbyMaster is not None:
hosts.append(self.standbyMaster.hostname)
for seg in self.segments:
hosts.extend(seg.get_hosts())
return hosts
# --------------------------------------------------------------------
def get_max_dbid(self,includeExpansionSegs=False):
"""Returns the maximum dbid in the array. If includeExpansionSegs
is True, this includes the expansion segment array in the search"""
dbid = 0
for db in self.getDbList(includeExpansionSegs):
if db.getSegmentDbId() > dbid:
dbid = db.getSegmentDbId()
return dbid
# --------------------------------------------------------------------
def get_max_contentid(self, includeExpansionSegs=False):
    """Returns the maximum contentid in the array (0 when empty).  If
    includeExpansionSegs is True, this includes the expansion segment
    array in the search."""
    highest = 0
    for db in self.getDbList(includeExpansionSegs):
        highest = max(highest, db.content)
    return highest
# --------------------------------------------------------------------
def get_segment_count(self):
    # Number of content groups (Segment objects), not individual segdbs.
    return len(self.segments)
# --------------------------------------------------------------------
def get_min_primary_port(self):
    """Returns the minimum primary segment db port.

    Uses the min() builtin instead of the previous seed-and-scan loop.
    (On an empty array this raises ValueError rather than the previous
    IndexError; both are programming errors here.)
    """
    return min(seg.primaryDB.port for seg in self.segments)
# --------------------------------------------------------------------
def get_max_primary_port(self):
    """Returns the maximum primary segment db port.

    Uses the max() builtin instead of the previous seed-and-scan loop.
    """
    return max(seg.primaryDB.port for seg in self.segments)
# --------------------------------------------------------------------
def get_min_mirror_port(self):
    """Returns the minimum mirror segment db port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    # min() over all mirrors replaces the hand-rolled seed-and-scan loop.
    return min(db.port for seg in self.segments for db in seg.mirrorDBs)
# --------------------------------------------------------------------
def get_max_mirror_port(self):
    """Returns the maximum mirror segment db port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    # max() over all mirrors replaces the hand-rolled seed-and-scan loop.
    return max(db.port for seg in self.segments for db in seg.mirrorDBs)
# --------------------------------------------------------------------
def get_min_primary_replication_port(self):
    """Returns the minimum primary segment db replication port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    return min(seg.primaryDB.replicationPort for seg in self.segments)
# --------------------------------------------------------------------
def get_max_primary_replication_port(self):
    """Returns the maximum primary segment db replication port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    return max(seg.primaryDB.replicationPort for seg in self.segments)
# --------------------------------------------------------------------
def get_min_mirror_replication_port(self):
    """Returns the minimum mirror segment db replication port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    return min(db.replicationPort
               for seg in self.segments for db in seg.mirrorDBs)
# --------------------------------------------------------------------
def get_max_mirror_replication_port(self):
    """Returns the maximum mirror segment db replication port.

    Raises:
        Exception: if mirroring is not enabled for the array.
    """
    if not self.get_mirroring_enabled():
        raise Exception('Mirroring is not enabled')
    return max(db.replicationPort
               for seg in self.segments for db in seg.mirrorDBs)
# --------------------------------------------------------------------
def get_interface_numbers(self):
    """Returns the interface number strings used in the array.

    Assumes addresses are named <hostname>-<int_num>.  If the nodes just
    have <hostname> (no interface suffix), an empty list is returned.
    Only addresses on the first primary's host are examined.

    Replaced the '== True' / '== False' boolean comparisons with direct
    truth tests.
    """
    interface_nums = []
    primary_hostname = self.segments[0].primaryDB.hostname
    primary_address_list = []
    for db in self.getDbList():
        if db.isSegmentQD():
            # skip master / standby entries
            continue
        if db.getSegmentHostName() == primary_hostname:
            if db.getSegmentAddress() not in primary_address_list:
                primary_address_list.append(db.getSegmentAddress())

    for address in primary_address_list:
        # Address must look like "<hostname>-<digits>".
        if not address.startswith(primary_hostname) or len(primary_hostname) + 2 > len(address):
            return []
        suffix = address[len(primary_hostname):]
        if len(suffix) < 2 or suffix[0] != '-' or not suffix[1:].isdigit():
            return []
        interface_nums.append(suffix[1:])
    return interface_nums
# --------------------------------------------------------------------
def get_primary_count(self):
    # Cached count of primary segments; presumably maintained by the
    # constructor / catalog loader — not visible in this chunk.
    return self.numPrimarySegments
# --------------------------------------------------------------------
def get_mirroring_enabled(self):
    """Return True when the array has mirrors configured.

    Decided purely from segment 0's mirror list, matching the long-standing
    assumption that all segments are configured identically.
    """
    return bool(self.segments[0].mirrorDBs)
# --------------------------------------------------------------------
def get_list_of_primary_segments_on_host(self, hostname):
    """Return the primary segment dbs located on the given host.

    NOTE(review): the False passed to isSegmentPrimary appears to select
    the configured (preferred) rather than current role — confirm against
    GpDB.isSegmentPrimary.  Replaced '== True' with a direct truth test
    and the append-loop with a comprehension.
    """
    return [db for db in self.getDbList()
            if db.isSegmentPrimary(False) and db.getSegmentHostName() == hostname]
# --------------------------------------------------------------------
def get_list_of_mirror_segments_on_host(self, hostname):
    """Return the mirror segment dbs located on the given host.

    NOTE(review): the False passed to isSegmentMirror appears to select
    the configured (preferred) rather than current role — confirm against
    GpDB.isSegmentMirror.  Replaced '== True' with a direct truth test
    and the append-loop with a comprehension.
    """
    return [db for db in self.getDbList()
            if db.isSegmentMirror(False) and db.getSegmentHostName() == hostname]
# --------------------------------------------------------------------
def get_primary_root_datadirs(self):
    """
    Returns a list of primary data directories minus the <prefix><contentid>

    NOTE 1:
       This currently assumes that all segments are configured the same
       and gets the results only from the host of segment 0

    NOTE 2:
       The determination of hostname is based on faulty logic
    """
    seg0_address = self.segments[0].primaryDB.getSegmentAddress()
    (seg0_address, _interface_num) = get_host_interface(seg0_address)
    roots = []
    for db in self.getDbList():
        if db.isSegmentPrimary(False) and db.getSegmentAddress().startswith(seg0_address):
            # drop the trailing "/<prefix><contentid>" path component
            roots.append(db.datadir[:db.datadir.rfind('/')])
    return roots
# --------------------------------------------------------------------
def get_mirror_root_datadirs(self):
    """
    Returns a list of mirror data directories minus the <prefix><contentid>
    """
    seg0_address = self.segments[0].primaryDB.getSegmentAddress()
    (seg0_address, _interface_num) = get_host_interface(seg0_address)
    roots = []
    for db in self.getDbList():
        if db.isSegmentMirror(False) and db.getSegmentAddress().startswith(seg0_address):
            # drop the trailing "/<prefix><contentid>" path component
            roots.append(db.datadir[:db.datadir.rfind('/')])
    return roots
# --------------------------------------------------------------------
def get_datadir_prefix(self):
    """
    Returns the prefix portion of the master's <prefix><contentid>
    directory name, e.g. "gpseg" for ".../gpseg-1".
    """
    master_dir = self.master.datadir
    last_component_start = master_dir.rfind('/') + 1
    content_separator = master_dir.rfind('-')
    return master_dir[last_component_start:content_separator]
# --------------------------------------------------------------------
# If we've got recovered segments, and we have a matched-pair, we
# can update the catalog to "rebalance" back to our original primary.
def updateRoleForRecoveredSegs(self, dbURL):
    """
    Marks the segment role to match the configured preferred_role.

    For every matched pair (both primary and mirror dbids present in
    self.recoveredSegmentDbids) the catalog row for that content id is
    flipped back so role == preferred_role, and a history row is written
    for each side of the swap.  The in-memory segments array is NOT
    updated; the caller is expected to re-read the configuration.
    """
    # walk our list of segments, checking to make sure that
    # both members of the peer-group are in our recovered-list,
    # save their content-id.
    recovered_contents = []
    for seg in self.segments:
        if seg.primaryDB:
            if seg.primaryDB.dbid in self.recoveredSegmentDbids:
                if len(seg.mirrorDBs) > 0 and seg.mirrorDBs[0].dbid in self.recoveredSegmentDbids:
                    recovered_contents.append((seg.primaryDB.content, seg.primaryDB.dbid, seg.mirrorDBs[0].dbid))

    # allowSystemTableMods is required because gp_segment_configuration
    # is a system catalog table.
    conn = dbconn.connect(dbURL, True, allowSystemTableMods = 'dml')
    for (content_id, primary_dbid, mirror_dbid) in recovered_contents:
        # Exactly two rows (primary + mirror) must be updated per content.
        sql = "UPDATE gp_segment_configuration SET role=preferred_role where content = %d" % content_id
        dbconn.executeUpdateOrInsert(conn, sql, 2)

        # NOTE: primary-dbid (right now) is the mirror.
        sql = "INSERT INTO gp_configuration_history VALUES (now(), %d, 'Reassigned role for content %d to MIRROR')" % (primary_dbid, content_id)
        dbconn.executeUpdateOrInsert(conn, sql, 1)

        # NOTE: mirror-dbid (right now) is the primary.
        sql = "INSERT INTO gp_configuration_history VALUES (now(), %d, 'Reassigned role for content %d to PRIMARY')" % (mirror_dbid, content_id)
        dbconn.executeUpdateOrInsert(conn, sql, 1)

    # We could attempt to update the segments-array.
    # But the caller will re-read the configuration from the catalog.
    dbconn.execSQL(conn, "COMMIT")
    conn.close()
# --------------------------------------------------------------------
def addExpansionSeg(self, content, preferred_role, dbid, role,
                    hostname, address, port, datadir, replication_port, fileSpaces=None):
    """
    Adds a segment to the gparray as an expansion segment.

    Note: may work better to construct the new GpDB in gpexpand and
    simply pass it in.

    Raises:
        Exception: if the content id does not extend the existing array,
        or a primary already exists for this content id.

    (Replaced '!= None' / '== None' with identity tests.)
    """
    if (content <= self.segments[-1].get_dbs()[0].content):
        raise Exception('Invalid content ID for expansion segment')

    segdb = GpDB(content = content,
                 preferred_role = preferred_role,
                 dbid = dbid,
                 role = role,
                 mode = MODE_SYNCHRONIZED,
                 status = STATUS_UP,
                 hostname = hostname,
                 address = address,
                 port = port,
                 datadir = datadir,
                 replicationPort = replication_port)  # todo: add to parameters
    if fileSpaces is not None:
        for fsOid in fileSpaces:
            segdb.addSegmentFilespace(oid = fsOid, path = fileSpaces[fsOid])

    seglen = len(self.segments)
    expseglen = len(self.expansionSegments)

    # Expansion segments are indexed by content id relative to the end of
    # the existing array; grow the list with None placeholders as needed.
    expseg_index = content - seglen
    logger.debug('New segment index is %d' % expseg_index)
    if expseglen < expseg_index + 1:
        extendByNum = expseg_index - expseglen + 1
        logger.debug('Extending expansion array by %d' % (extendByNum))
        self.expansionSegments.extend([None] * (extendByNum))
    if self.expansionSegments[expseg_index] is None:
        self.expansionSegments[expseg_index] = Segment()
    seg = self.expansionSegments[expseg_index]
    if preferred_role == ROLE_PRIMARY:
        if seg.primaryDB:
            raise Exception('Duplicate content id for primary segment')
        seg.addPrimary(segdb)
    else:
        seg.addMirror(segdb)
# --------------------------------------------------------------------
def reOrderExpansionSegs(self):
    """
    The expansion segments' content ids may have changed during the
    expansion.  Re-order self.expansionSegments so that the segment with
    content id C sits at index C - len(self.segments).  Since there can
    be no gaps in the content ids (see validateExpansionSegs), the new
    list has the same length as the old one.
    """
    seglen = len(self.segments)
    newExpansionSegments = [None] * len(self.expansionSegments)
    for seg in self.expansionSegments:
        contentId = seg.primaryDB.getSegmentContentId()
        newExpansionSegments[contentId - seglen] = seg
    # BUG FIX: this previously assigned to the loop variable's attribute
    # (seg.expansionSegments), so the reordered list was discarded and
    # self.expansionSegments was never updated.
    self.expansionSegments = newExpansionSegments
# --------------------------------------------------------------------
def validateExpansionSegs(self):
    """Checks the expansion segments for inconsistencies and errors.

    Validates: contiguous content ids; dbids forming a continuous run
    starting at max(dbid)+1; the correct mirror count per content id; and
    uniqueness of data directories, ports, replication ports and
    filespace directories per host.  Raises Exception on the first
    problem found.

    Modernized in a py2/py3-compatible way: dict.has_key -> 'in',
    cmp-based sorts -> natural tuple sort, '!= None' -> 'is not None';
    fixed the duplicate-filespace error message, which reported stale
    loop variables instead of the offending directory and host.
    """
    dbids = []
    content = []
    expansion_seg_count = 0

    # make sure we have added at least one segment
    if len(self.expansionSegments) == 0:
        raise Exception('No expansion segments defined')

    # how many mirrors per content id does the existing array have?
    mirrors_per_segment = len(self.segments[0].mirrorDBs)

    for seg in self.expansionSegments:
        # If a segment is 'None' that means we have a gap in the content ids
        if seg is None:
            raise Exception('Expansion segments do not have contiguous content ids.')

        expansion_seg_count += 1

        for segdb in seg.get_dbs():
            dbids.append(segdb.getSegmentDbId())
            isprimary = segdb.getSegmentRole() == ROLE_PRIMARY
            content.append((segdb.getSegmentContentId(), isprimary))

        # mirror count correct for this content id?
        if mirrors_per_segment > 0:
            if len(seg.mirrorDBs) != mirrors_per_segment:
                raise Exception('Expansion segment has incorrect number of mirrors defined.')
        else:
            # shouldn't have any mirrors
            if len(seg.mirrorDBs) != 0:
                raise Exception('Expansion segment has a mirror segment defined but mirroring is not enabled.')

    # check that the dbids are what they should be
    dbids.sort()
    # KAS Is the following really true?  dbids don't need to be continuous
    if dbids[0] != self.get_max_dbid() + 1:
        raise Exception('Expansion segments have incorrect dbids')
    for i in range(0, len(dbids) - 1):
        if dbids[i] != dbids[i + 1] - 1:
            raise Exception('Expansion segments have incorrect dbids')

    # check that content ids are ok
    valid_content = []
    for i in range(self.segments[-1].primaryDB.content + 1,
                   self.segments[-1].primaryDB.content + 1 + len(self.expansionSegments)):
        valid_content.append((i, True))
        for j in range(0, mirrors_per_segment):
            valid_content.append((i, False))

    # (int, bool) tuples sort naturally in exactly the order the old
    # cmp-based sort produced: by content id, then False before True.
    valid_content.sort()
    content.sort()

    if valid_content != content:
        raise Exception('Invalid content ids')

    # Check for redefinition of data dirs and ports
    datadirs = {}
    used_ports = {}
    used_replication_ports = {}
    for db in self.getDbList(True):
        datadir = db.getSegmentDataDirectory()
        hostname = db.getSegmentHostName()
        port = db.getSegmentPort()
        replication_port = db.getSegmentReplicationPort()

        if datadir in datadirs.setdefault(hostname, []):
            raise Exception('Data directory %s used multiple times on host %s' % (datadir, hostname))
        datadirs[hostname].append(datadir)

        # Check ports
        if db.port in used_ports.setdefault(hostname, []):
            raise Exception('Port %d is used multiple times on host %s' % (port, hostname))
        used_ports[hostname].append(db.port)

        # Check replication ports
        if replication_port is not None:
            if replication_port in used_replication_ports.setdefault(hostname, []):
                raise Exception('Replication Port %d is used multiple times on host %s' % (replication_port, hostname))
            used_replication_ports[hostname].append(replication_port)

    # Check for redefinition of filespace dirs
    dbList = self.getDbList(includeExpansionSegs = True)
    hostDict = GpArray.getSegmentsByHostName(dbList)
    for host in hostDict:
        dirList = []
        for seg in hostDict[host]:
            dirDict = seg.getSegmentFilespaces()
            for oid in dirDict:
                if dirDict[oid] in dirList:
                    # BUG FIX: previously reported the stale 'datadir' /
                    # 'hostname' left over from the port-checking loop.
                    raise Exception('Data directory %s used multiple times on host %s' % (dirDict[oid], host))
                dirList.append(dirDict[oid])
# --------------------------------------------------------------------
def addExpansionHosts(self, hosts, mirror_type):
    """Adds a list of hosts to the array, using the same data
    directories as the original hosts.  Also adds the mirrors
    based on mirror_type.

    Raises:
        Exception: if every requested host is already part of the array.

    (Removed a redundant duplicate call to get_interface_numbers()
    whose first result was immediately discarded.)
    """
    # remove interface numbers if they exist
    existing_hosts = []
    for host in self.get_hostlist(True):
        if host not in existing_hosts:
            existing_hosts.append(host)

    new_hosts = []
    for host in hosts:
        # see if we already have the host
        if host in existing_hosts or host in new_hosts:
            continue
        new_hosts.append(host)

    if len(new_hosts) == 0:
        raise Exception('No new hosts to add')

    # Use the first segment host's configuration as a prototype.
    seg0_hostname = self.segments[0].primaryDB.getSegmentHostName()
    primary_list = self.get_list_of_primary_segments_on_host(seg0_hostname)
    mirror_list = self.get_list_of_mirror_segments_on_host(seg0_hostname)

    base_primary_port = self.get_min_primary_port()
    base_mirror_port = 0
    base_primary_replication_port = None
    base_mirror_replication_port = None
    if mirror_type != 'none':
        base_mirror_port = self.get_min_mirror_port()
        base_primary_replication_port = self.get_min_primary_replication_port()
        base_mirror_replication_port = self.get_min_mirror_replication_port()

    prefix = self.get_datadir_prefix()
    interface_list = self.get_interface_numbers()
    interface_list.sort()

    rows = createSegmentRowsFromSegmentList( newHostlist = new_hosts
                                           , interface_list = interface_list
                                           , primary_segment_list = primary_list
                                           , primary_portbase = base_primary_port
                                           , mirror_type = mirror_type
                                           , mirror_segment_list = mirror_list
                                           , mirror_portbase = base_mirror_port
                                           , dir_prefix = prefix
                                           , primary_replication_portbase = base_primary_replication_port
                                           , mirror_replication_portbase = base_mirror_replication_port
                                           )
    self._fixup_and_add_expansion_segments(rows, interface_list)
# --------------------------------------------------------------------
def addExpansionDatadirs(self, datadirs, mirrordirs, mirror_type, fs_dirs = None, fs_mirror_dirs = None):
    """Adds new segments based on new data directories to both original
    hosts and hosts that were added by addExpansionHosts.
    """
    max_primary_port = self.get_max_primary_port()
    max_mirror_port = 0
    max_primary_replication_port = None
    max_mirror_replication_port = None
    if mirror_type != 'none':
        max_mirror_port = self.get_max_mirror_port()
        max_primary_replication_port = self.get_max_primary_replication_port()
        max_mirror_replication_port = self.get_max_mirror_replication_port()

    interface_list = self.get_interface_numbers()
    interface_list.sort()
    prefix = self.get_datadir_prefix()

    # Collect the distinct hosts (original + expansion) to add dirs to.
    hosts = []
    for segdb in self.getSegDbList(includeExpansionSegs = True):
        segdb_host = segdb.getSegmentHostName()
        if segdb_host not in hosts:
            hosts.append(segdb_host)

    # New segments start one port above the current maximums.
    tempPrimaryRP = None
    tempMirrorRP = None
    if mirror_type != 'none':
        tempPrimaryRP = max_primary_replication_port + 1
        tempMirrorRP = max_mirror_replication_port + 1

    rows = createSegmentRows( hostlist = hosts
                            , interface_list = interface_list
                            , primary_list = datadirs
                            , primary_portbase = max_primary_port + 1
                            , mirror_type = mirror_type
                            , mirror_list = mirrordirs
                            , mirror_portbase = max_mirror_port + 1
                            , dir_prefix = prefix
                            , primary_replication_portbase = tempPrimaryRP
                            , mirror_replication_portbase = tempMirrorRP
                            , primary_fs_list = fs_dirs
                            , mirror_fs_list = fs_mirror_dirs
                            )
    self._fixup_and_add_expansion_segments(rows, interface_list)
# --------------------------------------------------------------------
def _fixup_and_add_expansion_segments(self, rows, interface_list):
    """Fixes up expansion segments added to be after the original segdbs.

    This includes fixing up the dbids, content ids, data directories,
    interface part of the hostnames and mirrors.  After this is done, it
    adds them to the expansion array.

    (Removed the unused locals 'interface_count' and 'current_host';
    replaced the py2-only cmp-based sort with two stable key sorts.)
    """
    mirror_dict = {}

    # must be sorted by isprimary (descending), then hostname (ascending);
    # two stable key-based sorts reproduce the old cmp-based ordering.
    rows.sort(key=lambda r: r.host)
    rows.sort(key=lambda r: r.isprimary, reverse=True)

    curr_dbid = self.get_max_dbid(True) + 1
    curr_content = self.get_max_contentid(True) + 1

    # Fix up the rows with correct dbids, contentids, datadirs and interfaces
    for row in rows:
        hostname = row.host
        address = row.address

        # Remove the old content id off of the datadir before re-appending.
        new_datadir = row.fulldir[:row.fulldir.rfind(str(row.content))]

        if row.isprimary == 't':
            new_datadir += ('%d' % curr_content)
            new_filespaces = GpDB.replaceFileSpaceContentID( fileSpaceDictionary = row.fileSpaceDictionary
                                                           , oldContent = row.content
                                                           , newContent = curr_content
                                                           )
            self.addExpansionSeg(curr_content, ROLE_PRIMARY, curr_dbid,
                                 ROLE_PRIMARY, hostname, address, int(row.port), new_datadir, row.prPort, fileSpaces = new_filespaces)
            # The content id was adjusted, so we need to save it for the mirror
            mirror_dict[int(row.content)] = int(curr_content)
            curr_content += 1
        else:
            new_content = mirror_dict[int(row.content)]
            new_datadir += ('%d' % int(new_content))
            new_filespaces = GpDB.replaceFileSpaceContentID( fileSpaceDictionary = row.fileSpaceDictionary
                                                           , oldContent = row.content
                                                           , newContent = new_content
                                                           )
            self.addExpansionSeg(new_content, ROLE_MIRROR, curr_dbid,
                                 ROLE_MIRROR, hostname, address, int(row.port), new_datadir, row.prPort, fileSpaces = new_filespaces)
        curr_dbid += 1
def guessIsMultiHome(self):
    """
    Guess whether self is a multi-home (multiple interfaces per node) cluster
    """
    segdbs = self.getSegDbList()
    host_count = len(GpArray.getSegmentsByHostName(segdbs))
    address_count = len(GpArray.getSegmentsGroupedByValue(segdbs, GpDB.getSegmentAddress))
    # More distinct addresses than hostnames implies multiple NICs per host.
    return host_count != address_count
def guessIsSpreadMirror(self):
    """
    Guess whether self is a spread mirroring configuration.
    """
    if self.getFaultStrategy() != FAULT_STRATEGY_FILE_REPLICATION:
        return False

    segdbs = self.getSegDbList()
    mirrors = [s for s in segdbs if s.isSegmentMirror(current_role=False)]
    primaries = [s for s in segdbs if s.isSegmentPrimary(current_role=False)]
    assert len(mirrors) == len(primaries)

    mirrorsByContentId = GpArray.getSegmentsByContentId(mirrors)
    primaryHostNameToMirrorHostNameSet = {}
    for primary in primaries:
        mir = mirrorsByContentId[primary.getSegmentContentId()][0]

        primaryMap = primaryHostNameToMirrorHostNameSet.setdefault(
            primary.getSegmentHostName(), {})
        mirrorHost = mir.getSegmentHostName()
        primaryMap[mirrorHost] = primaryMap.get(mirrorHost, 0) + 1

        # This primary host mirrors more than one segment to a single
        # host: assume group mirroring!
        if primaryMap[mirrorHost] > 1:
            return False

    # Fall-through -- note that for a 2 host system with 1 segment per
    # host, this will cause the guess to be 'spread'.
    return True
@staticmethod
def getSegmentsGroupedByValue(segments, segmentMethodToGetValue):
result = {}
for segment in segments:
value = segmentMethodToGetValue(segment)
arr = result.get(value)
if arr is None:
result[value] = arr = []
arr.append(segment)
return result
@staticmethod
def getSegmentsByHostName(segments):
    """
    Returns a map from segment host name to an array of segments (GpDB objects)
    """
    # Thin wrapper: groups by GpDB.getSegmentHostName.
    return GpArray.getSegmentsGroupedByValue(segments, GpDB.getSegmentHostName)
@staticmethod
def getSegmentsByContentId(segments):
    """
    Returns a map from segment contentId to an array of segments (GpDB objects)
    """
    # Thin wrapper: groups by GpDB.getSegmentContentId.
    return GpArray.getSegmentsGroupedByValue(segments, GpDB.getSegmentContentId )
def getNumSegmentContents(self):
    # Number of distinct content ids among the segment dbs.
    return len(GpArray.getSegmentsByContentId(self.getSegDbList()))
def getSegmentsAsLoadedFromDb(self):
    """
    To be called by the configuration providers only
    """
    # Snapshot of the segment list as originally read from the catalog.
    return self.__segmentsAsLoadedFromDb
def setSegmentsAsLoadedFromDb(self, segments):
    """
    To be called by the configuration providers only
    """
    # Records the catalog snapshot; no validation is performed here.
    self.__segmentsAsLoadedFromDb = segments
def getStrategyAsLoadedFromDb(self):
    """
    To be called by the configuration providers only
    """
    # Fault strategy as originally read from the catalog.
    return self.__strategyLoadedFromDb
def setStrategyAsLoadedFromDb(self, strategy):
    """
    To be called by the configuration providers only
    """
    # Records the catalog fault strategy; no validation is performed here.
    self.__strategyLoadedFromDb = strategy
def get_segment_hosts(master_port):
    """
    Return the distinct hostnames of all segment dbs in the array reachable
    through the master listening on master_port.
    """
    gparray = GpArray.initFromCatalog( dbconn.DbURL(port=master_port), utility=True )
    by_host = GpArray.getSegmentsByHostName( gparray.getDbList() )
    return by_host.keys()
def get_session_ids(master_port):
    """
    Return the set of active session ids (sess_id > 0) reported by
    pg_stat_activity on the master listening on master_port.
    """
    conn = dbconn.connect( dbconn.DbURL(port=master_port), utility=True )
    try:
        cursor = dbconn.execSQL(conn, "SELECT sess_id from pg_stat_activity where sess_id > 0;")
        return set(row[0] for row in cursor)
    finally:
        conn.close()
# === EOF ====
| apache-2.0 |
luotao1/Paddle | python/paddle/fluid/contrib/slim/tests/test_imperative_qat_addquantdequant.py | 1 | 19505 | # copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
from __future__ import print_function
import os
import numpy as np
import random
import shutil
import time
import unittest
import logging
import paddle
import six
import paddle.fluid as fluid
from paddle.nn import functional
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm
from paddle.fluid.layers import nn
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware, QuantizationTransformPass, AddQuantDequantPass
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.nn import Pool2D
from paddle.nn.layer.activation import ReLU, LeakyReLU, ReLU6, Tanh, Swish
from paddle.fluid.log_helper import get_logger
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
# Run the static-graph parts of this test under static mode.
paddle.enable_static()

os.environ["CPU_NUM"] = "1"
# cuDNN nondeterminism would make the static/dygraph accuracy comparison flaky.
if core.is_compiled_with_cuda():
    fluid.set_flags({"FLAGS_cudnn_deterministic": True})

_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def StaticLenet(data, num_classes=10):
    """Build a static-graph LeNet variant on top of `data` and return the
    softmax output.  Parameter names are pinned via ParamAttr so the same
    initial weights can be shared with ImperativeLenet."""
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    conv2d_w3_attr = fluid.ParamAttr(name="conv2d_w_3")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    conv2d_b3_attr = fluid.ParamAttr(name="conv2d_b_3")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    # conv -> leaky_relu -> max-pool
    conv1 = fluid.layers.conv2d(
        data,
        num_filters=6,
        filter_size=3,
        stride=1,
        padding=1,
        param_attr=conv2d_w1_attr,
        bias_attr=conv2d_b1_attr)
    conv1 = fluid.layers.leaky_relu(conv1, alpha=0.02)
    pool1 = fluid.layers.pool2d(
        conv1, pool_size=2, pool_type='max', pool_stride=2)
    # conv -> max-pool -> relu -> swish (activation mix exercises the
    # quantizable_layer_type list used by the tests)
    conv2 = fluid.layers.conv2d(
        pool1,
        num_filters=16,
        filter_size=5,
        stride=1,
        padding=0,
        param_attr=conv2d_w2_attr,
        bias_attr=conv2d_b2_attr)
    pool2 = fluid.layers.pool2d(
        conv2, pool_size=2, pool_type='max', pool_stride=2)
    pool2 = fluid.layers.relu(pool2)
    pool2 = fluid.layers.swish(pool2)
    # 1x1 conv -> relu6 -> tanh
    conv3 = fluid.layers.conv2d(
        pool2,
        num_filters=16,
        filter_size=1,
        stride=1,
        padding=0,
        param_attr=conv2d_w3_attr,
        bias_attr=conv2d_b3_attr)
    conv3 = fluid.layers.relu6(conv3)
    conv3 = paddle.tensor.math.tanh(conv3)
    # three fully-connected layers, then softmax over num_classes
    fc1 = fluid.layers.fc(input=conv3,
                          size=120,
                          param_attr=fc_w1_attr,
                          bias_attr=fc_b1_attr)
    fc2 = fluid.layers.fc(input=fc1,
                          size=84,
                          param_attr=fc_w2_attr,
                          bias_attr=fc_b2_attr)
    fc3 = fluid.layers.fc(input=fc2,
                          size=num_classes,
                          param_attr=fc_w3_attr,
                          bias_attr=fc_b3_attr)
    fc3 = fluid.layers.softmax(fc3, use_cudnn=True)
    return fc3
class ImperativeLenet(fluid.dygraph.Layer):
    """Dygraph LeNet whose layer stack and parameter names mirror
    StaticLenet, so the static and imperative models can share the same
    initial weights and be compared output-for-output."""

    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        # Parameter names must match StaticLenet's ParamAttr names.
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        conv2d_w3_attr = fluid.ParamAttr(name="conv2d_w_3")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        conv2d_b3_attr = fluid.ParamAttr(name="conv2d_b_3")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        # Convolutional feature extractor; the activation layers are the
        # ones targeted by quantizable_layer_type in the tests below.
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=conv2d_b1_attr),
            LeakyReLU(negative_slope=0.02),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            ReLU(),
            Swish(),
            Conv2D(
                in_channels=16,
                out_channels=16,
                kernel_size=1,
                stride=1,
                padding=0,
                weight_attr=conv2d_w3_attr,
                bias_attr=conv2d_b3_attr),
            ReLU6(),
            Tanh())
        # Classifier head: 400 -> 120 -> 84 -> num_classes, then softmax.
        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())

    def forward(self, inputs):
        # features -> flatten (keep batch dim) -> classifier
        x = self.features(inputs)
        x = fluid.layers.flatten(x, 1)
        x = self.fc(x)
        return x
class TestImperativeAddQuantDequant(unittest.TestCase):
@classmethod
def setUpClass(cls):
    # Timestamped scratch directories so repeated/concurrent runs of this
    # test do not collide; removed again in tearDownClass.
    timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    cls.root_path = os.path.join(os.getcwd(),
                                 "imperative_qat_aqd_" + timestamp)
    cls.save_path = os.path.join(cls.root_path, "lenet")
    cls.dynamic_root_path = os.path.join(os.getcwd(),
                                         "dynamic_mnist_aqd_" + timestamp)
    cls.dynamic_save_path = os.path.join(cls.dynamic_root_path, "model")
@classmethod
def tearDownClass(cls):
    # Remove the temporary model directories created in setUpClass.
    shutil.rmtree(cls.root_path)
    shutil.rmtree(cls.dynamic_root_path)
def test_qat_save(self):
    """Train a quantization-aware LeNet briefly, save it with
    paddle.jit.save, reload it via load_inference_model, and check the
    reloaded model reproduces the in-memory predictions."""
    imperative_qat = ImperativeQuantAware(
        weight_quantize_type='abs_max',
        activation_quantize_type='moving_average_abs_max',
        quantizable_layer_type=[
            'Conv2D', 'Linear', 'ReLU', 'LeakyReLU', 'ReLU6', 'Tanh',
            'Swish'
        ])
    with fluid.dygraph.guard():
        lenet = ImperativeLenet()
        # Insert fake-quant/dequant ops into the model in place.
        imperative_qat.quantize(lenet)
        adam = AdamOptimizer(
            learning_rate=0.001, parameter_list=lenet.parameters())
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=32, drop_last=True)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=32)
        epoch_num = 1
        for epoch in range(epoch_num):
            lenet.train()
            for batch_id, data in enumerate(train_reader()):
                x_data = np.array([x[0].reshape(1, 28, 28)
                                   for x in data]).astype('float32')
                y_data = np.array(
                    [x[1] for x in data]).astype('int64').reshape(-1, 1)
                img = fluid.dygraph.to_variable(x_data)
                label = fluid.dygraph.to_variable(y_data)
                out = lenet(img)
                acc = fluid.layers.accuracy(out, label)
                loss = fluid.layers.cross_entropy(out, label)
                avg_loss = fluid.layers.mean(loss)
                avg_loss.backward()
                adam.minimize(avg_loss)
                lenet.clear_gradients()
                if batch_id % 100 == 0:
                    _logger.info(
                        "Train | At epoch {} step {}: loss = {:}, acc= {:}".
                        format(epoch, batch_id,
                               avg_loss.numpy(), acc.numpy()))
                if batch_id == 500:  # For shortening CI time
                    break

            # One evaluation pass over the test set per epoch.
            lenet.eval()
            for batch_id, data in enumerate(test_reader()):
                x_data = np.array([x[0].reshape(1, 28, 28)
                                   for x in data]).astype('float32')
                y_data = np.array(
                    [x[1] for x in data]).astype('int64').reshape(-1, 1)
                img = fluid.dygraph.to_variable(x_data)
                label = fluid.dygraph.to_variable(y_data)
                out = lenet(img)
                acc_top1 = fluid.layers.accuracy(
                    input=out, label=label, k=1)
                acc_top5 = fluid.layers.accuracy(
                    input=out, label=label, k=5)
                if batch_id % 100 == 0:
                    _logger.info(
                        "Test | At epoch {} step {}: acc1 = {:}, acc5 = {:}".
                        format(epoch, batch_id,
                               acc_top1.numpy(), acc_top5.numpy()))

        # save weights
        model_dict = lenet.state_dict()
        fluid.save_dygraph(model_dict, "save_temp")

        # test the correctness of `paddle.jit.save`
        data = next(test_reader())
        test_data = np.array([x[0].reshape(1, 28, 28)
                              for x in data]).astype('float32')
        test_img = fluid.dygraph.to_variable(test_data)
        lenet.eval()
        # Reference predictions before serialization.
        before_save = lenet(test_img)

    # save inference quantized model
    paddle.jit.save(
        layer=lenet,
        path=TestImperativeAddQuantDequant.save_path,
        input_spec=[
            paddle.static.InputSpec(
                shape=[None, 1, 28, 28], dtype='float32')
        ])
    if core.is_compiled_with_cuda():
        place = core.CUDAPlace(0)
    else:
        place = core.CPUPlace()
    exe = fluid.Executor(place)
    # Reload the saved inference program and run it on the same batch.
    [inference_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(
         dirname=TestImperativeAddQuantDequant.root_path,
         executor=exe,
         model_filename="lenet" + INFER_MODEL_SUFFIX,
         params_filename="lenet" + INFER_PARAMS_SUFFIX)
    after_save, = exe.run(inference_program,
                          feed={feed_target_names[0]: test_data},
                          fetch_list=fetch_targets)
    self.assertTrue(
        np.allclose(after_save, before_save.numpy()),
        msg='Failed to save the inference quantized model.')
    def test_qat_acc(self):
        """Checks that imperative (dygraph) QAT reproduces the losses of the
        equivalent static-graph quantization passes, batch for batch, when
        both start from identical seeds and identical initial weights.
        """
        def _build_static_lenet(main, startup, is_test=False, seed=1000):
            # Static-graph twin of ImperativeLenet; returns (input, label,
            # fetch) where fetch is the mean loss when training and the raw
            # prediction when testing.
            with fluid.unique_name.guard():
                with fluid.program_guard(main, startup):
                    main.random_seed = seed
                    startup.random_seed = seed
                    img = fluid.layers.data(
                        name='image', shape=[1, 28, 28], dtype='float32')
                    label = fluid.layers.data(
                        name='label', shape=[1], dtype='int64')
                    prediction = StaticLenet(img)
                    if not is_test:
                        loss = fluid.layers.cross_entropy(
                            input=prediction, label=label)
                        avg_loss = fluid.layers.mean(loss)
                    else:
                        avg_loss = prediction
            return img, label, avg_loss
        reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=32, drop_last=True)
        weight_quantize_type = 'abs_max'
        activation_quant_type = 'moving_average_abs_max'
        # Records the exact initial value of every parameter so the static
        # program below can start from identical weights.
        param_init_map = {}
        seed = 1000
        lr = 0.001
        # imperative train
        _logger.info(
            "--------------------------dynamic graph qat--------------------------"
        )
        imperative_qat = ImperativeQuantAware(
            weight_quantize_type=weight_quantize_type,
            activation_quantize_type=activation_quant_type,
            quantizable_layer_type=[
                'Conv2D', 'Linear', 'ReLU', 'LeakyReLU', 'ReLU6', 'Tanh',
                'Swish'
            ])
        with fluid.dygraph.guard():
            np.random.seed(seed)
            fluid.default_main_program().random_seed = seed
            fluid.default_startup_program().random_seed = seed
            lenet = ImperativeLenet()
            fixed_state = {}
            # Deterministic re-initialization: zero biases, small Gaussian
            # weights, so both graphs start from the same state.
            for name, param in lenet.named_parameters():
                p_shape = param.numpy().shape
                p_value = param.numpy()
                if name.endswith("bias"):
                    value = np.zeros_like(p_value).astype('float32')
                else:
                    value = np.random.normal(
                        loc=0.0, scale=0.01, size=np.product(p_shape)).reshape(
                            p_shape).astype('float32')
                fixed_state[name] = value
                param_init_map[param.name] = value
            lenet.set_dict(fixed_state)
            imperative_qat.quantize(lenet)
            adam = AdamOptimizer(
                learning_rate=lr, parameter_list=lenet.parameters())
            dynamic_loss_rec = []
            lenet.train()
            for batch_id, data in enumerate(reader()):
                x_data = np.array([x[0].reshape(1, 28, 28)
                                   for x in data]).astype('float32')
                y_data = np.array(
                    [x[1] for x in data]).astype('int64').reshape(-1, 1)
                img = fluid.dygraph.to_variable(x_data)
                label = fluid.dygraph.to_variable(y_data)
                out = lenet(img)
                loss = fluid.layers.cross_entropy(out, label)
                avg_loss = fluid.layers.mean(loss)
                avg_loss.backward()
                adam.minimize(avg_loss)
                lenet.clear_gradients()
                dynamic_loss_rec.append(avg_loss.numpy()[0])
                if batch_id % 100 == 0:
                    _logger.info('{}: {}'.format('loss', avg_loss.numpy()))
                if batch_id > 500:
                    break
            lenet.eval()
            paddle.jit.save(
                layer=lenet,
                path=TestImperativeAddQuantDequant.dynamic_save_path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, 1, 28, 28], dtype='float32')
                ])
        # static graph train
        _logger.info(
            "--------------------------static graph qat--------------------------"
        )
        static_loss_rec = []
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        main = fluid.Program()
        infer = fluid.Program()
        startup = fluid.Program()
        static_img, static_label, static_loss = _build_static_lenet(
            main, startup, False, seed)
        infer_img, _, infer_pre = _build_static_lenet(infer, startup, True,
                                                      seed)
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                opt = AdamOptimizer(learning_rate=lr)
                opt.minimize(static_loss)
        scope = core.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup)
            # Overwrite the random startup values with the exact weights the
            # dygraph model started from.
            for param in main.all_parameters():
                param_tensor = scope.var(param.name).get_tensor()
                param_tensor.set(param_init_map[param.name], place)
        main_graph = IrGraph(core.Graph(main.desc), for_test=False)
        infer_graph = IrGraph(core.Graph(infer.desc), for_test=True)
        transform_pass = QuantizationTransformPass(
            scope=scope,
            place=place,
            activation_quantize_type=activation_quant_type,
            weight_quantize_type=weight_quantize_type,
            quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'])
        add_quant_dequant_pass = AddQuantDequantPass(
            scope=scope,
            place=place,
            quantizable_op_type=[
                'relu', 'leaky_relu', 'relu6', 'tanh', 'swish'
            ])
        transform_pass.apply(main_graph)
        transform_pass.apply(infer_graph)
        add_quant_dequant_pass.apply(main_graph)
        add_quant_dequant_pass.apply(infer_graph)
        build_strategy = fluid.BuildStrategy()
        build_strategy.fuse_all_reduce_ops = False
        binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
            loss_name=static_loss.name, build_strategy=build_strategy)
        feeder = fluid.DataFeeder(
            feed_list=[static_img, static_label], place=place)
        with fluid.scope_guard(scope):
            for batch_id, data in enumerate(reader()):
                loss_v, = exe.run(binary,
                                  feed=feeder.feed(data),
                                  fetch_list=[static_loss])
                static_loss_rec.append(loss_v[0])
                if batch_id % 100 == 0:
                    _logger.info('{}: {}'.format('loss', loss_v))
        save_program = infer_graph.to_program()
        with fluid.scope_guard(scope):
            fluid.io.save_inference_model("./static_mnist", [infer_img.name],
                                          [infer_pre], exe, save_program)
        rtol = 1e-08
        atol = 1e-10
        # Log the first diverging batch (if any) before the final allclose
        # assertion, to make failures easier to diagnose.
        for i, (loss_d,
                loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)):
            diff = np.abs(loss_d - loss_s)
            if diff > (atol + rtol * np.abs(loss_s)):
                _logger.info(
                    "diff({}) at {}, dynamic loss = {}, static loss = {}".
                    format(diff, i, loss_d, loss_s))
                break
        self.assertTrue(
            np.allclose(
                np.array(dynamic_loss_rec),
                np.array(static_loss_rec),
                rtol=rtol,
                atol=atol,
                equal_nan=True),
            msg='Failed to do the imperative qat.')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
dreamer-dead/google-test | test/gtest_test_utils.py | 674 | 10826 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the variable to the given value, or removes it when value is None."""
  if value is None:
    # Removing a variable that is already absent is a harmless no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent."""
  # NOTE: argv is modified in place — each recognized '--<flag>=<value>'
  # argument is deleted so later argument parsers never see it.
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag; raises KeyError for unknown names."""
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed (the call is idempotent).
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Value comes from --source_dir / SOURCE_DIR, defaulting to sys.argv[0]'s dir.
  return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  # Value comes from --build_dir / BUILD_DIR, defaulting to sys.argv[0]'s dir.
  return os.path.abspath(GetFlag('build_dir'))
# Shared scratch directory, created lazily and removed at interpreter exit.
_temp_dir = None
def _RemoveTempDir():
  """Best-effort cleanup of the shared scratch directory."""
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""
  global _temp_dir
  _temp_dir = _temp_dir or tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if not os.path.exists(path):
    message = (
        'Unable to find the test binary "%s". Please make sure to provide\n'
        'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.' % path)
    # sys.stderr.write works under both Python 2 and Python 3; the former
    # 'print >> sys.stderr' statement is a SyntaxError on Python 3.
    sys.stderr.write(message + '\n')
    sys.exit(1)
  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() already returns the exit() argument directly.
    return exit_code
  # On Unix the result is a 16-bit wait status that must be decoded.
  return os.WEXITSTATUS(exit_code) if os.WIFEXITED(exit_code) else -1
class Subprocess:
  """Runs a command in a child process and captures its combined output.

  Uses the 'subprocess' module when available (Python >= 2.4) and falls back
  to the legacy 'popen2' module otherwise; see the module-level try/import.
  """
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.
    Restores the old directory afterwards.
    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.
    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    # The subprocess module is the preferable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()
      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)
      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        # Always restore the working directory (and environment, if swapped),
        # even when the child process failed to start or read() raised.
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # A negative return code means "killed by signal N" (subprocess semantics).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""
  # _test_module is an alias for unittest (set near the top of this module).
  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main(). Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]
  _test_module.main()
| bsd-3-clause |
loonycyborg/scons-plusplus | python_modules/Tool/link.py | 1 | 13850 | """SCons.Tool.link
Tool-specific initialization for the generic Posix linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/link.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import sys
import re
import os
import SCons.Tool
import SCons.Util
import SCons.Warnings
from SCons.Tool.FortranCommon import isfortran
from SCons.Tool.DCommon import isD
from SCons.Tool.cxx import iscplusplus
issued_mixed_link_warning = False
def smart_link(source, target, env, for_signature):
    """$LINK generator: chooses the link driver from the source languages.

    Returns '$CXX' when C++ sources are present, '$DC' for D sources (also
    swapping in the D link command lines), '$FORTRAN' for Fortran, and
    '$CC' otherwise. Warns once per process about mixing C++ and Fortran.
    """
    has_cplusplus = iscplusplus(source)
    has_fortran = isfortran(env, source)
    has_d = isD(env, source)
    if has_cplusplus and has_fortran and not has_d:
        global issued_mixed_link_warning
        if not issued_mixed_link_warning:
            msg = "Using $CXX to link Fortran and C++ code together.\n\t" + \
                  "This may generate a buggy executable if the '%s'\n\t" + \
                  "compiler does not know how to deal with Fortran runtimes."
            SCons.Warnings.warn(SCons.Warnings.FortranCxxMixWarning,
                                msg % env.subst('$CXX'))
            issued_mixed_link_warning = True
        return '$CXX'
    elif has_d:
        # D replaces the whole link command lines, not just the driver.
        env['LINKCOM'] = env['DLINKCOM']
        env['SHLINKCOM'] = env['SHDLINKCOM']
        return '$DC'
    elif has_fortran:
        return '$FORTRAN'
    elif has_cplusplus:
        return '$CXX'
    return '$CC'
def _lib_emitter(target, source, env, **kw):
    """Common emitter for shared libraries and loadable modules.

    Marks each target node as shared; when a 'symlink_generator' keyword is
    supplied, computes the versioned symlinks for target[0], emits them, and
    records them on the node's attributes.
    """
    Verbose = False
    if Verbose:
        print("_lib_emitter: target[0]={!r}".format(target[0].get_path()))
    for tgt in target:
        if SCons.Util.is_String(tgt):
            tgt = env.File(tgt)
        tgt.attributes.shared = 1
    try:
        symlink_generator = kw['symlink_generator']
    except KeyError:
        pass
    else:
        if Verbose:
            print("_lib_emitter: symlink_generator={!r}".format(symlink_generator))
        symlinks = symlink_generator(env, target[0])
        if Verbose:
            print("_lib_emitter: symlinks={!r}".format(symlinks))
        if symlinks:
            SCons.Tool.EmitLibSymlinks(env, symlinks, target[0])
            target[0].attributes.shliblinks = symlinks
    return (target, source)
def shlib_emitter(target, source, env):
    """SharedLibrary emitter: _lib_emitter with shared-library symlinks."""
    return _lib_emitter(target, source, env, symlink_generator=SCons.Tool.ShLibSymlinkGenerator)
def ldmod_emitter(target, source, env):
    """LoadableModule emitter: _lib_emitter with loadable-module symlinks."""
    return _lib_emitter(target, source, env, symlink_generator=SCons.Tool.LdModSymlinkGenerator)
# This is generic enough to be included here...
def _versioned_lib_name(env, libnode, version, prefix, suffix, prefix_generator, suffix_generator, **kw):
    """For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so'"""
    # Strategy: take the versioned basename and textually replace the
    # versioned prefix/suffix (produced by the generators) with the plain
    # prefix/suffix, anchored at the start/end of the name.
    Verbose = False
    if Verbose:
        print("_versioned_lib_name: libnode={!r}".format(libnode.get_path()))
        print("_versioned_lib_name: version={!r}".format(version))
        print("_versioned_lib_name: prefix={!r}".format(prefix))
        print("_versioned_lib_name: suffix={!r}".format(suffix))
        print("_versioned_lib_name: suffix_generator={!r}".format(suffix_generator))
    versioned_name = os.path.basename(libnode.get_path())
    if Verbose:
        print("_versioned_lib_name: versioned_name={!r}".format(versioned_name))
    versioned_prefix = prefix_generator(env, **kw)
    versioned_suffix = suffix_generator(env, **kw)
    if Verbose:
        print("_versioned_lib_name: versioned_prefix={!r}".format(versioned_prefix))
        print("_versioned_lib_name: versioned_suffix={!r}".format(versioned_suffix))
    versioned_prefix_re = '^' + re.escape(versioned_prefix)
    versioned_suffix_re = re.escape(versioned_suffix) + '$'
    name = re.sub(versioned_prefix_re, prefix, versioned_name)
    name = re.sub(versioned_suffix_re, suffix, name)
    if Verbose:
        print("_versioned_lib_name: name={!r}".format(name))
    return name
def _versioned_shlib_name(env, libnode, version, prefix, suffix, **kw):
    # Shared-library flavour: strip the ShLib versioned prefix/suffix.
    prefix_generator = SCons.Tool.ShLibPrefixGenerator
    suffix_generator = SCons.Tool.ShLibSuffixGenerator
    return _versioned_lib_name(env, libnode, version, prefix, suffix, prefix_generator, suffix_generator, **kw)
def _versioned_ldmod_name(env, libnode, version, prefix, suffix, **kw):
    # Loadable-module flavour: strip the LdMod versioned prefix/suffix.
    prefix_generator = SCons.Tool.LdModPrefixGenerator
    suffix_generator = SCons.Tool.LdModSuffixGenerator
    return _versioned_lib_name(env, libnode, version, prefix, suffix, prefix_generator, suffix_generator, **kw)
def _versioned_lib_suffix(env, suffix, version):
    """For suffix='.so' and version='0.1.2' it returns '.so.0.1.2'"""
    Verbose = False
    if Verbose:
        print("_versioned_lib_suffix: suffix={!r}".format(suffix))
        print("_versioned_lib_suffix: version={!r}".format(version))
    # Append the version only when the suffix does not already end with it.
    result = suffix if suffix.endswith(version) else '{}.{}'.format(suffix, version)
    if Verbose:
        print("_versioned_lib_suffix: return suffix={!r}".format(result))
    return result
def _versioned_lib_soname(env, libnode, version, prefix, suffix, name_func):
    """For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so.X'"""
    # soname = unversioned name (via name_func) + '.' + major version part.
    Verbose = False
    if Verbose:
        print("_versioned_lib_soname: version={!r}".format(version))
    name = name_func(env, libnode, version, prefix, suffix)
    if Verbose:
        print("_versioned_lib_soname: name={!r}".format(name))
    major = version.split('.')[0]
    soname = name + '.' + major
    if Verbose:
        print("_versioned_lib_soname: soname={!r}".format(soname))
    return soname
def _versioned_shlib_soname(env, libnode, version, prefix, suffix):
    # Shared-library soname, e.g. 'libfoo.so.X'.
    return _versioned_lib_soname(env, libnode, version, prefix, suffix, _versioned_shlib_name)
def _versioned_ldmod_soname(env, libnode, version, prefix, suffix):
    # Loadable-module soname.
    return _versioned_lib_soname(env, libnode, version, prefix, suffix, _versioned_ldmod_name)
def _versioned_lib_symlinks(env, libnode, version, prefix, suffix, name_func, soname_func):
    """Generate link names that should be created for a versioned shared library.
    Returns a dictionary in the form { linkname : linktarget }
    """
    # NOTE(review): despite the docstring above, the code below returns a
    # list of (linkname_node, linktarget_node) pairs, or None on OpenBSD.
    Verbose = False
    if Verbose:
        print("_versioned_lib_symlinks: libnode={!r}".format(libnode.get_path()))
        print("_versioned_lib_symlinks: version={!r}".format(version))
    if sys.platform.startswith('openbsd'):
        # OpenBSD uses x.y shared library versioning numbering convention
        # and doesn't use symlinks to backwards-compatible libraries
        if Verbose:
            print("_versioned_lib_symlinks: return symlinks={!r}".format(None))
        return None
    linkdir = libnode.get_dir()
    if Verbose:
        print("_versioned_lib_symlinks: linkdir={!r}".format(linkdir.get_path()))
    name = name_func(env, libnode, version, prefix, suffix)
    if Verbose:
        print("_versioned_lib_symlinks: name={!r}".format(name))
    soname = soname_func(env, libnode, version, prefix, suffix)
    if Verbose:
        print("_versioned_lib_symlinks: soname={!r}".format(soname))
    link0 = env.fs.File(soname, linkdir)
    link1 = env.fs.File(name, linkdir)
    # We create direct symlinks, not daisy-chained.
    if link0 == libnode:
        # This enables SHLIBVERSION without periods (e.g. SHLIBVERSION=1)
        symlinks = [(link1, libnode)]
    else:
        # This handles usual SHLIBVERSION, i.e. '1.2', '1.2.3', etc.
        symlinks = [(link0, libnode), (link1, libnode)]
    if Verbose:
        print("_versioned_lib_symlinks: return symlinks={!r}".format(SCons.Tool.StringizeLibSymlinks(symlinks)))
    return symlinks
def _versioned_shlib_symlinks(env, libnode, version, prefix, suffix):
    # Resolve the name/soname helpers through LINKCALLBACKS so that tool
    # modules can override them per environment.
    name_func = env['LINKCALLBACKS']['VersionedShLibName']
    soname_func = env['LINKCALLBACKS']['VersionedShLibSoname']
    return _versioned_lib_symlinks(env, libnode, version, prefix, suffix, name_func, soname_func)
def _versioned_ldmod_symlinks(env, libnode, version, prefix, suffix):
    """Symlink map for a versioned loadable module (see _versioned_lib_symlinks)."""
    # Look the helpers up through LINKCALLBACKS so tools can override them.
    # The original also assigned _versioned_ldmod_name/_versioned_ldmod_soname
    # directly first — dead code, immediately overwritten; removed here.
    name_func = env['LINKCALLBACKS']['VersionedLdModName']
    soname_func = env['LINKCALLBACKS']['VersionedLdModSoname']
    return _versioned_lib_symlinks(env, libnode, version, prefix, suffix, name_func, soname_func)
def _versioned_lib_callbacks():
    """Returns a fresh dict of the default versioned-library callbacks.

    Tools install this as env['LINKCALLBACKS'] and may override entries.
    """
    # A dict literal already yields a new object on every call; the trailing
    # .copy() in the original was redundant and has been dropped.
    return {
        'VersionedShLibSuffix': _versioned_lib_suffix,
        'VersionedLdModSuffix': _versioned_lib_suffix,
        'VersionedShLibSymlinks': _versioned_shlib_symlinks,
        'VersionedLdModSymlinks': _versioned_ldmod_symlinks,
        'VersionedShLibName': _versioned_shlib_name,
        'VersionedLdModName': _versioned_ldmod_name,
        'VersionedShLibSoname': _versioned_shlib_soname,
        'VersionedLdModSoname': _versioned_ldmod_soname,
    }
def _setup_versioned_lib_variables(env, **kw):
    """
    Setup all variables required by the versioning machinery

    Keyword arguments:
        tool:       name of the calling tool module (e.g. 'sunlink'); the
                    Sun linker spells the soname option differently.
        use_soname: whether the linker supports an SONAME flag.
    """
    # kw.get replaces the original try/except KeyError plumbing; behavior
    # is identical (missing keys fall back to None / False).
    tool = kw.get('tool')
    use_soname = kw.get('use_soname', False)
    # The $_SHLIBVERSIONFLAGS define extra commandline flags used when
    # building VERSIONED shared libraries. It's always set, but used only
    # when VERSIONED library is built (see __SHLIBVERSIONFLAGS in SCons/Defaults.py).
    if use_soname:
        # If the linker uses SONAME, then we need this little automata
        if tool == 'sunlink':
            env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS -h $_SHLIBSONAME'
            env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS -h $_LDMODULESONAME'
        else:
            env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS -Wl,-soname=$_SHLIBSONAME'
            env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS -Wl,-soname=$_LDMODULESONAME'
        env['_SHLIBSONAME'] = '${ShLibSonameGenerator(__env__,TARGET)}'
        env['_LDMODULESONAME'] = '${LdModSonameGenerator(__env__,TARGET)}'
        env['ShLibSonameGenerator'] = SCons.Tool.ShLibSonameGenerator
        env['LdModSonameGenerator'] = SCons.Tool.LdModSonameGenerator
    else:
        env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
        env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS'
    # LDMODULEVERSIONFLAGS should always default to $SHLIBVERSIONFLAGS
    env['LDMODULEVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
def generate(env):
    """Add Builders and construction variables for gnulink to an Environment."""
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)
    # Shared libraries link with the same driver as programs, plus -shared.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM'] = '$SHLINK -o $TARGET $SHLINKFLAGS $__SHLIBVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    # don't set up the emitter, because AppendUnique will generate a list
    # starting with None :-(
    env.Append(SHLIBEMITTER=[shlib_emitter])
    # $LINK dispatches to smart_link(), which selects CC/CXX/FORTRAN/DC
    # based on the source languages.
    env['SMARTLINK'] = smart_link
    env['LINK'] = "$SMARTLINK"
    if 'LINKFLAGS' not in env:
        env['LINKFLAGS'] = SCons.Util.CLVar('')
    # __RPATH is only set to something ($_RPATH typically) on platforms that support it.
    env['LINKCOM'] = '$LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBDIRPREFIX'] = '-L'
    env['LIBDIRSUFFIX'] = ''
    env['_LIBFLAGS'] = '${_stripixes(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, LIBPREFIXES, LIBSUFFIXES, __env__)}'
    env['LIBLINKPREFIX'] = '-l'
    env['LIBLINKSUFFIX'] = ''
    if env['PLATFORM'] == 'hpux':
        env['SHLIBSUFFIX'] = '.sl'
    elif env['PLATFORM'] == 'aix':
        env['SHLIBSUFFIX'] = '.a'
    # For most platforms, a loadable module is the same as a shared
    # library. Platforms which are different can override these, but
    # setting them the same means that LoadableModule works everywhere.
    SCons.Tool.createLoadableModuleBuilder(env)
    env['LDMODULE'] = '$SHLINK'
    env.Append(LDMODULEEMITTER=[ldmod_emitter])
    env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
    env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
    env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
    env[
        'LDMODULECOM'] = '$LDMODULE -o $TARGET $LDMODULEFLAGS $__LDMODULEVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LDMODULEVERSION'] = '$SHLIBVERSION'
    env['LDMODULENOVERSIONSYMLINKS'] = '$SHLIBNOVERSIONSYMLINKS'
def exists(env):
    """Always reports unavailable: this module only provides shared logic
    for the concrete linker tools, never acts as a Tool itself."""
    return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
bitcity/django | tests/utils_tests/test_crypto.py | 447 | 4581 | from __future__ import unicode_literals
import binascii
import hashlib
import unittest
from django.utils.crypto import constant_time_compare, pbkdf2
class TestUtilsCryptoMisc(unittest.TestCase):
    """Tests for the miscellaneous helpers in django.utils.crypto."""
    def test_constant_time_compare(self):
        """constant_time_compare accepts both bytes and text inputs."""
        # It's hard to test for constant time, just test the result.
        self.assertTrue(constant_time_compare(b'spam', b'spam'))
        self.assertFalse(constant_time_compare(b'spam', b'eggs'))
        self.assertTrue(constant_time_compare('spam', 'spam'))
        self.assertFalse(constant_time_compare('spam', 'eggs'))
class TestUtilsCryptoPBKDF2(unittest.TestCase):
    """Known-answer tests for django.utils.crypto.pbkdf2."""
    # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
    rfc_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 2,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 4096,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "4b007901b765489abead49d926f721d065a429c1",
        },
        # # this takes way too long :(
        # {
        #     "args": {
        #         "password": "password",
        #         "salt": "salt",
        #         "iterations": 16777216,
        #         "dklen": 20,
        #         "digest": hashlib.sha1,
        #     },
        #     "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
        # },
        {
            "args": {
                "password": "passwordPASSWORDpassword",
                "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
                "iterations": 4096,
                "dklen": 25,
                "digest": hashlib.sha1,
            },
            "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
        },
        {
            "args": {
                "password": "pass\0word",
                "salt": "sa\0lt",
                "iterations": 4096,
                "dklen": 16,
                "digest": hashlib.sha1,
            },
            "result": "56fa6aa75548099dcc37d7f03425e0c3",
        },
    ]
    # Vectors guarding against past regressions: sha256/sha512 digests,
    # dklen=0 (output is the full 64-byte sha512 digest here), and
    # preservation of leading zero bytes.
    regression_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha256,
            },
            "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha512,
            },
            "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1000,
                "dklen": 0,
                "digest": hashlib.sha512,
            },
            "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
                       "549fd42fb6695779ad8a1c5bf59de69c48f774ef"
                       "c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
                       "834cfec"),
        },
        # Check leading zeros are not stripped (#17481)
        {
            "args": {
                "password": b'\xba',
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
        },
    ]
    def test_public_vectors(self):
        """pbkdf2 matches the published RFC-draft test vectors."""
        for vector in self.rfc_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(binascii.hexlify(result).decode('ascii'),
                             vector['result'])
    def test_regression_vectors(self):
        """pbkdf2 matches Django's own recorded regression vectors."""
        for vector in self.regression_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(binascii.hexlify(result).decode('ascii'),
                             vector['result'])
| bsd-3-clause |
kennedyshead/home-assistant | homeassistant/components/caldav/calendar.py | 5 | 11867 | """Support for WebDav Calendar."""
import copy
from datetime import datetime, timedelta
import logging
import re
import caldav
import voluptuous as vol
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
CalendarEventDevice,
calculate_offset,
get_date,
is_offset_reached,
)
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util import Throttle, dt
_LOGGER = logging.getLogger(__name__)
# Platform configuration keys.
CONF_CALENDARS = "calendars"
CONF_CUSTOM_CALENDARS = "custom_calendars"
CONF_CALENDAR = "calendar"
CONF_SEARCH = "search"
CONF_DAYS = "days"
# Marker string for event offsets — presumably consumed by the update logic
# later in this file (not visible here); TODO confirm.
OFFSET = "!!"
# Extends the base calendar PLATFORM_SCHEMA with CalDAV-specific options:
# server URL, optional credentials (username/password must appear together),
# calendar filters, custom search-filtered calendars, TLS verification, and
# the look-ahead window in days.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # pylint: disable=no-value-for-parameter
        vol.Required(CONF_URL): vol.Url(),
        vol.Optional(CONF_CALENDARS, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
        vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
        vol.Optional(CONF_CUSTOM_CALENDARS, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_CALENDAR): cv.string,
                        vol.Required(CONF_NAME): cv.string,
                        vol.Required(CONF_SEARCH): cv.string,
                    }
                )
            ],
        ),
        vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
        vol.Optional(CONF_DAYS, default=1): cv.positive_int,
    }
)
# Minimum interval between full refreshes of a calendar's event list.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, disc_info=None):
    """Set up the WebDav Calendar platform."""
    days = config[CONF_DAYS]
    client = caldav.DAVClient(
        config[CONF_URL],
        None,
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        ssl_verify_cert=config[CONF_VERIFY_SSL],
    )
    allowed_names = config[CONF_CALENDARS]
    custom_calendars = config[CONF_CUSTOM_CALENDARS]
    entities = []
    for dav_calendar in client.principal().calendars():
        # If a calendar name was given in the configuration, ignore the rest.
        if allowed_names and dav_calendar.name not in allowed_names:
            _LOGGER.debug("Ignoring calendar '%s'", dav_calendar.name)
            continue
        # Build filtered virtual calendars from the custom rules that
        # target this base calendar.
        for custom in custom_calendars:
            if custom[CONF_CALENDAR] != dav_calendar.name:
                continue
            device_id = f"{custom[CONF_CALENDAR]} {custom[CONF_NAME]}"
            entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass)
            entities.append(
                WebDavCalendarEventDevice(
                    custom[CONF_NAME],
                    dav_calendar,
                    entity_id,
                    days,
                    True,
                    custom[CONF_SEARCH],
                )
            )
        # With no custom rules configured at all, expose the calendar as-is.
        if not custom_calendars:
            entity_id = generate_entity_id(
                ENTITY_ID_FORMAT, dav_calendar.name, hass=hass
            )
            entities.append(
                WebDavCalendarEventDevice(
                    dav_calendar.name, dav_calendar, entity_id, days
                )
            )
    add_entities(entities, True)
class WebDavCalendarEventDevice(CalendarEventDevice):
    """A device for getting the next Task from a WebDav Calendar."""

    def __init__(self, name, calendar, entity_id, days, all_day=False, search=None):
        """Create the WebDav Calendar Event Device."""
        self.data = WebDavCalendarData(calendar, days, all_day, search)
        self.entity_id = entity_id
        self._name = name
        self._event = None
        self._offset_reached = False

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    @property
    def event(self):
        """Return the next upcoming event."""
        return self._event

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return {"offset_reached": self._offset_reached}

    async def async_get_events(self, hass, start_date, end_date):
        """Get all events in a specific time frame."""
        return await self.data.async_get_events(hass, start_date, end_date)

    def update(self):
        """Update event data."""
        self.data.update()
        upcoming = copy.deepcopy(self.data.event)
        if upcoming is None:
            self._event = None
            return
        # Apply the "!!" offset marker and record whether it has elapsed.
        upcoming = calculate_offset(upcoming, OFFSET)
        self._offset_reached = is_offset_reached(upcoming)
        self._event = upcoming
class WebDavCalendarData:
    """Class to utilize the calendar dav client object to get next event."""
    def __init__(self, calendar, days, include_all_day, search):
        """Set up how we are going to search the WebDav calendar.

        calendar: caldav calendar object to query.
        days: how many days ahead update() scans for the next event.
        include_all_day: whether all-day events may become the active event.
        search: optional regex; only events matching it are considered.
        """
        self.calendar = calendar
        self.days = days
        self.include_all_day = include_all_day
        self.search = search
        # Currently-active event dict (summary/start/end/...), or None.
        self.event = None
    async def async_get_events(self, hass, start_date, end_date):
        """Get all events in a specific time frame."""
        # Get event list from the current calendar; the blocking caldav
        # query runs in the executor so the event loop is not blocked.
        vevent_list = await hass.async_add_executor_job(
            self.calendar.date_search, start_date, end_date
        )
        event_list = []
        for event in vevent_list:
            vevent = event.instance.vevent
            # Apply the optional regex filter configured for this calendar.
            if not self.is_matching(vevent, self.search):
                continue
            uid = None
            if hasattr(vevent, "uid"):
                uid = vevent.uid.value
            data = {
                "uid": uid,
                "summary": vevent.summary.value,
                "start": self.get_hass_date(vevent.dtstart.value),
                "end": self.get_hass_date(self.get_end_date(vevent)),
                "location": self.get_attr_value(vevent, "location"),
                "description": self.get_attr_value(vevent, "description"),
            }
            # Normalize both endpoints to ISO 8601 strings for the API.
            data["start"] = get_date(data["start"]).isoformat()
            data["end"] = get_date(data["end"]).isoformat()
            event_list.append(data)
        return event_list
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data."""
        start_of_today = dt.start_of_local_day()
        start_of_tomorrow = dt.start_of_local_day() + timedelta(days=self.days)
        # We have to retrieve the results for the whole day as the server
        # won't return events that have already started
        results = self.calendar.date_search(start_of_today, start_of_tomorrow)
        # Create new events for each recurrence of an event that happens today.
        # For recurring events, some servers return the original event with recurrence rules
        # and they would not be properly parsed using their original start/end dates.
        new_events = []
        for event in results:
            vevent = event.instance.vevent
            for start_dt in vevent.getrruleset() or []:
                _start_of_today = start_of_today
                _start_of_tomorrow = start_of_tomorrow
                # All-day events use plain dates (see is_all_day), so the
                # window bounds must be reduced to dates for comparison.
                if self.is_all_day(vevent):
                    start_dt = start_dt.date()
                    _start_of_today = _start_of_today.date()
                    _start_of_tomorrow = _start_of_tomorrow.date()
                if _start_of_today <= start_dt < _start_of_tomorrow:
                    # Clone the event and shift it to this occurrence's start,
                    # preserving the original duration when dtend is present.
                    new_event = event.copy()
                    new_vevent = new_event.instance.vevent
                    if hasattr(new_vevent, "dtend"):
                        dur = new_vevent.dtend.value - new_vevent.dtstart.value
                        new_vevent.dtend.value = start_dt + dur
                    new_vevent.dtstart.value = start_dt
                    new_events.append(new_event)
                elif _start_of_tomorrow <= start_dt:
                    # assumes the rruleset yields occurrences in ascending
                    # order, so everything after this is also out of range
                    # -- TODO confirm against the vobject/dateutil docs.
                    break
        vevents = [event.instance.vevent for event in results + new_events]
        # dtstart can be a date or datetime depending if the event lasts a
        # whole day. Convert everything to datetime to be able to sort it
        vevents.sort(key=lambda x: self.to_datetime(x.dtstart.value))
        # First event (in start order) that matches the regex, passes the
        # all-day policy and is not already over becomes the active event.
        vevent = next(
            (
                vevent
                for vevent in vevents
                if (
                    self.is_matching(vevent, self.search)
                    and (not self.is_all_day(vevent) or self.include_all_day)
                    and not self.is_over(vevent)
                )
            ),
            None,
        )
        # If no matching event could be found
        if vevent is None:
            _LOGGER.debug(
                "No matching event found in the %d results for %s",
                len(vevents),
                self.calendar.name,
            )
            self.event = None
            return
        # Populate the entity attributes with the event values
        self.event = {
            "summary": vevent.summary.value,
            "start": self.get_hass_date(vevent.dtstart.value),
            "end": self.get_hass_date(self.get_end_date(vevent)),
            "location": self.get_attr_value(vevent, "location"),
            "description": self.get_attr_value(vevent, "description"),
        }
    @staticmethod
    def is_matching(vevent, search):
        """Return if the event matches the filter criteria."""
        if search is None:
            return True
        pattern = re.compile(search)
        # NOTE(review): pattern.match anchors at the beginning of each field;
        # a substring filter would need pattern.search -- confirm intended.
        return (
            hasattr(vevent, "summary")
            and pattern.match(vevent.summary.value)
            or hasattr(vevent, "location")
            and pattern.match(vevent.location.value)
            or hasattr(vevent, "description")
            and pattern.match(vevent.description.value)
        )
    @staticmethod
    def is_all_day(vevent):
        """Return if the event last the whole day."""
        # All-day events carry a plain date (not a datetime) in dtstart.
        return not isinstance(vevent.dtstart.value, datetime)
    @staticmethod
    def is_over(vevent):
        """Return if the event is over."""
        return dt.now() >= WebDavCalendarData.to_datetime(
            WebDavCalendarData.get_end_date(vevent)
        )
    @staticmethod
    def get_hass_date(obj):
        """Return the date/datetime in the dict form Home Assistant expects."""
        # Datetimes map to "dateTime", plain dates (all-day events) to "date".
        if isinstance(obj, datetime):
            return {"dateTime": obj.isoformat()}
        return {"date": obj.isoformat()}
    @staticmethod
    def to_datetime(obj):
        """Return a datetime."""
        if isinstance(obj, datetime):
            if obj.tzinfo is None:
                # floating value, not bound to any time zone in particular
                # represent same time regardless of which time zone is currently being observed
                return obj.replace(tzinfo=dt.DEFAULT_TIME_ZONE)
            return obj
        # Plain date: interpret as local midnight at the start of that day.
        return dt.as_local(dt.dt.datetime.combine(obj, dt.dt.time.min))
    @staticmethod
    def get_attr_value(obj, attribute):
        """Return the value of the attribute if defined."""
        if hasattr(obj, attribute):
            return getattr(obj, attribute).value
        return None
    @staticmethod
    def get_end_date(obj):
        """Return the end datetime as determined by dtend or duration."""
        if hasattr(obj, "dtend"):
            enddate = obj.dtend.value
        elif hasattr(obj, "duration"):
            enddate = obj.dtstart.value + obj.duration.value
        else:
            # Neither dtend nor duration present: assume a one-day event.
            enddate = obj.dtstart.value + timedelta(days=1)
        return enddate
| apache-2.0 |
def main(request, response):
    """Serve the CSP test page for `script-src 'self' 'unsafe-inline'`.

    Sets the Content-Security-Policy response header (plus the legacy
    X-Content-Security-Policy / X-WebKit-CSP prefixes) and returns the
    HTML test document.
    """
    # NOTE: earlier revisions read config.json here to build server URLs
    # (url1/url2), but this template never interpolates them and the file
    # handle was leaked (opened with file() and never closed). The dead
    # lookup has been removed; if URLs are ever needed again, read the
    # config with `with open('config.json') as f: json.load(f)`.
    _CSP = "script-src 'self' 'unsafe-inline'"
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<html>
  <head>
    <title>CSP Test: script-src 'self' 'unsafe-inline'</title>
    <meta http-equiv="content-type" content="text/html; charset=UTF-8" />
    <meta description="Content-Security-Policy Test: script-src 'self' 'unsafe-inline'" />
    <link rel="author" title="bhill@paypal-inc.com" />
    <script src="../../resources/testharness.js"></script>
    <script src="../../resources/testharnessreport.js"></script>
    <script src="CSP_passTest001.py"></script>
  </head>
  <body>
    <div id=log></div>
  </body>
  <!--
    This test demonstrates how to test something that shouldn't happen, or
    fail when something that should happend doesn't. Use script with
    conditional execution based on the policy being tested to set a variable,
    then use script we know will execute by policy to check if it is set.
    Some limitations on this approach, obviously, if policy enforcement is
    very broken - when we can't count on any script to execute - but this
    is a start, at least.
  -->
  <script>
    test(function() {assert_true(false)}, "assert_true with false from unsafe inline script");
  </script>
</html> """
| bsd-3-clause |
2015fallproject/2015fallcase2 | static/Brython3.2.0-20150701-214155/Lib/encodings/cp869.py | 37 | 33654 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP869.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec translating between CP869 bytes and Unicode."""

    def encode(self, input, errors='strict'):
        """Encode *input* using the CP869 encoding map."""
        encoded = codecs.charmap_encode(input, errors, encoding_map)
        return encoded

    def decode(self, input, errors='strict'):
        """Decode *input* using the CP869 decoding table."""
        decoded = codecs.charmap_decode(input, errors, decoding_table)
        return decoded
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP869 encoder; the charmap codec needs no carry-over state."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP869 decoder; the charmap codec needs no carry-over state."""

    def decode(self, input, final=False):
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer for CP869: combines the stateless Codec with the
    # buffering machinery of codecs.StreamWriter; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader for CP869: combines the stateless Codec with the
    # buffering machinery of codecs.StreamReader; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for the 'cp869' codec."""
    # The codec is stateless, so one shared instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp869',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: None, # UNDEFINED
0x0081: None, # UNDEFINED
0x0082: None, # UNDEFINED
0x0083: None, # UNDEFINED
0x0084: None, # UNDEFINED
0x0085: None, # UNDEFINED
0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0087: None, # UNDEFINED
0x0088: 0x00b7, # MIDDLE DOT
0x0089: 0x00ac, # NOT SIGN
0x008a: 0x00a6, # BROKEN BAR
0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x008e: 0x2015, # HORIZONTAL BAR
0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x0093: None, # UNDEFINED
0x0094: None, # UNDEFINED
0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x0097: 0x00a9, # COPYRIGHT SIGN
0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0099: 0x00b2, # SUPERSCRIPT TWO
0x009a: 0x00b3, # SUPERSCRIPT THREE
0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00e6: 0x03bc, # GREEK SMALL LETTER MU
0x00e7: 0x03bd, # GREEK SMALL LETTER NU
0x00e8: 0x03be, # GREEK SMALL LETTER XI
0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00ea: 0x03c0, # GREEK SMALL LETTER PI
0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
0x00ef: 0x0384, # GREEK TONOS
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\ufffe' # 0x0080 -> UNDEFINED
'\ufffe' # 0x0081 -> UNDEFINED
'\ufffe' # 0x0082 -> UNDEFINED
'\ufffe' # 0x0083 -> UNDEFINED
'\ufffe' # 0x0084 -> UNDEFINED
'\ufffe' # 0x0085 -> UNDEFINED
'\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\ufffe' # 0x0087 -> UNDEFINED
'\xb7' # 0x0088 -> MIDDLE DOT
'\xac' # 0x0089 -> NOT SIGN
'\xa6' # 0x008a -> BROKEN BAR
'\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK
'\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u2015' # 0x008e -> HORIZONTAL BAR
'\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\ufffe' # 0x0093 -> UNDEFINED
'\ufffe' # 0x0094 -> UNDEFINED
'\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\xa9' # 0x0097 -> COPYRIGHT SIGN
'\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\xb2' # 0x0099 -> SUPERSCRIPT TWO
'\xb3' # 0x009a -> SUPERSCRIPT THREE
'\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS
'\xa3' # 0x009c -> POUND SIGN
'\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI
'\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA
'\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA
'\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA
'\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA
'\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU
'\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU
'\u03be' # 0x00e8 -> GREEK SMALL LETTER XI
'\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0x00ea -> GREEK SMALL LETTER PI
'\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO
'\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA
'\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU
'\u0384' # 0x00ef -> GREEK TONOS
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI
'\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI
'\xa7' # 0x00f5 -> SECTION SIGN
'\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI
'\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA
'\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a3: 0x009c, # POUND SIGN
0x00a6: 0x008a, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x0097, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x0089, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x0099, # SUPERSCRIPT TWO
0x00b3: 0x009a, # SUPERSCRIPT THREE
0x00b7: 0x0088, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x0384: 0x00ef, # GREEK TONOS
0x0385: 0x00f7, # GREEK DIALYTIKA TONOS
0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA
0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA
0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA
0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA
0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA
0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA
0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x00b7, # GREEK CAPITAL LETTER MU
0x039d: 0x00b8, # GREEK CAPITAL LETTER NU
0x039e: 0x00bd, # GREEK CAPITAL LETTER XI
0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI
0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO
0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU
0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI
0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI
0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI
0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA
0x03b2: 0x00d7, # GREEK SMALL LETTER BETA
0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA
0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA
0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON
0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA
0x03b7: 0x00e1, # GREEK SMALL LETTER ETA
0x03b8: 0x00e2, # GREEK SMALL LETTER THETA
0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA
0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00e6, # GREEK SMALL LETTER MU
0x03bd: 0x00e7, # GREEK SMALL LETTER NU
0x03be: 0x00e8, # GREEK SMALL LETTER XI
0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00ea, # GREEK SMALL LETTER PI
0x03c1: 0x00eb, # GREEK SMALL LETTER RHO
0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ee, # GREEK SMALL LETTER TAU
0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00f3, # GREEK SMALL LETTER PHI
0x03c7: 0x00f4, # GREEK SMALL LETTER CHI
0x03c8: 0x00f6, # GREEK SMALL LETTER PSI
0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS
0x2015: 0x008e, # HORIZONTAL BAR
0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK
0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| agpl-3.0 |
Lilykos/invenio-oaiharvester | invenio_oaiharvester/tasks/harvesting.py | 3 | 10144 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tasks used for main OAI harvesting workflow."""
import os
import time
from functools import wraps
from invenio.base.globals import cfg
def init_harvesting(obj, eng):
    """Copy the harvesting options from the engine onto the object.

    Retrieves the options attached to the workflow engine and stores them
    on the workflow object so that subsequent tasks can read them.

    :param obj: BibWorkflowObject to process
    :param eng: BibWorkflowEngine processing the object
    """
    try:
        obj.extra_data["options"] = eng.extra_data["options"]
    except KeyError:
        # Missing options are not fatal here, but later tasks may rely on
        # them, so log loudly and fall back to an empty option set.
        # (Original message was garbled: missing space between the two
        # string fragments and broken grammar.)
        eng.log.error("No options for this task have been found. It is "
                      "possible that the following tasks could fail or "
                      "work not as expected")
        obj.extra_data["options"] = {}
    eng.log.info("end of init_harvesting")
init_harvesting.description = 'Start harvesting'
def filtering_oai_pmh_identifier(obj, eng):
    """Check if the current OAI record has been processed already this run.

    Returns ``True`` when the record's identifier has not been seen in this
    harvesting run (and records it), ``False`` when the object carries no
    data or the identifier was already harvested.

    :param obj: BibWorkflowObject holding the harvested record text
    :param eng: BibWorkflowEngine whose extra_data tracks seen identifiers
    """
    from ..utils import identifier_extraction_from_string
    # Lazily create the per-run storage of already-seen identifiers on
    # the engine, so it is shared across all objects of this run.
    if "oaiharvest" not in eng.extra_data:
        eng.extra_data["oaiharvest"] = {}
    if "identifiers" not in eng.extra_data["oaiharvest"]:
        eng.extra_data["oaiharvest"]["identifiers"] = []
    if not obj.data:
        obj.log.error("No data found in object!")
        return False
    elif isinstance(obj.data, list):
        # In case it is a list, only the first element is considered
        obj.data = obj.data[0]
    # Try extraction with the default OAI namespace first, then without
    # any namespace; fall back to an empty identifier.
    identifier = (identifier_extraction_from_string(obj.data) or
                  identifier_extraction_from_string(obj.data, oai_namespace="") or
                  "")
    obj.extra_data["oai_identifier"] = identifier
    if identifier in eng.extra_data["oaiharvest"]["identifiers"]:
        # The record has already been harvested in this run
        return False
    else:
        eng.extra_data["oaiharvest"]["identifiers"].append(identifier)
        return True
def get_repositories_list(repositories=()):
    """Return a workflow task that resolves the repositories to harvest.

    The returned closure reads the ``repository`` option from the workflow
    object (falling back to the *repositories* argument) and resolves each
    name against the OaiHARVEST table; with no selection at all, every
    configured repository is returned.

    :param repositories: default repository names used when the workflow
        options do not specify any.
    """
    from invenio.modules.oaiharvester.models import OaiHARVEST
    from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
    @wraps(get_repositories_list)
    def _get_repositories_list(obj, eng):
        repositories_to_harvest = repositories
        reposlist_temp = []
        if obj.extra_data["options"]["repository"]:
            # Workflow options take precedence over the default argument.
            repositories_to_harvest = obj.extra_data["options"]["repository"]
        if repositories_to_harvest:
            for reposname in repositories_to_harvest:
                try:
                    reposlist_temp.append(
                        OaiHARVEST.get(OaiHARVEST.name == reposname).one())
                except (MultipleResultsFound, NoResultFound):
                    # Fixed message: was "doesn't exit into our database".
                    eng.log.critical(
                        "Repository %s does not exist in our database",
                        reposname)
        else:
            # No explicit selection: harvest every configured repository.
            reposlist_temp = OaiHARVEST.get(OaiHARVEST.name != "").all()
        true_repo_list = [repo.to_dict() for repo in reposlist_temp]
        if true_repo_list:
            return true_repo_list
        else:
            eng.halt(
                "No Repository named %s. Impossible to harvest non-existing things."
                % repositories_to_harvest)
    return _get_repositories_list
def harvest_records(obj, eng):
    """Run the harvesting task.

    Harvests either by explicit identifiers or by a date range (depending
    on the workflow options) into a timestamped path under the shared tmp
    directory, then stores the list of harvested files on the object.

    :param obj: BibWorkflowObject being processed
    :param eng: BibWorkflowEngine processing the object
    :raises WorkflowError: if the harvested files and the identifiers
        collected from them are out of sync.
    """
    from invenio.modules.oaiharvester.utils import collect_identifiers
    from invenio.modules.workflows.errors import WorkflowError
    harvested_identifier_list = []
    # Unique destination prefix: <tmpdir>/oaiharvest_<uuid>_1_<timestamp>_
    harvestpath = "%s_%d_%s_" % (
        "%s/oaiharvest_%s" % (cfg['CFG_TMPSHAREDDIR'], eng.uuid),
        1, time.strftime("%Y%m%d%H%M%S"))
    # ## go ahead: check if user requested from-until harvesting
    try:
        if "dates" not in obj.extra_data["options"]:
            obj.extra_data["options"]["dates"] = []
        if "identifiers" not in obj.extra_data["options"]:
            obj.extra_data["options"]["identifiers"] = []
    except TypeError:
        # "options" was not a dict at all - start from a clean slate.
        obj.extra_data["options"] = {"dates": [], "identifiers": []}
    arguments = obj.extra_data["repository"]["arguments"]
    if arguments:
        eng.log.info("running with post-processes: %r" % (arguments,))
    else:
        eng.log.error(
            "No arguments found... It can be causing major error after this point.")
    # Harvest phase
    if obj.extra_data["options"]["identifiers"]:
        # Harvesting is done per identifier instead of server-updates
        harvested_files_list = harvest_by_identifiers(obj, harvestpath)
    else:
        harvested_files_list = harvest_by_dates(obj, harvestpath)
    if len(harvested_files_list) == 0:
        eng.log.info("No records harvested for %s" % (obj.data["name"],))
    # Retrieve all OAI IDs and set active list
    harvested_identifier_list.append(collect_identifiers(harvested_files_list))
    if len(harvested_files_list) != len(harvested_identifier_list[0]):
        # Harvested files and its identifiers are 'out of sync', abort harvest
        raise WorkflowError(
            "Harvested files miss identifiers for %s" % (arguments,),
            id_workflow=eng.uuid,
            id_object=obj.id)
    obj.extra_data['harvested_files_list'] = harvested_files_list
    eng.log.info(
        "%d files harvested and processed \n End harvest records task" % (
            len(harvested_files_list),))
def get_records_from_file(path=None):
    """Return a workflow task that extracts records from a harvested file.

    Extracted records are cached in the engine's ``_LoopData`` storage so
    that repeated calls for the same file do not re-parse it; a new file
    (different ``obj.data`` path) triggers re-extraction.

    :param path: optional explicit path to read records from; when not
        given, ``obj.data`` is used as the file path.

    NOTE: as found, the ``else``/``elif`` in this body were flattened to
    the same level (a syntax error); the extraction branch is restored
    under the cache-miss check.
    """
    from ..utils import record_extraction_from_file
    @wraps(get_records_from_file)
    def _get_records_from_file(obj, eng):
        if "_LoopData" not in eng.extra_data:
            eng.extra_data["_LoopData"] = {}
        if "get_records_from_file" not in eng.extra_data["_LoopData"]:
            # First extraction: parse the file and remember which path
            # the cached records came from.
            eng.extra_data["_LoopData"]["get_records_from_file"] = {}
            if path:
                eng.extra_data["_LoopData"]["get_records_from_file"].update(
                    {"data": record_extraction_from_file(path)})
            else:
                eng.extra_data["_LoopData"]["get_records_from_file"].update(
                    {"data": record_extraction_from_file(obj.data)})
                eng.extra_data["_LoopData"]["get_records_from_file"][
                    "path"] = obj.data
        elif os.path.isfile(obj.data) and obj.data != \
                eng.extra_data["_LoopData"]["get_records_from_file"]["path"]:
            # A different file than the cached one: re-extract its records.
            # (assumes the "path" key was stored on first extraction -
            # only true for the no-explicit-path case; TODO confirm)
            eng.extra_data["_LoopData"]["get_records_from_file"].update(
                {"data": record_extraction_from_file(obj.data)})
        return eng.extra_data["_LoopData"]["get_records_from_file"]["data"]
    return _get_records_from_file
def harvest_by_identifiers(obj, harvestpath):
    """Harvest individual records from an OAI repository by identifier.

    Issues one OAI-PMH ``GetRecord`` request per identifier listed in the
    workflow options and collects the resulting harvested file paths.

    :param obj: workflow object whose ``data`` describes the repository
        (metadataprefix, baseurl) and whose options list the identifiers
    :param harvestpath: filepath prefix the records are harvested into
    :return: list of harvested file paths
    """
    from ..getter import oai_harvest_get
    repository = obj.data
    files = []
    for identifier in obj.extra_data["options"]["identifiers"]:
        fetched = oai_harvest_get(prefix=repository["metadataprefix"],
                                  baseurl=repository["baseurl"],
                                  harvestpath=harvestpath,
                                  verb="GetRecord",
                                  identifier=identifier)
        files.extend(fetched)
    return files
def harvest_by_dates(obj, harvestpath):
    """Harvest an OAI repository for records updated inside a date range.

    The range comes from the workflow options when present; otherwise the
    repository's ``lastrun`` value becomes the starting date (or the whole
    repository is harvested when no last run is recorded).  In both
    lastrun-derived cases a flag is set so the caller updates ``lastrun``
    afterwards.

    :param obj: workflow object whose ``data`` describes the repository
    :param harvestpath: filepath prefix the records are harvested into
    :return: list of harvested file paths
    """
    from ..getter import oai_harvest_get
    dates = obj.extra_data["options"]["dates"]
    if dates:
        # Explicit user-supplied [from, to] range.
        fromdate = str(dates[0])
        todate = str(dates[1])
    else:
        todate = None
        lastrun = obj.data["lastrun"]
        if lastrun is None or lastrun == '':
            # Never harvested before: take everything from the beginning.
            fromdate = None
        else:
            # Incremental harvest since the previous run (date part only).
            fromdate = str(lastrun).split()[0]
        obj.extra_data["_should_last_run_be_update"] = True
    return oai_harvest_get(prefix=obj.data["metadataprefix"],
                           baseurl=obj.data["baseurl"],
                           harvestpath=harvestpath,
                           fro=fromdate,
                           until=todate,
                           setspecs=obj.data["setspecs"])
| gpl-2.0 |
ybrs/terminator-forked | build/lib.linux-x86_64-2.7/terminatorlib/configobj/configobj.py | 98 | 88163 | # configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2010 Michael Foord, Nicola Larosa
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# nico AT tekNico DOT net
# ConfigObj 4
# http://www.voidspace.org.uk/python/configobj.html
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# For information about bugfixes, updates and support, please join the
# ConfigObj mailing list:
# http://lists.sourceforge.net/lists/listinfo/configobj-develop
# Comments, suggestions and bug reports welcome.
from __future__ import generators
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
    """Return True when *encoding* names UTF-8 under any recognised alias."""
    normalized = encoding.lower()
    return BOM_LIST.get(normalized) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__version__ = '4.7.2'
try:
    any
except NameError:
    # Compatibility shim: ``any`` is a builtin only from Python 2.5 on.
    def any(iterable):
        # True as soon as one truthy entry is found, else False.
        for entry in iterable:
            if entry:
                return True
        return False
__all__ = (
'__version__',
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
def getObj(s):
    """Parse expression *s* into a ``compiler`` AST node (Python 2 only).

    Wraps *s* in an assignment so the parser accepts a bare expression,
    then digs the right-hand-side node out of the parse tree.
    """
    global compiler
    if compiler is None:
        # imported lazily here to avoid the startup hit when unused
        import compiler
    s = "a=" + s
    p = compiler.parse(s)
    # Module -> Stmt -> Assign -> the expression node on the RHS
    return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
    """Raised by Builder when an AST node cannot be rebuilt into a value."""
    pass
class Builder(object):
    """Rebuild Python values from ``compiler`` AST nodes (Python 2 only).

    Used by :func:`unrepr` to evaluate repr-style constant expressions
    (strings, numbers, lists, dicts, tuples, booleans, None) without
    resorting to ``eval``.
    """

    def build(self, o):
        # Dispatch on the AST node's class name, e.g. List -> build_List.
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
    def build_List(self, o):
        # NOTE: relies on Python 2 ``map`` returning a list.
        return map(self.build, o.getChildren())
    def build_Const(self, o):
        return o.value
    def build_Dict(self, o):
        d = {}
        # children alternate key, value, key, value, ...
        i = iter(map(self.build, o.getChildren()))
        for el in i:
            d[el] = i.next()
        return d
    def build_Tuple(self, o):
        return tuple(self.build_List(o))
    def build_Name(self, o):
        # Only the three constant names are allowed.
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False
        # An undefined Name
        raise UnknownType('Undefined Name')
    def build_Add(self, o):
        # Only complex literals such as ``1+2j`` are accepted here.
        real, imag = map(self.build_Const, o.getChildren())
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag
    def build_Getattr(self, o):
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)
    def build_UnarySub(self, o):
        return -self.build_Const(o.getChildren()[0])
    def build_UnaryAdd(self, o):
        return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
    """Convert a repr-style string back into a Python value."""
    if s:
        return _builder.build(getObj(s))
    return s
class ConfigObjError(SyntaxError):
    """
    Base class for all errors that ConfigObj raises.

    Subclasses ``SyntaxError`` so a parsing problem can carry the
    offending line and its number alongside the message.
    """
    def __init__(self, message='', line_number=None, line=''):
        # Record where the problem occurred before delegating the message.
        self.line_number = line_number
        self.line = line
        SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match
    the surrounding sections.
    """
class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written:
    it is neither a valid ``key = value`` line
    nor a valid section marker line.
    """
class ReloadError(IOError):
    """
    Raised when a ``reload`` operation fails.

    Subclasses ``IOError`` for backwards compatibility.
    """
    def __init__(self):
        message = 'reload failed, filename is not set.'
        IOError.__init__(self, message)
class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists in this section.
    """
class ConfigspecError(ConfigObjError):
    """
    An error occurred whilst parsing a configspec.
    """
class InterpolationError(ConfigObjError):
    """Base class for the string interpolation errors."""
class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""

    def __init__(self, option):
        message = 'interpolation loop detected in value "%s".' % option
        InterpolationError.__init__(self, message)
class RepeatSectionError(ConfigObjError):
    """
    This error indicates extra, unexpected sections inside a section
    that declares a ``__many__`` (repeated) section.
    """
class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""

    def __init__(self, option):
        InterpolationError.__init__(
            self, 'missing option "%s" in interpolation.' % option)
class UnreprError(ConfigObjError):
    """An error occurred while parsing a value in unrepr mode."""
class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.

    This class is an abstract base class; its descendants perform
    the actual work.
    """
    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    # cheap containment check: values lacking this char need no work
    _cookie = '%'
    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section
    def interpolate(self, key, value):
        """Return *value* with all interpolation references expanded."""
        # short-cut
        if not self._cookie in value:
            return value
        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.

            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops

            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
            if (key, section.name) in backtrail:
                # Yes - infinite loop detected
                raise InterpolationLoopError(key)
            # Place a marker on our backtrail so we won't come back here again
            backtrail[(key, section.name)] = 1
            # Now start the actual work
            match = self._KEYCRE.search(value)
            while match:
                # The actual parsing of the match is implementation-dependent,
                # so delegate to our helper function
                k, v, s = self._parse_match(match)
                if k is None:
                    # That's the signal that no further interpolation is needed
                    replacement = v
                else:
                    # Further interpolation may be needed to obtain final value
                    replacement = recursive_interpolate(k, v, s, backtrail)
                # Replace the matched string with its final value
                start, end = match.span()
                value = ''.join((value[:start], replacement, value[end:]))
                new_search_start = start + len(replacement)
                # Pick up the next interpolation key, if any, for next time
                # through the while loop
                match = self._KEYCRE.search(value, new_search_start)
            # Now safe to come back here again; remove marker from backtrail
            del backtrail[(key, section.name)]
            return value
        # Back in interpolate(), all we have to do is kick off the recursive
        # function with appropriate starting values
        value = recursive_interpolate(key, value, self.section, {})
        return value
    def _fetch(self, key):
        """Helper function to fetch values from owning section.

        Returns a 2-tuple: the value, and the section where it was found.
        """
        # switch off interpolation before we try and fetch anything !
        save_interp = self.section.main.interpolation
        self.section.main.interpolation = False
        # Start at section that "owns" this InterpolationEngine
        current_section = self.section
        while True:
            # try the current section first
            val = current_section.get(key)
            if val is not None and not isinstance(val, Section):
                break
            # try "DEFAULT" next
            val = current_section.get('DEFAULT', {}).get(key)
            if val is not None and not isinstance(val, Section):
                break
            # move up to parent and try again
            # top-level's parent is itself
            if current_section.parent is current_section:
                # reached top level, time to give up
                break
            current_section = current_section.parent
        # restore interpolation to previous value before returning
        self.section.main.interpolation = save_interp
        if val is None:
            raise MissingInterpolationOption(key)
        return val, current_section
    def _parse_match(self, match):
        """Implementation-dependent helper function.

        Will be passed a match object corresponding to the interpolation
        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
        key in the appropriate config file section (using the ``_fetch()``
        helper function) and return a 3-tuple: (key, value, section)

        ``key`` is the name of the key we're looking for
        ``value`` is the value found for that key
        ``section`` is a reference to the section where it was found

        ``key`` and ``section`` should be None if no further
        interpolation should be performed on the resulting value
        (e.g., if we interpolated "$$" and returned "$").
        """
        raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
    """Behaves like ConfigParser."""
    _cookie = '%'
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    def _parse_match(self, match):
        # ``%(name)s`` style: group 1 holds the referenced key name.
        key = match.group(1)
        value, section = self._fetch(key)
        return key, value, section
class TemplateInterpolation(InterpolationEngine):
    """Behaves like string.Template."""
    _cookie = '$'
    _delimiter = '$'
    _KEYCRE = re.compile(r"""
        \$(?:
            (?P<escaped>\$) | # Two $ signs
            (?P<named>[_a-z][_a-z0-9]*) | # $name format
            {(?P<braced>[^}]*)} # ${name} format
        )
        """, re.IGNORECASE | re.VERBOSE)
    def _parse_match(self, match):
        # Valid name (in or out of braces): fetch value from section
        key = match.group('named') or match.group('braced')
        if key is not None:
            value, section = self._fetch(key)
            return key, value, section
        # Escaped delimiter (e.g., $$): return single delimiter
        if match.group('escaped') is not None:
            # Return None for key and section to indicate it's time to stop
            return None, self._delimiter, None
        # Anything else: ignore completely, just return it unchanged
        return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
    """
    A dictionary-like object that represents a section in a config file.

    It does string interpolation if the 'interpolation' attribute
    of the 'main' object is set to True.

    Interpolation is tried first from this object, then from the 'DEFAULT'
    section of this object, next from the parent and its 'DEFAULT' section,
    and so on until the main object is reached.

    A Section will behave like an ordered dictionary - following the
    order of the ``scalars`` and ``sections`` attributes.
    You can use this to change the order of members.

    Iteration follows the order: scalars, then sections.
    """
    def __setstate__(self, state):
        # Unpickle support: restore the dict contents and the instance
        # attributes saved by __reduce__.
        dict.update(self, state[0])
        self.__dict__.update(state[1])
    def __reduce__(self):
        # Pickle support: save dict contents and instance attributes
        # separately; __newobj__ recreates the instance without __init__.
        state = (dict(self), self.__dict__)
        return (__newobj__, (self.__class__,), state)
    def __init__(self, parent, depth, main, indict=None, name=None):
        """
        * parent is the section above
        * depth is the depth level of this section
        * main is the main ConfigObj
        * indict is a dictionary to initialise the section with
        * name is this section's own name (purely informational)
        """
        if indict is None:
            indict = {}
        dict.__init__(self)
        # used for nesting level *and* interpolation
        self.parent = parent
        # used for the interpolation attribute
        self.main = main
        # level of nesting depth of this Section
        self.depth = depth
        # purely for information
        self.name = name
        #
        self._initialise()
        # we do this explicitly so that __setitem__ is used properly
        # (rather than just passing to ``dict.__init__``)
        for entry, value in indict.iteritems():
            self[entry] = value
    def _initialise(self):
        """Reset all bookkeeping attributes to their empty defaults."""
        # the sequence of scalar values in this Section
        self.scalars = []
        # the sequence of sections in this Section
        self.sections = []
        # for comments :-)
        self.comments = {}
        self.inline_comments = {}
        # the configspec
        self.configspec = None
        # for defaults
        self.defaults = []
        self.default_values = {}
        self.extra_values = []
        self._created = False
    def _interpolate(self, key, value):
        """Expand interpolation references in *value* via the configured engine."""
        try:
            # do we already have an interpolation engine?
            engine = self._interpolation_engine
        except AttributeError:
            # not yet: first time running _interpolate(), so pick the engine
            name = self.main.interpolation
            if name == True: # note that "if name:" would be incorrect here
                # backwards-compatibility: interpolation=True means use default
                name = DEFAULT_INTERPOLATION
            name = name.lower() # so that "Template", "template", etc. all work
            class_ = interpolation_engines.get(name, None)
            if class_ is None:
                # invalid value for self.main.interpolation
                self.main.interpolation = False
                return value
            else:
                # save reference to engine so we don't have to do this again
                engine = self._interpolation_engine = class_(self)
        # let the engine do the actual work
        return engine.interpolate(key, value)
    def __getitem__(self, key):
        """Fetch the item and do string interpolation."""
        val = dict.__getitem__(self, key)
        if self.main.interpolation:
            if isinstance(val, basestring):
                return self._interpolate(key, val)
            if isinstance(val, list):
                # interpolate each string element of a list value
                def _check(entry):
                    if isinstance(entry, basestring):
                        return self._interpolate(key, entry)
                    return entry
                new = [_check(entry) for entry in val]
                if new != val:
                    # only return the copy if interpolation changed something
                    return new
        return val
    def __setitem__(self, key, value, unrepr=False):
        """
        Correctly set a value.

        Making dictionary values Section instances.
        (We have to special case 'Section' instances - which are also dicts)

        Keys must be strings.
        Values need only be strings (or lists of strings) if
        ``main.stringify`` is set.

        ``unrepr`` must be set when setting a value to a dictionary, without
        creating a new sub-section.
        """
        if not isinstance(key, basestring):
            raise ValueError('The key "%s" is not a string.' % key)
        # add the comment
        if key not in self.comments:
            self.comments[key] = []
            self.inline_comments[key] = ''
        # remove the entry from defaults
        if key in self.defaults:
            self.defaults.remove(key)
        #
        if isinstance(value, Section):
            if key not in self:
                self.sections.append(key)
            dict.__setitem__(self, key, value)
        elif isinstance(value, dict) and not unrepr:
            # First create the new depth level,
            # then create the section
            if key not in self:
                self.sections.append(key)
            new_depth = self.depth + 1
            dict.__setitem__(
                self,
                key,
                Section(
                    self,
                    new_depth,
                    self.main,
                    indict=value,
                    name=key))
        else:
            if key not in self:
                self.scalars.append(key)
            if not self.main.stringify:
                # stringify disabled: values must already be strings
                if isinstance(value, basestring):
                    pass
                elif isinstance(value, (list, tuple)):
                    for entry in value:
                        if not isinstance(entry, basestring):
                            raise TypeError('Value is not a string "%s".' % entry)
                else:
                    raise TypeError('Value is not a string "%s".' % value)
            dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        """Remove items from the sequence when deleting."""
        dict. __delitem__(self, key)
        if key in self.scalars:
            self.scalars.remove(key)
        else:
            self.sections.remove(key)
        # drop the comments attached to the removed entry
        del self.comments[key]
        del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
    def pop(self, key, default=MISSING):
        """
        'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised'
        """
        try:
            val = self[key]
        except KeyError:
            # MISSING sentinel distinguishes "no default" from default=None
            if default is MISSING:
                raise
            val = default
        else:
            # only delete when the key actually existed
            del self[key]
        return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
    def clear(self):
        """
        A version of clear that also affects scalars/sections
        Also clears comments and configspec.

        Leaves other attributes alone :
            depth/main/parent are not affected
        """
        dict.clear(self)
        # reset the ordering/bookkeeping attributes alongside the contents
        self.scalars = []
        self.sections = []
        self.comments = {}
        self.inline_comments = {}
        self.configspec = None
        self.defaults = []
        self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples.

        Order is scalars first, then sections.
        """
        return zip((self.scalars + self.sections), self.values())
    def keys(self):
        """D.keys() -> list of D's keys (scalars first, then sections)"""
        return (self.scalars + self.sections)
    def values(self):
        """D.values() -> list of D's values (fetched via __getitem__,
        so string interpolation applies)"""
        return [self[key] for key in (self.scalars + self.sections)]
    def iteritems(self):
        """D.iteritems() -> an iterator over the (key, value) items of D"""
        # Python 2 style iterator; simply wraps items().
        return iter(self.items())
    def iterkeys(self):
        """D.iterkeys() -> an iterator over the keys of D"""
        return iter((self.scalars + self.sections))
    # iterating a Section iterates its keys, like a dict
    __iter__ = iterkeys
    def itervalues(self):
        """D.itervalues() -> an iterator over the values of D"""
        # values() goes through __getitem__, so interpolation applies
        return iter(self.values())
    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        def _getval(key):
            # fall back to the raw stored value when interpolation refers
            # to a missing option - repr should never raise
            try:
                return self[key]
            except MissingInterpolationOption:
                return dict.__getitem__(self, key)
        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
            for key in (self.scalars + self.sections)])
    __str__ = __repr__
    __str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in indict.items():
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
    def walk(self, function, raise_errors=True,
            call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.
        Return a dictionary of the return values
        If the function raises an exception, raise the errror
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognised keyword arguments you pass to walk, will be pased on
        to the function you pass in.
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into it's members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the key of subsections as well as for ordinary members. The
        return value when called on the whole subsection has to be discarded.
        See the encode and decode methods for examples, including functions.
        .. admonition:: caution
            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.
        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
        """
        out = {}
        # scalars first
        # NOTE: iteration is by index (not ``for entry in self.scalars``)
        # because ``function`` may rename the entry via ``rename`` - the
        # list must keep its length, but names at position ``i`` can change.
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
            # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, basestring):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a')
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b')
3.2000000000000002
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
    """
    An object to read, create, and write config files.

    A ``ConfigObj`` is itself the root ``Section`` (depth 0) of the parsed
    file and additionally carries the parser state: encoding, accumulated
    errors, indentation style and the optional configspec.
    """
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
    def __init__(self, infile=None, options=None, configspec=None, encoding=None,
                 interpolation=True, raise_errors=False, list_values=True,
                 create_empty=False, file_error=False, stringify=True,
                 indent_type=None, default_encoding=None, unrepr=False,
                 write_empty_values=False, _inspec=False):
        """
        Parse a config file or create a config file object.
        ``ConfigObj(infile=None, configspec=None, encoding=None,
                    interpolation=True, raise_errors=False, list_values=True,
                    create_empty=False, file_error=False, stringify=True,
                    indent_type=None, default_encoding=None, unrepr=False,
                    write_empty_values=False, _inspec=False)``
        """
        self._inspec = _inspec
        # init the superclass
        Section.__init__(self, self, 0, self)
        infile = infile or []
        # gather the keyword arguments into a dict: used both as the
        # canonical option set and to detect explicit keyword overrides below
        _options = {'configspec': configspec,
                    'encoding': encoding, 'interpolation': interpolation,
                    'raise_errors': raise_errors, 'list_values': list_values,
                    'create_empty': create_empty, 'file_error': file_error,
                    'stringify': stringify, 'indent_type': indent_type,
                    'default_encoding': default_encoding, 'unrepr': unrepr,
                    'write_empty_values': write_empty_values}
        if options is None:
            options = _options
        else:
            # deprecated explicit ``options`` dict: merge defaults in, but an
            # explicitly supplied keyword argument wins over the dict entry
            import warnings
            warnings.warn('Passing in an options dictionary to ConfigObj() is '
                          'deprecated. Use **options instead.',
                          DeprecationWarning, stacklevel=2)
            # TODO: check the values too.
            for entry in options:
                if entry not in OPTION_DEFAULTS:
                    raise TypeError('Unrecognised option "%s".' % entry)
            for entry, value in OPTION_DEFAULTS.items():
                if entry not in options:
                    options[entry] = value
                keyword_value = _options[entry]
                if value != keyword_value:
                    options[entry] = keyword_value
        # XXXX this ignores an explicit list_values = True in combination
        # with _inspec. The user should *never* do that anyway, but still...
        if _inspec:
            options['list_values'] = False
        self._initialise(options)
        configspec = options['configspec']
        self._original_configspec = configspec
        self._load(infile, configspec)
    def _load(self, infile, configspec):
        """
        Load and parse ``infile``, which may be a filename, a list/tuple of
        lines, a dict (or ConfigObj), or a file-like object, then apply
        ``configspec`` (if any). Accumulated parse errors are raised here.
        """
        if isinstance(infile, basestring):
            self.filename = infile
            if os.path.isfile(infile):
                # read as raw bytes; decoding happens later in _handle_bom
                h = open(infile, 'rb')
                infile = h.read() or []
                h.close()
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    h = open(infile, 'w')
                    h.write('')
                    h.close()
                infile = []
        elif isinstance(infile, (list, tuple)):
            infile = list(infile)
        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                def set_section(in_section, this_section):
                    for entry in in_section.scalars:
                        this_section[entry] = in_section[entry]
                    for section in in_section.sections:
                        this_section[section] = {}
                        set_section(in_section[section], this_section[section])
                set_section(infile, self)
            else:
                for entry in infile:
                    self[entry] = infile[entry]
            # dict input needs no parsing, so no parse errors can exist
            del self._errors
            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            return
        elif getattr(infile, 'read', MISSING) is not MISSING:
            # This supports file like objects
            infile = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, or list of lines.')
        if infile:
            # don't do it for the empty ConfigObj
            infile = self._handle_bom(infile)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in infile:
                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                break
            infile = [line.rstrip('\r\n') for line in infile]
        self._parse(infile)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s." % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors
        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)]))
    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.
        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).
        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)
        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
        removed. The BOM attribute will be set. UTF16 will be decoded to
        unicode.
        NOTE: This method must not be called with an empty ``infile``.
        Specifying the *wrong* encoding is likely to cause a
        ``UnicodeDecodeError``.
        ``infile`` must always be returned as a list of lines, but may be
        passed in as a single string.
        """
        if ((self.encoding is not None) and
            (self.encoding.lower() not in BOM_LIST)):
            # No need to check for a BOM
            # the encoding specified doesn't have one
            # just decode
            return self._decode(infile, self.encoding)

        # only the first line can carry a BOM
        if isinstance(infile, (list, tuple)):
            line = infile[0]
        else:
            line = infile
        if self.encoding is not None:
            # encoding explicitly supplied
            # And it could have an associated BOM
            # TODO: if encoding is just UTF16 - we ought to check for both
            # TODO: big endian and little endian versions.
            enc = BOM_LIST[self.encoding.lower()]
            if enc == 'utf_16':
                # For UTF16 we try big endian and little endian
                # each BOMS entry maps a BOM prefix to (encoding, final_encoding)
                for BOM, (encoding, final_encoding) in BOMS.items():
                    if not final_encoding:
                        # skip UTF8
                        continue
                    if infile.startswith(BOM):
                        ### BOM discovered
                        ##self.BOM = True
                        # Don't need to remove BOM
                        return self._decode(infile, encoding)
                # If we get this far, will *probably* raise a DecodeError
                # As it doesn't appear to start with a BOM
                return self._decode(infile, self.encoding)
            # Must be UTF8
            BOM = BOM_SET[enc]
            if not line.startswith(BOM):
                return self._decode(infile, self.encoding)
            newline = line[len(BOM):]
            # BOM removed
            if isinstance(infile, (list, tuple)):
                infile[0] = newline
            else:
                infile = newline
            self.BOM = True
            return self._decode(infile, self.encoding)
        # No encoding specified - so we need to check for UTF8/UTF16
        for BOM, (encoding, final_encoding) in BOMS.items():
            if not line.startswith(BOM):
                continue
            else:
                # BOM discovered
                self.encoding = final_encoding
                if not final_encoding:
                    self.BOM = True
                    # UTF8
                    # remove BOM
                    newline = line[len(BOM):]
                    if isinstance(infile, (list, tuple)):
                        infile[0] = newline
                    else:
                        infile = newline
                    # UTF8 - don't decode
                    if isinstance(infile, basestring):
                        return infile.splitlines(True)
                    else:
                        return infile
                # UTF16 - have to decode
                return self._decode(infile, encoding)
        # No BOM discovered and no encoding specified, just return
        if isinstance(infile, basestring):
            # infile read from a file will be a single string
            return infile.splitlines(True)
        return infile
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if self.encoding:
return aString.decode('ascii')
else:
return aString
    def _decode(self, infile, encoding):
        """
        Decode infile to unicode. Using the specified encoding.
        if is a string, it also needs converting to a list.
        """
        if isinstance(infile, basestring):
            # can't be unicode
            # NOTE: Could raise a ``UnicodeDecodeError``
            return infile.decode(encoding).splitlines(True)
        # list input is decoded in place and the same list returned
        for i, line in enumerate(infile):
            if not isinstance(line, unicode):
                # NOTE: The isinstance test here handles mixed lists of unicode/string
                # NOTE: But the decode will break on any non-string values
                # NOTE: Or could raise a ``UnicodeDecodeError``
                infile[i] = line.decode(encoding)
        return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if not self.encoding:
return line
if isinstance(line, str) and self.default_encoding:
return line.decode(self.default_encoding)
return line
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, basestring):
return str(value)
else:
return value
    def _parse(self, infile):
        """
        Actually parse the config file.

        ``infile`` is a list of already-decoded, newline-stripped lines.
        Errors are routed through ``_handle_error`` (stored or raised
        according to ``self.raise_errors``).
        """
        # ``unrepr`` mode parses values with unrepr(), so list handling
        # in _handle_value must be disabled for the duration
        temp_list_values = self.list_values
        if self.unrepr:
            self.list_values = False

        comment_list = []
        done_start = False
        this_section = self
        maxline = len(infile) - 1
        cur_index = -1
        reset_comment = False

        while cur_index < maxline:
            if reset_comment:
                comment_list = []
            cur_index += 1
            line = infile[cur_index]
            sline = line.strip()
            # do we have anything on the line ?
            if not sline or sline.startswith('#'):
                reset_comment = False
                comment_list.append(line)
                continue

            if not done_start:
                # preserve initial comment
                self.initial_comment = comment_list
                comment_list = []
                done_start = True

            reset_comment = True
            # first we check if it's a section marker
            mat = self._sectionmarker.match(line)
            if mat is not None:
                # is a section line
                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # nesting depth == number of '[' in the marker
                cur_depth = sect_open.count('[')
                if cur_depth != sect_close.count(']'):
                    self._handle_error("Cannot compute the section depth at line %s.",
                                       NestingError, infile, cur_index)
                    continue

                if cur_depth < this_section.depth:
                    # the new section is dropping back to a previous level
                    try:
                        parent = self._match_depth(this_section,
                                                   cur_depth).parent
                    except SyntaxError:
                        self._handle_error("Cannot compute nesting level at line %s.",
                                           NestingError, infile, cur_index)
                        continue
                elif cur_depth == this_section.depth:
                    # the new section is a sibling of the current section
                    parent = this_section.parent
                elif cur_depth == this_section.depth + 1:
                    # the new section is a child the current section
                    parent = this_section
                else:
                    self._handle_error("Section too nested at line %s.",
                                       NestingError, infile, cur_index)

                sect_name = self._unquote(sect_name)
                if sect_name in parent:
                    self._handle_error('Duplicate section name at line %s.',
                                       DuplicateError, infile, cur_index)
                    continue

                # create the new section
                this_section = Section(
                    parent,
                    cur_depth,
                    self,
                    name=sect_name)
                parent[sect_name] = this_section
                parent.inline_comments[sect_name] = comment
                parent.comments[sect_name] = comment_list
                continue
            #
            # it's not a section marker,
            # so it should be a valid ``key = value`` line
            mat = self._keyword.match(line)
            if mat is None:
                # it neither matched as a keyword
                # or a section marker
                self._handle_error(
                    'Invalid line at line "%s".',
                    ParseError, infile, cur_index)
            else:
                # is a keyword value
                # value will include any inline comment
                (indent, key, value) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # check for a multiline value
                if value[:3] in ['"""', "'''"]:
                    try:
                        value, comment, cur_index = self._multiline(
                            value, infile, cur_index, maxline)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
                    else:
                        if self.unrepr:
                            comment = ''
                            try:
                                value = unrepr(value)
                            except Exception, e:
                                # NOTE(review): the equivalent branch below
                                # uses ``isinstance(e, UnknownType)`` - this
                                # ``type`` comparison looks inconsistent;
                                # confirm which is intended.
                                if type(e) == UnknownType:
                                    msg = 'Unknown name or type in value at line %s.'
                                else:
                                    msg = 'Parse error in value at line %s.'
                                self._handle_error(msg, UnreprError, infile,
                                    cur_index)
                                continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception, e:
                            if isinstance(e, UnknownType):
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                cur_index)
                            continue
                    else:
                        # extract comment and lists
                        try:
                            (value, comment) = self._handle_value(value)
                        except SyntaxError:
                            self._handle_error(
                                'Parse error in value at line %s.',
                                ParseError, infile, cur_index)
                            continue
                #
                key = self._unquote(key)
                if key in this_section:
                    self._handle_error(
                        'Duplicate keyword name at line %s.',
                        DuplicateError, infile, cur_index)
                    continue
                # add the key.
                # we set unrepr because if we have got this far we will never
                # be creating a new section
                this_section.__setitem__(key, value, unrepr=True)
                this_section.inline_comments[key] = comment
                this_section.comments[key] = comment_list
                continue
        #
        if self.indent_type is None:
            # no indentation used, set the type accordingly
            self.indent_type = ''

        # preserve the final comment
        if not self and not self.initial_comment:
            self.initial_comment = comment_list
        elif not reset_comment:
            self.final_comment = comment_list
        self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.
        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.
        * Don't quote values that don't need it.
        * Recursively quote members of a list and return a comma joined list.
        * Multiline is ``False`` for lists.
        * Obey list syntax for empty and single member lists.
        If ``list_values=False`` then the value is only quoted if it contains
        a ``\\n`` (is multiline) or '#'.
        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
        """
        if multiline and self.write_empty_values and value == '':
            # Only if multiline is set, so that it is used for values not
            # keys, and not values that are part of a list
            return ''
        if multiline and isinstance(value, (list, tuple)):
            if not value:
                # a bare comma denotes an empty list
                return ','
            elif len(value) == 1:
                # single-member lists need a trailing comma
                return self._quote(value[0], multiline=False) + ','
            return ', '.join([self._quote(val, multiline=False)
                for val in value])
        if not isinstance(value, basestring):
            if self.stringify:
                value = str(value)
            else:
                raise TypeError('Value "%s" is not a string.' % value)

        if not value:
            return '""'

        # decision flags for which quoting style (if any) is required
        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote

        if check_for_single:
            if not self.list_values:
                # we don't quote if ``list_values=False``
                quot = noquot
            # for normal values either single or double quotes will do
            elif '\n' in value:
                # will only happen if multiline is off - e.g. '\n' in key
                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
            elif ((value[0] not in wspace_plus) and
                    (value[-1] not in wspace_plus) and
                    (',' not in value)):
                quot = noquot
            else:
                quot = self._get_single_quote(value)
        else:
            # if value has '\n' or "'" *and* '"', it will need triple quotes
            quot = self._get_triple_quote(value)

        if quot == noquot and '#' in value and self.list_values:
            quot = self._get_single_quote(value)

        return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
    def _handle_value(self, value):
        """
        Given a value string, unquote, remove comment,
        handle lists. (including empty and single member lists)

        Returns a ``(value, comment)`` tuple; ``value`` is a list when the
        raw text contained comma-separated items.
        """
        if self._inspec:
            # Parsing a configspec so don't handle comments
            return (value, '')
        # do we look for lists in values ?
        if not self.list_values:
            mat = self._nolistvalue.match(value)
            if mat is None:
                raise SyntaxError()
            # NOTE: we don't unquote here
            return mat.groups()
        #
        mat = self._valueexp.match(value)
        if mat is None:
            # the value is badly constructed, probably badly quoted,
            # or an invalid list
            raise SyntaxError()
        # groups: leading list items, last/single item, lone comma, comment
        (list_values, single, empty_list, comment) = mat.groups()
        if (list_values == '') and (single is None):
            # change this if you want to accept empty values
            raise SyntaxError()
        # NOTE: note there is no error handling from here if the regex
        # is wrong: then incorrect values will slip through
        if empty_list is not None:
            # the single comma - meaning an empty list
            return ([], comment)
        if single is not None:
            # handle empty values
            if list_values and not single:
                # FIXME: the '' is a workaround because our regex now matches
                # '' at the end of a list if it has a trailing comma
                single = None
            else:
                single = single or '""'
                single = self._unquote(single)
        if list_values == '':
            # not a list value
            return (single, comment)
        the_list = self._listvalueexp.findall(list_values)
        the_list = [self._unquote(val) for val in the_list]
        if single is not None:
            the_list += [single]
        return (the_list, comment)
    def _multiline(self, value, infile, cur_index, maxline):
        """
        Extract the value, where we are in a multiline situation.

        Returns ``(value, comment, cur_index)`` with ``cur_index`` advanced
        to the line holding the closing triple quote.
        """
        quot = value[:3]
        newvalue = value[3:]
        # index 0: pattern for a value closed on the same line;
        # index 1: pattern for the closing line of a spanned value
        single_line = self._triple_quote[quot][0]
        multi_line = self._triple_quote[quot][1]
        mat = single_line.match(value)
        if mat is not None:
            retval = list(mat.groups())
            retval.append(cur_index)
            return retval
        elif newvalue.find(quot) != -1:
            # somehow the triple quote is missing
            raise SyntaxError()
        #
        while cur_index < maxline:
            cur_index += 1
            newvalue += '\n'
            line = infile[cur_index]
            if line.find(quot) == -1:
                newvalue += line
            else:
                # end of multiline, process it
                break
        else:
            # we've got to the end of the config, oops...
            raise SyntaxError()
        mat = multi_line.match(line)
        if mat is None:
            # a badly formed line
            raise SyntaxError()
        (value, comment) = mat.groups()
        return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError, e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError, e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
    def _set_configspec(self, section, copy):
        """
        Called by validate. Handles setting the configspec on subsections
        including sections to be validated by __many__
        """
        configspec = section.configspec
        many = configspec.get('__many__')
        if isinstance(many, dict):
            # a ``__many__`` spec applies to every subsection that has no
            # explicit spec of its own
            for entry in section.sections:
                if entry not in configspec:
                    section[entry].configspec = many

        for entry in configspec.sections:
            if entry == '__many__':
                continue
            if entry not in section:
                # create the missing section so defaults can be applied
                section[entry] = {}
                section[entry]._created = True
                if copy:
                    # copy comments
                    section.comments[entry] = configspec.comments.get(entry, [])
                    section.inline_comments[entry] = configspec.inline_comments.get(entry, '')

            # Could be a scalar when we expect a section
            if isinstance(section[entry], Section):
                section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
    """
    Write the current ConfigObj as a file
    tekNico: FIXME: use StringIO instead of real files
    >>> filename = a.filename
    >>> a.filename = 'test.ini'
    >>> a.write()
    >>> a.filename = filename
    >>> a == ConfigObj('test.ini', raise_errors=True)
    1
    >>> import os
    >>> os.remove('test.ini')
    """
    if self.indent_type is None:
        # this can be true if initialised from a dictionary
        self.indent_type = DEFAULT_INDENT_TYPE

    out = []
    cs = self._a_to_u('#')
    csp = self._a_to_u('# ')
    if section is None:
        # top-level call: disable interpolation so raw values are written,
        # then start with this object as the section to serialise
        int_val = self.interpolation
        self.interpolation = False
        section = self
        for line in self.initial_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)

    indent_string = self.indent_type * section.depth
    for entry in (section.scalars + section.sections):
        if entry in section.defaults:
            # don't write out default values
            continue
        for comment_line in section.comments[entry]:
            comment_line = self._decode_element(comment_line.lstrip())
            if comment_line and not comment_line.startswith(cs):
                comment_line = csp + comment_line
            out.append(indent_string + comment_line)
        this_entry = section[entry]
        comment = self._handle_comment(section.inline_comments[entry])

        if isinstance(this_entry, dict):
            # a section: write its marker line, then recurse for its members
            out.append(self._write_marker(
                indent_string,
                this_entry.depth,
                entry,
                comment))
            out.extend(self.write(section=this_entry))
        else:
            out.append(self._write_line(
                indent_string,
                entry,
                this_entry,
                comment))

    if section is self:
        for line in self.final_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
        # restore the interpolation setting saved at the top-level call
        self.interpolation = int_val

    if section is not self:
        # recursive call: hand the accumulated lines back to the parent
        return out

    if (self.filename is None) and (outfile is None):
        # output a list of lines
        # might need to encode
        # NOTE: This will *screw* UTF16, each line will start with the BOM
        if self.encoding:
            out = [l.encode(self.encoding) for l in out]
        if (self.BOM and ((self.encoding is None) or
                          (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
            # Add the UTF8 BOM
            if not out:
                out.append('')
            out[0] = BOM_UTF8 + out[0]
        return out

    # Turn the list to a string, joined with correct newlines
    newline = self.newlines or os.linesep
    if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
            and sys.platform == 'win32' and newline == '\r\n'):
        # Windows specific hack to avoid writing '\r\r\n'
        newline = '\n'
    output = self._a_to_u(newline).join(out)
    if self.encoding:
        output = output.encode(self.encoding)
    if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
        # Add the UTF8 BOM
        output = BOM_UTF8 + output

    if not output.endswith(newline):
        output += newline

    if outfile is not None:
        outfile.write(output)
    else:
        # write straight to the configured filename in binary mode
        h = open(self.filename, 'wb')
        h.write(output)
        h.close()
def validate(self, validator, preserve_errors=False, copy=False,
             section=None):
    """
    Test the ConfigObj against a configspec.
    It uses the ``validator`` object from *validate.py*.
    To run ``validate`` on the current ConfigObj, call: ::
        test = config.validate(validator)
    (Normally having previously passed in the configspec when the ConfigObj
    was created - you can dynamically assign a dictionary of checks to the
    ``configspec`` attribute of a section though).
    It returns ``True`` if everything passes, or a dictionary of
    pass/fails (True/False). If every member of a subsection passes, it
    will just have the value ``True``. (It also returns ``False`` if all
    members fail).
    In addition, it converts the values from strings to their native
    types if their checks pass (and ``stringify`` is set).
    If ``preserve_errors`` is ``True`` (``False`` is default) then instead
    of a marking a fail with a ``False``, it will preserve the actual
    exception object. This can contain info about the reason for failure.
    For example the ``VdtValueTooSmallError`` indicates that the value
    supplied was too small. If a value (or section) is missing it will
    still be marked as ``False``.
    You must have the validate module to use ``preserve_errors=True``.
    You can then use the ``flatten_errors`` function to turn your nested
    results dictionary into a flattened list of failures - useful for
    displaying meaningful error messages.
    """
    if section is None:
        # top-level call: sanity-check and prepare this instance
        if self.configspec is None:
            raise ValueError('No configspec supplied.')
        if preserve_errors:
            # We do this once to remove a top level dependency on the validate module
            # Which makes importing configobj faster
            from validate import VdtMissingValue
            self._vdtMissingValue = VdtMissingValue

        section = self

        if copy:
            # copy file-level metadata over from the configspec
            section.initial_comment = section.configspec.initial_comment
            section.final_comment = section.configspec.final_comment
            section.encoding = section.configspec.encoding
            section.BOM = section.configspec.BOM
            section.newlines = section.configspec.newlines
            section.indent_type = section.configspec.indent_type

    #
    # section.default_values.clear() #??
    configspec = section.configspec
    self._set_configspec(section, copy)

    def validate_entry(entry, spec, val, missing, ret_true, ret_false):
        # closure: checks one scalar against its spec, records the result
        # in ``out`` and returns the updated pass/fail flags
        section.default_values.pop(entry, None)
        try:
            section.default_values[entry] = validator.get_default_value(configspec[entry])
        except (KeyError, AttributeError, validator.baseErrorClass):
            # No default, bad default or validator has no 'get_default_value'
            # (e.g. SimpleVal)
            pass

        try:
            check = validator.check(spec,
                                    val,
                                    missing=missing
                                    )
        except validator.baseErrorClass, e:
            if not preserve_errors or isinstance(e, self._vdtMissingValue):
                out[entry] = False
            else:
                # preserve the error
                out[entry] = e
                ret_false = False
            ret_true = False
        else:
            ret_false = False
            out[entry] = True
            if self.stringify or missing:
                # if we are doing type conversion
                # or the value is a supplied default
                if not self.stringify:
                    if isinstance(check, (list, tuple)):
                        # preserve lists
                        check = [self._str(item) for item in check]
                    elif missing and check is None:
                        # convert the None from a default to a ''
                        check = ''
                    else:
                        check = self._str(check)
                if (check != val) or missing:
                    section[entry] = check
            if not copy and missing and entry not in section.defaults:
                section.defaults.append(entry)
        return ret_true, ret_false

    #
    # ret_true stays True only if every check passes;
    # ret_false stays True only if every check fails
    out = {}
    ret_true = True
    ret_false = True

    unvalidated = [k for k in section.scalars if k not in configspec]
    incorrect_sections = [k for k in configspec.sections if k in section.scalars]
    incorrect_scalars = [k for k in configspec.scalars if k in section.sections]

    for entry in configspec.scalars:
        if entry in ('__many__', '___many___'):
            # reserved names
            continue
        if (not entry in section.scalars) or (entry in section.defaults):
            # missing entries
            # or entries from defaults
            missing = True
            val = None
            if copy and entry not in section.scalars:
                # copy comments
                section.comments[entry] = (
                    configspec.comments.get(entry, []))
                section.inline_comments[entry] = (
                    configspec.inline_comments.get(entry, ''))
            #
        else:
            missing = False
            val = section[entry]

        ret_true, ret_false = validate_entry(entry, configspec[entry], val,
                                             missing, ret_true, ret_false)

    many = None
    if '__many__' in configspec.scalars:
        many = configspec['__many__']
    elif '___many___' in configspec.scalars:
        many = configspec['___many___']

    if many is not None:
        # extra scalars are validated against the catch-all spec
        for entry in unvalidated:
            val = section[entry]
            ret_true, ret_false = validate_entry(entry, many, val, False,
                                                 ret_true, ret_false)
        unvalidated = []

    for entry in incorrect_scalars:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Value %r was provided as a section' % entry
            out[entry] = validator.baseErrorClass(msg)
    for entry in incorrect_sections:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Section %r was provided as a single value' % entry
            out[entry] = validator.baseErrorClass(msg)

    # Missing sections will have been created as empty ones when the
    # configspec was read.
    for entry in section.sections:
        # FIXME: this means DEFAULT is not copied in copy mode
        if section is self and entry == 'DEFAULT':
            continue
        if section[entry].configspec is None:
            unvalidated.append(entry)
            continue
        if copy:
            section.comments[entry] = configspec.comments.get(entry, [])
            section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
        # recurse into the subsection
        check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
        out[entry] = check
        if check == False:
            ret_true = False
        elif check == True:
            ret_false = False
        else:
            ret_true = False

    section.extra_values = unvalidated
    if preserve_errors and not section._created:
        # If the section wasn't created (i.e. it wasn't missing)
        # then we can't return False, we need to preserve errors
        ret_false = False
    #
    if ret_false and preserve_errors and out:
        # If we are preserving errors, but all
        # the failures are from missing sections / values
        # then we can return False. Otherwise there is a
        # real failure that we need to preserve.
        ret_false = not any(out.values())
    if ret_true:
        return True
    elif ret_false:
        return False
    return out
def reset(self):
    """Clear ConfigObj instance and restore to 'freshly created' state."""
    self.clear()
    self._initialise()
    # '_initialise' cannot clear these itself: the ConfigObj constructor
    # (and reload) requires an empty dictionary from it.
    self.configspec = None
    # Just to be sure ;-)
    self._original_configspec = None
def reload(self):
    """
    Reload a ConfigObj from file.
    This method raises a ``ReloadError`` if the ConfigObj doesn't have
    a filename attribute pointing to a file.
    """
    if not isinstance(self.filename, basestring):
        raise ReloadError()

    filename = self.filename
    # snapshot every construction option except the configspec,
    # which is restored from the original (unparsed) one
    current_options = dict(
        (option, getattr(self, option))
        for option in OPTION_DEFAULTS
        if option != 'configspec')
    configspec = self._original_configspec
    current_options['configspec'] = configspec

    self.clear()
    self._initialise(current_options)
    self._load(filename, configspec)
class SimpleVal(object):
    """
    A simple validator.
    Can be used to check that all members expected are present.
    To use it, provide a configspec with all your members in (the value given
    will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
    method of your ``ConfigObj``. ``validate`` will return ``True`` if all
    members are present, or a dictionary with True/False meaning
    present/missing. (Whole missing sections will be replaced with ``False``)
    """

    def __init__(self):
        # failures are reported through the standard configobj error type
        self.baseErrorClass = ConfigObjError

    def check(self, check, member, missing=False):
        """A dummy check method, always returns the value unchanged."""
        if not missing:
            return member
        raise self.baseErrorClass()
def flatten_errors(cfg, res, levels=None, results=None):
    """
    An example function that will turn a nested dictionary of results
    (as returned by ``ConfigObj.validate``) into a flat list.
    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
    dictionary returned by ``validate``.
    (This is a recursive function, so you shouldn't use the ``levels`` or
    ``results`` arguments - they are used by the function.)
    Returns a list of keys that failed. Each member of the list is a tuple::
        ([list of sections...], key, result)
    If ``validate`` was called with ``preserve_errors=False`` (the default)
    then ``result`` will always be ``False``.
    *list of sections* is a flattened list of sections that the key was found
    in.
    If the section was missing (or a section was expected and a scalar provided
    - or vice-versa) then key will be ``None``.
    If the value (or section) was missing then ``result`` will be ``False``.
    If ``validate`` was called with ``preserve_errors=True`` and a value
    was present, but failed the check, then ``result`` will be the exception
    object returned. You can use this as a string that describes the failure.
    For example *The value "3" is of the wrong type*.
    """
    if levels is None:
        # first (non-recursive) call
        levels, results = [], []
    if res == True:
        # everything below this point passed
        return results
    if res == False or isinstance(res, Exception):
        # this whole section is missing, or failed as a unit
        results.append((levels[:], None, res))
    else:
        for key, result in res.items():
            if result == True:
                continue
            if isinstance(cfg.get(key), dict):
                # descend into the failing subsection
                levels.append(key)
                flatten_errors(cfg[key], result, levels, results)
            else:
                results.append((levels[:], key, result))
    # finished with this level: climb back up
    if levels:
        levels.pop()
    return results
def get_extra_values(conf, _prepend=()):
    """
    Find all the values and sections not in the configspec from a validated
    ConfigObj.
    ``get_extra_values`` returns a list of tuples where each tuple represents
    either an extra section, or an extra value.
    The tuples contain two values, a tuple representing the section the value
    is in and the name of the extra values. For extra values in the top level
    section the first member will be an empty tuple. For values in the 'foo'
    section the first member will be ``('foo',)``. For members in the 'bar'
    subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
    NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
    been validated it will return an empty list.
    """
    extras = [(_prepend, name) for name in conf.extra_values]
    for section_name in conf.sections:
        if section_name in conf.extra_values:
            # the whole section is already reported as extra
            continue
        extras += get_extra_values(conf[section_name],
                                   _prepend + (section_name,))
    return extras
"""*A programming language is a medium of expression.* - Paul Graham"""
| gpl-2.0 |
andrestr02/blender | measureit/src/measureit_render.py | 3 | 12230 | # ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# PEP8 compliant (https://www.python.org/dev/peps/pep-0008)
# ----------------------------------------------------------
# File: measureit_render.py
# support routines for render measures in final image
# Author: Antonio Vazquez (antonioya)
#
# ----------------------------------------------------------
# noinspection PyUnresolvedReferences
import bpy
# noinspection PyUnresolvedReferences
import bgl
# noinspection PyUnresolvedReferences
import blf
# noinspection PyUnresolvedReferences
import mathutils
# noinspection PyUnresolvedReferences
import bmesh
import os
import sys
# noinspection PyUnresolvedReferences
import bpy_extras.image_utils as img_utils
# noinspection PyUnresolvedReferences
import bpy_extras.object_utils as object_utils
# noinspection PyUnresolvedReferences
from bpy_extras import view3d_utils
from math import ceil
from measureit_geometry import *
# -------------------------------------------------------------
# Render image main entry point
#
# -------------------------------------------------------------
def render_main(self, context, animation=False):
    """Composite MeasureIt overlays onto the final render image.

    Saves the current render result to a temporary PNG, redraws all
    visible measure segments (and optional debug/frame decorations) over
    it tile by tile via OpenGL, stores the composite in the
    ``measureit_output`` image, and optionally writes it to disk.
    Returns True on success, False on any error (reported to the user).
    """
    # noinspection PyBroadException,PyBroadException
    try:
        # Get visible layers
        layers = []
        scene = context.scene
        for x in range(0, 20):
            if scene.layers[x] is True:
                layers.extend([x])
        # Get object list
        objlist = context.scene.objects
        # --------------------
        # Get resolution
        # --------------------
        scene = bpy.context.scene
        render_scale = scene.render.resolution_percentage / 100
        width = int(scene.render.resolution_x * render_scale)
        height = int(scene.render.resolution_y * render_scale)
        # ---------------------------------------
        # Get output path
        # ---------------------------------------
        ren_path = bpy.context.scene.render.filepath
        if len(ren_path) > 0:
            if ren_path.endswith(os.path.sep):
                initpath = os.path.realpath(ren_path) + os.path.sep
            else:
                (initpath, filename) = os.path.split(ren_path)
            outpath = os.path.join(initpath, "measureit_tmp_render.png")
        else:
            self.report({'ERROR'},
                        "MeasureIt: Unable to save temporary render image. Define a valid render path")
            return False

        # Get Render Image
        img = get_render_image(outpath)
        if img is None:
            self.report({'ERROR'},
                        "MeasureIt: Unable to save temporary render image. Define a valid render path")
            return False

        # -----------------------------
        # Calculate rows and columns
        # -----------------------------
        tile_x = 240
        tile_y = 216
        row_num = ceil(height / tile_y)
        col_num = ceil(width / tile_x)
        print("MeasureIt: Image divided in " + str(row_num) + "x" + str(col_num) + " tiles")

        # pixels out of visible area
        cut4 = (col_num * tile_x * 4) - width * 4  # pixels out of drawing area
        totpixel4 = width * height * 4  # total pixels RGBA

        # NOTE(review): the viewport is queried here but never restored
        # afterwards — confirm whether that is intended.
        viewport_info = bgl.Buffer(bgl.GL_INT, 4)
        bgl.glGetIntegerv(bgl.GL_VIEWPORT, viewport_info)

        # Load image on memory
        img.gl_load(0, bgl.GL_NEAREST, bgl.GL_NEAREST)
        tex = img.bindcode

        # --------------------------------------------
        # Create output image (to apply texture)
        # --------------------------------------------
        if "measureit_output" in bpy.data.images:
            out_img = bpy.data.images["measureit_output"]
            if out_img is not None:
                out_img.user_clear()
                bpy.data.images.remove(out_img)

        out = bpy.data.images.new("measureit_output", width, height)
        tmp_pixels = [1] * totpixel4

        # --------------------------------
        # Loop for all tiles
        # --------------------------------
        for row in range(0, row_num):
            for col in range(0, col_num):
                buffer = bgl.Buffer(bgl.GL_FLOAT, width * height * 4)
                bgl.glDisable(bgl.GL_SCISSOR_TEST)  # if remove this line, get blender screenshot not image
                bgl.glViewport(0, 0, tile_x, tile_y)

                bgl.glMatrixMode(bgl.GL_PROJECTION)
                bgl.glLoadIdentity()
                # defines orthographic view for single tile
                x1 = tile_x * col
                y1 = tile_y * row
                bgl.gluOrtho2D(x1, x1 + tile_x, y1, y1 + tile_y)

                # Clear
                bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
                bgl.glClear(bgl.GL_COLOR_BUFFER_BIT | bgl.GL_DEPTH_BUFFER_BIT)

                bgl.glEnable(bgl.GL_TEXTURE_2D)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, tex)

                # defines drawing area: draw the render as a textured quad
                bgl.glBegin(bgl.GL_QUADS)
                bgl.glColor3f(1.0, 1.0, 1.0)
                bgl.glTexCoord2f(0.0, 0.0)
                bgl.glVertex2f(0.0, 0.0)
                bgl.glTexCoord2f(1.0, 0.0)
                bgl.glVertex2f(width, 0.0)
                bgl.glTexCoord2f(1.0, 1.0)
                bgl.glVertex2f(width, height)
                bgl.glTexCoord2f(0.0, 1.0)
                bgl.glVertex2f(0.0, height)
                bgl.glEnd()

                # -----------------------------
                # Loop to draw all lines
                # -----------------------------
                for myobj in objlist:
                    if myobj.hide is False:
                        if 'MeasureGenerator' in myobj:
                            # verify visible layer
                            for x in range(0, 20):
                                if myobj.layers[x] is True:
                                    if x in layers:
                                        op = myobj.MeasureGenerator[0]
                                        draw_segments(context, myobj, op, None, None)
                                    break

                # -----------------------------
                # Loop to draw all debug
                # -----------------------------
                if scene.measureit_debug is True:
                    selobj = bpy.context.selected_objects
                    for myobj in selobj:
                        if scene.measureit_debug_vertices is True:
                            draw_vertices(context, myobj, None, None)
                        if scene.measureit_debug_faces is True or scene.measureit_debug_normals is True:
                            draw_faces(context, myobj, None, None)

                # optional rectangular frame around the whole image
                if scene.measureit_rf is True:
                    bgl.glColor3f(1.0, 1.0, 1.0)
                    rfcolor = scene.measureit_rf_color
                    rfborder = scene.measureit_rf_border
                    rfline = scene.measureit_rf_line

                    bgl.glLineWidth(rfline)
                    bgl.glColor4f(rfcolor[0], rfcolor[1], rfcolor[2], rfcolor[3])

                    x1 = rfborder
                    x2 = width - rfborder
                    y1 = int(math.ceil(rfborder / (width / height)))
                    y2 = height - y1
                    draw_rectangle((x1, y1), (x2, y2))

                # --------------------------------
                # copy pixels to temporary area
                # --------------------------------
                bgl.glFinish()
                bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_FLOAT, buffer)  # read image data
                for y in range(0, tile_y):
                    # final image pixels position
                    p1 = (y * width * 4) + (row * tile_y * width * 4) + (col * tile_x * 4)
                    p2 = p1 + (tile_x * 4)
                    # buffer pixels position
                    b1 = y * width * 4
                    b2 = b1 + (tile_x * 4)

                    if p1 < totpixel4:  # avoid pixel row out of area
                        if col == col_num - 1:  # avoid pixel columns out of area
                            p2 -= cut4
                            b2 -= cut4

                        tmp_pixels[p1:p2] = buffer[b1:b2]

        # -----------------------
        # Copy temporary to final
        # -----------------------
        out.pixels = tmp_pixels[:]  # Assign image data
        img.gl_free()  # free opengl image memory

        # delete image
        img.user_clear()
        bpy.data.images.remove(img)
        # remove temp file
        os.remove(outpath)
        # reset
        bgl.glEnable(bgl.GL_SCISSOR_TEST)
        # -----------------------
        # restore opengl defaults
        # -----------------------
        bgl.glLineWidth(1)
        bgl.glDisable(bgl.GL_BLEND)
        bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
        # Saves image
        if out is not None and (scene.measureit_render is True or animation is True):
            ren_path = bpy.context.scene.render.filepath
            filename = "mit_frame"
            if len(ren_path) > 0:
                if ren_path.endswith(os.path.sep):
                    initpath = os.path.realpath(ren_path) + os.path.sep
                else:
                    (initpath, filename) = os.path.split(ren_path)

            # frame number is appended so animations don't overwrite frames
            ftxt = "%04d" % scene.frame_current
            outpath = os.path.join(initpath, filename + ftxt + ".png")

            save_image(self, outpath, out)

        return True

    except:
        print("Unexpected error:" + str(sys.exc_info()))
        self.report({'ERROR'}, "MeasureIt: Unable to create render image")
        return False
# --------------------------------------------------------------------
# Get the final render image and return as image object
#
# return None if no render available
# --------------------------------------------------------------------
def get_render_image(outpath):
    """Save the current render result to *outpath* and reload it.

    Returns the loaded image object, or None if no render is available.
    """
    saved = False
    # noinspection PyBroadException
    try:
        # noinspection PyBroadException
        try:
            result = bpy.data.images['Render Result']
            if result.has_data is False:
                # this save forces Blender to fill in the image data
                result.save_render(outpath)
                saved = True
        except:
            print("No render image found")
            return None

        # save (unless already done above), then reload from disk
        if saved is False:
            result.save_render(outpath)
        return img_utils.load_image(outpath)
    except:
        print("Unexpected render image error")
        return None
# -------------------------------------
# Save image to file
# -------------------------------------
def save_image(self, filepath, myimage):
    """Save *myimage* to *filepath* as an 8-bit RGBA PNG.

    The scene's render output settings are changed temporarily to force
    the PNG format and are always restored afterwards, even if saving
    fails (the original version left them modified on error).
    Errors are reported to the operator instead of being raised.
    """
    # noinspection PyBroadException
    try:
        # Save old info
        settings = bpy.context.scene.render.image_settings
        myformat = settings.file_format
        mode = settings.color_mode
        depth = settings.color_depth
        try:
            # Apply new info and save
            settings.file_format = 'PNG'
            settings.color_mode = "RGBA"
            settings.color_depth = '8'
            myimage.save_render(filepath)
            print("MeasureIt: Image " + filepath + " saved")
        finally:
            # Restore old info even if save_render failed
            settings.file_format = myformat
            settings.color_mode = mode
            settings.color_depth = depth
    except:
        print("Unexpected error:" + str(sys.exc_info()))
        self.report({'ERROR'}, "MeasureIt: Unable to save render image")
        return
| gpl-2.0 |
rabernat/PyTables | examples/array4.py | 13 | 1713 | from __future__ import print_function
import numpy as np
import tables
basedim = 4
file = "array4.h5"
# Open a new empty HDF5 file
fileh = tables.open_file(file, mode="w")
# Get the root group
group = fileh.root
# Set the type codes to test
dtypes = [np.int8, np.uint8, np.int16, np.int, np.float32, np.float]
i = 1
for dtype in dtypes:
# Create an array of dtype, with incrementally bigger ranges
a = np.ones((basedim,) * i, dtype)
# Save it on the HDF5 file
dsetname = 'array_' + a.dtype.char
hdfarray = fileh.create_array(group, dsetname, a, "Large array")
print("Created dataset:", hdfarray)
# Create a new group
group = fileh.create_group(group, 'group' + str(i))
# increment the range for next iteration
i += 1
# Close the file
fileh.close()
# Open the previous HDF5 file in read-only mode
fileh = tables.open_file(file, mode="r")
# Get the root group
group = fileh.root
# Get the metadata on the previosly saved arrays
for i in range(len(dtypes)):
# Create an array for later comparison
a = np.ones((basedim,) * (i + 1), dtypes[i])
# Get the dset object hangin from group
dset = getattr(group, 'array_' + a.dtype.char)
print("Info from dataset:", repr(dset))
# Read the actual data in array
b = dset.read()
print("Array b read from file. Shape ==>", b.shape, end=' ')
print(". Dtype ==> %s" % b.dtype)
# Test if the original and read arrays are equal
if np.allclose(a, b):
print("Good: Read array is equal to the original")
else:
print("Error: Read array and the original differs!")
# Iterate over the next group
group = getattr(group, 'group' + str(i + 1))
# Close the file
fileh.close()
| bsd-3-clause |
gymnasium/edx-platform | lms/djangoapps/mobile_api/course_info/tests.py | 20 | 8097 | """
Tests for course_info
"""
import ddt
from django.conf import settings
from milestones.tests.utils import MilestonesTestCaseMixin
from nose.plugins.attrib import attr
from xmodule.html_module import CourseInfoModule
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import import_course_from_xml
from ..testutils import MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin
@attr(shard=3)
@ddt.ddt
class TestUpdates(MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
    """
    Tests for /api/mobile/v0.5/course_info/{course_id}/updates
    """
    REVERSE_INFO = {'name': 'course-updates-list', 'params': ['course_id']}

    def verify_success(self, response):
        """A successful response for a course with no updates is an empty list."""
        super(TestUpdates, self).verify_success(response)
        self.assertEqual(response.data, [])

    @ddt.data(True, False)
    def test_updates(self, new_format):
        """
        Tests updates endpoint with /static in the content.
        Tests both new updates format (using "items") and old format (using "data").
        """
        self.login_and_enroll()

        # create course Updates item in modulestore
        updates_usage_key = self.course.id.make_usage_key('course_info', 'updates')
        course_updates = modulestore().create_item(
            self.user.id,
            updates_usage_key.course_key,
            updates_usage_key.block_type,
            block_id=updates_usage_key.block_id
        )

        # store content in Updates item (either new or old format)
        num_updates = 3
        if new_format:
            # new format: list of dicts on the ``items`` attribute
            for num in range(1, num_updates + 1):
                course_updates.items.append(
                    {
                        "id": num,
                        "date": "Date" + str(num),
                        "content": "<a href=\"/static/\">Update" + str(num) + "</a>",
                        "status": CourseInfoModule.STATUS_VISIBLE
                    }
                )
        else:
            update_data = ""
            # old format stores the updates with the newest first
            for num in range(num_updates, 0, -1):
                update_data += "<li><h2>Date" + str(num) + "</h2><a href=\"/static/\">Update" + str(num) + "</a></li>"

            course_updates.data = u"<ol>" + update_data + "</ol>"

        modulestore().update_item(course_updates, self.user.id)

        # call API
        response = self.api_response()

        # verify static URLs are replaced in the content returned by the API
        self.assertNotIn("\"/static/", response.content)

        # verify static URLs remain in the underlying content
        underlying_updates = modulestore().get_item(updates_usage_key)
        underlying_content = underlying_updates.items[0]['content'] if new_format else underlying_updates.data
        self.assertIn("\"/static/", underlying_content)

        # verify content and sort order of updates (most recent first)
        for num in range(1, num_updates + 1):
            update_data = response.data[num_updates - num]
            self.assertEquals(num, update_data['id'])
            self.assertEquals("Date" + str(num), update_data['date'])
            self.assertIn("Update" + str(num), update_data['content'])
@attr(shard=3)
@ddt.ddt
class TestHandouts(MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
    """
    Tests for /api/mobile/v0.5/course_info/{course_id}/handouts
    """
    REVERSE_INFO = {'name': 'course-handouts-list', 'params': ['course_id']}

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_handouts(self, default_ms):
        """The endpoint returns the toy course's handout content."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            response = self.api_response(expected_response_code=200)
            self.assertIn("Sample", response.data['handouts_html'])

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_no_handouts(self, default_ms):
        """A deleted handouts block yields a null handouts_html."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            # delete handouts in course
            handouts_usage_key = self.course.id.make_usage_key('course_info', 'handouts')
            with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.course.id):
                self.store.delete_item(handouts_usage_key, self.user.id)
            response = self.api_response(expected_response_code=200)
            self.assertIsNone(response.data['handouts_html'])

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_empty_handouts(self, default_ms):
        """Handouts containing only empty tags are reported as null."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            # set handouts to empty tags
            handouts_usage_key = self.course.id.make_usage_key('course_info', 'handouts')
            underlying_handouts = self.store.get_item(handouts_usage_key)
            underlying_handouts.data = "<ol></ol>"
            self.store.update_item(underlying_handouts, self.user.id)
            response = self.api_response(expected_response_code=200)
            self.assertIsNone(response.data['handouts_html'])

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_handouts_static_rewrites(self, default_ms):
        """Relative /static/ asset links are rewritten in the response."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            # check that we start with relative static assets
            handouts_usage_key = self.course.id.make_usage_key('course_info', 'handouts')
            underlying_handouts = self.store.get_item(handouts_usage_key)
            self.assertIn('\'/static/', underlying_handouts.data)
            # but shouldn't finish with any
            response = self.api_response()
            self.assertNotIn('\'/static/', response.data['handouts_html'])

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_jump_to_id_handout_href(self, default_ms):
        """/jump_to_id/ links are expanded to course-absolute URLs."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            # replace the handouts content with an intra-course link
            handouts_usage_key = self.course.id.make_usage_key('course_info', 'handouts')
            underlying_handouts = self.store.get_item(handouts_usage_key)
            underlying_handouts.data = "<a href=\"/jump_to_id/identifier\">Intracourse Link</a>"
            self.store.update_item(underlying_handouts, self.user.id)
            # the response must carry the expanded course URL
            response = self.api_response()
            self.assertIn("/courses/{}/jump_to_id/".format(self.course.id), response.data['handouts_html'])

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_course_url_handout_href(self, default_ms):
        """/course/ links are expanded to course-absolute URLs."""
        with self.store.default_store(default_ms):
            self.add_mobile_available_toy_course()
            # replace the handouts content with a /course/ link
            handouts_usage_key = self.course.id.make_usage_key('course_info', 'handouts')
            underlying_handouts = self.store.get_item(handouts_usage_key)
            underlying_handouts.data = "<a href=\"/course/identifier\">Linked Content</a>"
            self.store.update_item(underlying_handouts, self.user.id)
            # the response must carry the expanded course URL
            response = self.api_response()
            self.assertIn("/courses/{}/".format(self.course.id), response.data['handouts_html'])

    def add_mobile_available_toy_course(self):
        """ use toy course with handouts, and make it mobile_available """
        course_items = import_course_from_xml(
            self.store, self.user.id,
            settings.COMMON_TEST_DATA_ROOT, ['toy'],
            create_if_not_present=True
        )
        self.course = course_items[0]
        self.course.mobile_available = True
        self.store.update_item(self.course, self.user.id)
        self.login_and_enroll()
| agpl-3.0 |
davidszotten/pytest | testing/code/test_code.py | 2 | 4979 | # coding: utf-8
from __future__ import absolute_import, division, print_function
import sys
import _pytest._code
import pytest
from six import text_type
from test_excinfo import TWMock
try:
import mock
except ImportError:
import unittest.mock as mock
def test_ne():
    """Two Code objects compare equal only for the same code object."""
    bar_code = _pytest._code.Code(compile('foo = "bar"', "", "exec"))
    assert bar_code == bar_code
    baz_code = _pytest._code.Code(compile('foo = "baz"', "", "exec"))
    assert baz_code != bar_code
def test_code_gives_back_name_for_not_existing_file():
    """A non-path filename survives as the Code path; no source exists."""
    fake_name = "abc-123"
    compiled = compile("pass\n", fake_name, "exec")
    assert compiled.co_filename == fake_name
    code = _pytest._code.Code(compiled)
    assert str(code.path) == fake_name
    assert code.fullsource is None
def test_code_with_class():
    """Code() must reject a class object (it only wraps functions/code)."""
    class A(object):
        pass

    # use the context-manager form: the string-eval form of
    # pytest.raises is deprecated and later removed
    with pytest.raises(TypeError):
        _pytest._code.Code(A)
# NOTE: test_code_source (below) asserts the exact source text of this
# helper, so its ``def``/``raise`` lines must stay byte-for-byte unchanged.
def x():
    raise NotImplementedError()
def test_code_fullsource():
    """fullsource of a wrapped function spans the whole defining module."""
    wrapped = _pytest._code.Code(x)
    whole_file = wrapped.fullsource
    # This very call site appears somewhere in the module's source.
    assert "test_code_fullsource()" in str(whole_file)
def test_code_source():
    """source() returns exactly the (deindented) text of the function."""
    wrapped = _pytest._code.Code(x)
    extracted = wrapped.source()
    # Must match the literal definition of x() above, byte for byte.
    expected = """def x():
    raise NotImplementedError()"""
    assert str(extracted) == expected
def test_frame_getsourcelineno_myself():
    """Frame.lineno indexes fullsource at the currently executing statement."""
    def func():
        return sys._getframe(0)
    f = func()
    f = _pytest._code.Frame(f)
    source, lineno = f.code.fullsource, f.lineno
    # The prefix encodes func()'s 8-space body indentation (nested def).
    assert source[lineno].startswith("        return sys._getframe(0)")
def test_getstatement_empty_fullsource():
    """With fullsource patched to None, Frame.statement degrades to ""."""

    def func():
        return sys._getframe(0)

    frame = _pytest._code.Frame(func())
    with mock.patch.object(frame.code.__class__, "fullsource", None):
        assert frame.statement == ""
def test_code_from_func():
    """Code() accepts a plain function and exposes its location info."""
    wrapped = _pytest._code.Code(test_frame_getsourcelineno_myself)
    assert wrapped.firstlineno
    assert wrapped.path
def test_unicode_handling():
    """Stringifying an ExceptionInfo carrying non-ascii bytes must not crash."""
    payload = u"ąć".encode("UTF-8")

    def raiser():
        raise Exception(payload)

    excinfo = pytest.raises(Exception, raiser)
    text_type(excinfo)
    # On Python 2, the bytes() path exercises the str() code as well.
    if sys.version_info < (3,):
        bytes(excinfo)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="python 2 only issue")
def test_unicode_handling_syntax_error():
    """str()/unicode() of a SyntaxError holding non-ascii bytes (py2 only)."""
    value = u"ąć".encode("UTF-8")
    def f():
        # SyntaxError detail tuple is (filename, lineno, offset, text).
        raise SyntaxError("invalid syntax", (None, 1, 3, value))
    excinfo = pytest.raises(Exception, f)
    str(excinfo)
    if sys.version_info[0] < 3:
        text_type(excinfo)
def test_code_getargs():
    """getargs(var=True) reports positional, *args and **kwargs names."""

    def f1(x):
        raise NotImplementedError()

    assert _pytest._code.Code(f1).getargs(var=True) == ("x",)

    def f2(x, *y):
        raise NotImplementedError()

    assert _pytest._code.Code(f2).getargs(var=True) == ("x", "y")

    def f3(x, **z):
        raise NotImplementedError()

    assert _pytest._code.Code(f3).getargs(var=True) == ("x", "z")

    def f4(x, *y, **z):
        raise NotImplementedError()

    assert _pytest._code.Code(f4).getargs(var=True) == ("x", "y", "z")
def test_frame_getargs():
    """Frame.getargs(var=True) pairs argument names with runtime values."""

    def f1(x):
        return sys._getframe(0)

    assert _pytest._code.Frame(f1("a")).getargs(var=True) == [("x", "a")]

    def f2(x, *y):
        return sys._getframe(0)

    assert _pytest._code.Frame(f2("a", "b", "c")).getargs(var=True) == [
        ("x", "a"), ("y", ("b", "c"))]

    def f3(x, **z):
        return sys._getframe(0)

    assert _pytest._code.Frame(f3("a", b="c")).getargs(var=True) == [
        ("x", "a"), ("z", {"b": "c"})]

    def f4(x, *y, **z):
        return sys._getframe(0)

    assert _pytest._code.Frame(f4("a", "b", c="d")).getargs(var=True) == [
        ("x", "a"), ("y", ("b",)), ("z", {"c": "d"})]
class TestExceptionInfo(object):
    """ExceptionInfo constructed with no argument inside an except block."""

    def test_bad_getsource(self):
        # The if/else shape gives the failing assert a multi-line enclosing
        # statement whose source extraction used to misbehave; getrepr()
        # must still produce a truthy representation.
        try:
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = _pytest._code.ExceptionInfo()
        assert exci.getrepr()
# NOTE: test_getsource() asserts exact line offsets within its own body; do
# not insert any lines between the ``def`` line and the failing ``assert``.
class TestTracebackEntry(object):
    def test_getsource(self):
        try:
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = _pytest._code.ExceptionInfo()
            entry = exci.traceback[0]
            source = entry.getsource()
            # Six lines: def, try, if, pass, else, assert (indices 0..5).
            assert len(source) == 6
            assert "assert False" in source[5]
class TestReprFuncArgs(object):
    """ReprFuncArgs rendering of mixed unicode/bytes argument values."""

    def test_not_raise_exception_with_mixed_encoding(self):
        from _pytest._code.code import ReprFuncArgs

        # TWMock (from test_excinfo) is a fake terminal writer that records
        # each written line in .lines.
        tw = TWMock()

        args = [("unicode_string", u"São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]

        r = ReprFuncArgs(args)
        r.toterminal(tw)
        if sys.version_info[0] >= 3:
            # py3 keeps the bytes repr; py2 decodes both values to text
            assert (
                tw.lines[0]
                == r"unicode_string = São Paulo, utf8_string = b'S\xc3\xa3o Paulo'"
            )
        else:
            assert tw.lines[0] == "unicode_string = São Paulo, utf8_string = São Paulo"
| mit |
DarkmoonFaire/server | dep/ACE_wrappers/bin/sets-manager.py | 94 | 7640 | #!/usr/bin/env python
""" This script implements branching and tagging in the DOC group
repository, and automates the process of creating sets. """
import os
def parse_args ():
    """Parse command line options.

    Returns (opts, args) where args holds exactly one element: the name of
    the branch or tag to create.  Exits with a usage error when the name,
    the action (-t/-b) or the project (-A/-T/-C) is missing.
    """
    from optparse import OptionParser
    parser = OptionParser ("usage: %prog [options] name")
    # -A/-T/-C are mutually exclusive store_const flags on 'project'.
    parser.add_option ("-A", "--ACE", dest="project", action="store_const",
                       help="Branch/tag only ACE", default=None, const="ace")
    parser.add_option ("-T", "--TAO", dest="project", action="store_const",
                       help="Branch/tag ACE and TAO", default=None, const="tao")
    parser.add_option ("-C", "--CIAO", dest="project", action="store_const",
                       help="Branch/tag ACE, TAO, and CIAO", default=None, const="ciao")
    # 'action' is a tri-state: None (unset), True (tag), False (branch).
    parser.add_option ("-t", "--tag", dest="action",
                       help="Create a tag", action="store_true", default=None)
    parser.add_option ("-b", "--branch", dest="action", action="store_false",
                       help="Create a branch", default=None)
    parser.add_option ("-v", "--verbose", dest="verbose", action="store_true",
                       help="Print out verbose debugging output", default=False)
    parser.add_option ("-s", "--svn", dest="svn", default="svn",
                       help="Full path to svn binary, if not in path")
    parser.add_option ("-r", "--repo", dest="repo",
                       default="https://svn.dre.vanderbilt.edu/DOC/Middleware/",
                       help="Repository to use, defaults to s.d.v.e/DOC/Middleware.")
    parser.add_option ("--src", dest="source", default="trunk/",
                       help="Path in repository from which to branch, defaults to trunk")
    parser.add_option ("--dest", dest="dest", default="",
                       help="Specifies a subdirectory of branches or tags in which " +
                            "to place the new branch/tag. dest must already exist.")
    # -n is a dry-run switch consulted by execute().
    parser.add_option ("-n", dest="take_action", action="store_false", default=True,
                       help="Take no action")

    (opts, args) = parser.parse_args ()

    if len(args) != 1:
        parser.error ("must specify exactly one branch or tag name")
    if opts.action is None:
        parser.error ("must specify either a branch or tag action")
    if opts.project is None:
        parser.error ("must specify a project to branch")

    return (opts, args)
def execute (command):
from os import system
if opts.verbose:
print "executing " + command
if opts.take_action and os.system (command) != 0:
raise Exception ("Command failed: " + command)
def svn_copy (source, dest):
    """Server-side ``svn copy`` from the *source* URL to the *dest* URL."""
    command = " ".join ([opts.svn,
                         "copy",
                         '-m "branching/tagging"',
                         source,
                         dest])
    execute (command)
def svn_propset (path, prop, value):
    """Set subversion property *prop* on *path* to *value*.

    The value is passed through a scratch file (-F) so multi-line values
    such as svn:externals survive shell quoting.
    """
    # Context manager guarantees the scratch file is flushed and closed
    # even if the write fails (the original left the handle open).
    with open ("sets_manager_props.tmp", 'w') as temp:
        temp.write (value)
    command = " ".join ([opts.svn,
                         "propset",
                         prop,
                         "-F sets_manager_props.tmp",
                         path])
    execute (command)
    os.unlink ("sets_manager_props.tmp")
def svn_mkdir (path):
    """Create *path* directly in the repository (commits immediately)."""
    command = " ".join ([opts.svn,
                         "mkdir",
                         '-m "branching/tagging"',
                         path])
    execute (command)


def svn_mkdir_local (path):
    """Schedule a mkdir in the local working copy (committed later)."""
    command = " ".join ([opts.svn,
                         "mkdir",
                         path])
    execute (command)
def get_head_revision (url):
    """Return the last-changed revision of *url* as an int.

    Scrapes ``svn info`` output; raises Exception when no
    "Last Changed Rev" line is found.
    """
    command = " ".join ([opts.svn,
                         "info",
                         url])
    import re
    # Raw string: "\d" in a plain literal is an invalid escape sequence.
    lineregex = re.compile (r"Last Changed Rev: (\d+)")
    for line in os.popen (command).readlines ():
        match = lineregex.match (line)
        if (match is not None):
            return int(match.group (1))
    # Carry the message in the exception itself instead of printing and
    # raising a bare Exception.
    raise Exception ("Unable to find current MPC head revision for " + url)
def branch_ACE ():
    """Copy trunk/ACE into the new location and pin its MPC external."""
    # Perform branching
    destination = opts.repo + opts.dest
    svn_copy (opts.repo + opts.source + "/ACE",
              destination + "modules/ACE")

    # pin MPC revision
    # Need local copy of the ACE directory to to the propset
    # execute ("svn up -N " + opts.repo + path + "/modules/ACE sets_manager_temp/module_ACE")
    execute ("svn up -N sets_manager_temp/modules/ACE")
    mpc_rev = get_head_revision ("svn://svn.dre.vanderbilt.edu/DOC/MPC/trunk")
    svn_propset ("sets_manager_temp/modules/ACE",
                 "svn:externals",
                 "%s\t-r %d %s" % ("MPC",
                                   mpc_rev,
                                   "svn://svn.dre.vanderbilt.edu/DOC/MPC/trunk"))

    #Create the set
    svn_mkdir_local ("sets_manager_temp/sets/ACE")
    svn_propset ("sets_manager_temp/sets/ACE",
                 "svn:externals",
                 "%s\t%s" % ("ACE_wrappers",
                             destination + "modules/ACE"))


def branch_TAO ():
    """Branch ACE first, then copy trunk/TAO and create the ACE+TAO set."""
    branch_ACE ()
    # Perform branching
    destination = opts.repo + opts.dest
    svn_copy (opts.repo + opts.source + "/TAO",
              destination + "modules/TAO")

    #Create the set
    svn_mkdir_local ("sets_manager_temp/sets/ACE+TAO")
    svn_propset ("sets_manager_temp/sets/ACE+TAO",
                 "svn:externals",
                 "%s\t%s\n%s\t%s" % ("ACE_wrappers",
                                     destination + "modules/ACE",
                                     "ACE_wrappers/TAO",
                                     destination + "modules/TAO"))


def branch_CIAO ():
    """Branch ACE and TAO, then copy trunk/CIAO and create the full set."""
    branch_TAO ()
    #Perform branching
    destination = opts.repo + opts.dest
    svn_copy (opts.repo + opts.source + "/CIAO",
              destination + "modules/CIAO")

    # Create the set
    svn_mkdir_local ("sets_manager_temp/sets/ACE+TAO+CIAO")
    svn_propset ("sets_manager_temp/sets/ACE+TAO+CIAO",
                 "svn:externals",
                 "%s\t%s\n%s\t%s\n%s\t%s" %
                 ("ACE_wrappers",
                  destination + "modules/ACE",
                  "ACE_wrappers/TAO",
                  destination + "modules/TAO",
                  "ACE_wrappers/TAO/CIAO",
                  destination + "modules/CIAO"))
def main (opts, args):
    """Create the branch/tag skeleton and populate it for the chosen project."""
    # Lets make opts global (the helper functions above all read it)
    globals ()['opts'] = opts

    path = str ()
    if opts.action:
        # True for tag
        path = "tags/"
    else: # Branch
        path = "branches/"

    path += "%s/%s" % (opts.dest, args[0])

    # Make branch/tag directory
    svn_mkdir (opts.repo + path)
    execute ("svn co " + opts.repo + path + " sets_manager_temp")

    # Make modules and sets subdirectory
    svn_mkdir_local ("sets_manager_temp/modules")
    svn_mkdir_local ("sets_manager_temp/sets")

    # commit the new directories
    execute ('svn commit -m "branching/tagging" sets_manager_temp')

    # opts.dest should now be set to path, all of the branching
    # functions assume dest now points to the branch/tag in which
    # the copies should be places
    opts.dest = path + '/'

    # Dispatch to the project-specific branching routine.
    {'ace': branch_ACE,
     'tao': branch_TAO,
     'ciao': branch_CIAO}[opts.project] ()

    # Commit the sets directory
    execute ('svn commit -m "branching/tagging" sets_manager_temp')

    # remove the sets directory (bottom-up walk so dirs empty out first)
    for root, dirs, files in os.walk ('sets_manager_temp', False):
        for name in files:
            os.remove (os.path.join (root, name))
        for name in dirs:
            os.rmdir (os.path.join (root, name))


if __name__ == "__main__":
    opts, args = parse_args ()
    main (opts, args)
| gpl-2.0 |
kcompher/abstract_rendering | abstract_rendering/numeric.py | 2 | 7604 | from __future__ import print_function, division, absolute_import
from six.moves import reduce
import numpy as np
import math
import abstract_rendering.core as core
import abstract_rendering.util as util
# ----------- Aggregators -----------
class Count(core.GlyphAggregator):
    """Count the number of items that fall into a particular grid element."""
    out_type = np.int32
    identity = 0

    def allocate(self, glyphset, screen):
        # Grids are indexed [row, col], hence the (height, width) swap.
        (width, height) = screen
        return np.zeros((height, width), dtype=self.out_type)

    def combine(self, existing, glyph, shapecode, val):
        # Each cell touched by the glyph gets +1; glyph is (x1, y1, x2, y2).
        update = self.glyphAggregates(glyph, shapecode, 1, self.identity)
        existing[glyph[1]:glyph[3], glyph[0]:glyph[2]] += update

    def rollup(self, *vals):
        # Element-wise sum of partial grids from parallel runs.
        return reduce(lambda x, y: x+y, vals)
class Sum(core.GlyphAggregator):
    """Sum the values of the items that fall into a particular grid element.

    (Docstring fixed: it was copy-pasted from Count; this aggregator adds
    ``val`` per glyph rather than counting.)
    """
    out_type = np.int32
    identity = 0

    def allocate(self, glyphset, screen):
        # Grids are indexed [row, col], hence the (height, width) swap.
        (width, height) = screen
        return np.zeros((height, width), dtype=self.out_type)

    def combine(self, existing, glyph, shapecode, val):
        # Unlike Count, the glyph's value is accumulated into each cell.
        update = self.glyphAggregates(glyph, shapecode, val, self.identity)
        existing[glyph[1]:glyph[3], glyph[0]:glyph[2]] += update

    def rollup(self, *vals):
        # Element-wise sum of partial grids from parallel runs.
        return reduce(lambda x, y: x+y, vals)
# -------------- Shaders -----------------
class Floor(core.CellShader):
    """Element-wise floor of the aggregate grid."""
    def shade(self, grid):
        return np.floor(grid)
class Interpolate(core.CellShader):
    """Interpolate between two numbers.

    Projects the input values between the low and high values passed.
    The Default is 0 to 1.
    Empty values are preserved (default is np.nan).
    """
    def __init__(self, low=0, high=1, empty=np.nan):
        self.low = low
        self.high = high
        self.empty = empty

    def shade(self, grid):
        # TODO: Gracefully handle if the whole grid is empty
        # BUG FIX: ``x == nan`` is always False, so with the default
        # empty=np.nan the original mask never matched an empty cell;
        # NaN markers need an explicit isnan test.
        if isinstance(self.empty, float) and np.isnan(self.empty):
            mask = np.isnan(grid)
        else:
            mask = (grid == self.empty)
        min = grid[~mask].min()
        max = grid[~mask].max()
        span = float(max-min)
        percents = (grid-min)/span
        return self.low + (percents * (self.high-self.low))
class Power(core.CellShader):
    """Raise to a power. Power may be fractional."""
    def __init__(self, pow):
        # ``pow`` shadows the builtin, but renaming it would break callers
        # using the keyword form; kept as-is.
        self.pow = pow

    def shade(self, grid):
        return np.power(grid, self.pow)


class Cuberoot(Power):
    """Power shader specialized to the cube root (x ** (1/3))."""
    def __init__(self):
        super(Cuberoot, self).__init__(1/3.0)


class Sqrt(core.CellShader):
    """Element-wise square root of the aggregate grid."""
    def shade(self, grid):
        return np.sqrt(grid)
class Spread(core.SequentialShader):
    """
    Spreads the values out in a regular pattern.

    * up/down/left/right : How far in each direction to spread
    * factor : When supplied (non-NaN), overrides all four directions

    TODO: Currently only does square spread. Extend to other shapes.
    TODO: Restricted to numbers right now...implement corresponding thing
    for categories...might be 'generic'
    """
    def __init__(self, up=1, down=1, left=1, right=1, factor=np.NaN):
        # factor, when given, wins over the individual directions
        if np.isnan(factor):
            self.up = up
            self.down = down
            self.left = left
            self.right = right
        else:
            self.up = factor
            self.down = factor
            self.left = factor
            self.right = factor

    def makegrid(self, grid):
        # Output grid is padded by the spread amounts on every side.
        height = grid.shape[0]
        width = grid.shape[1]
        others = grid.shape[2:]

        height = height + self.up + self.down
        width = width + self.left + self.right
        return np.zeros((height, width) + others, dtype=grid.dtype)

    def cellfunc(self, grid, x, y):
        # NOTE(review): the window looks asymmetric -- minx subtracts both
        # left and right while maxx is only x+1 (likewise for y); a centered
        # square window would be [x-left, x+right+1).  Confirm the intended
        # semantics against core.SequentialShader before changing.
        (height, width) = grid.shape
        minx = max(0, x-self.left-self.right)
        maxx = min(x+1, width)
        miny = max(0, y-self.up-self.down)
        maxy = min(y+1, height)

        parts = grid[miny:maxy, minx:maxx]
        return parts.sum()
class BinarySegment(core.CellShader):
    """
    Paint all pixels with aggregate value above divider one color
    and below the divider another. Divider is part of the 'high' region.

    TODO: Extend so out can be something other than colors
    """
    in_type = (1, np.number)
    # NOTE(review): out_type advertises np.int32 but shade() allocates a
    # uint8 grid -- confirm which dtype downstream consumers expect.
    out_type = (4, np.int32)

    def __init__(self, low, high, divider):
        self.high = high
        self.low = low
        # float() so integer grids still compare correctly against it
        self.divider = float(divider)

    def shade(self, grid):
        # (rows, cols); the RGBA channel axis is appended as length 4.
        (width, height) = grid.shape[0], grid.shape[1]
        outgrid = np.ndarray((width, height, 4), dtype=np.uint8)
        mask = (grid >= self.divider)
        outgrid[mask] = self.high
        outgrid[~mask] = self.low
        return outgrid
class InterpolateColors(core.CellShader):
    """
    High-definition interpolation between two colors.
    Zero-values are treated separately from other values.

    TODO: Remove log, just provide a shader to pre-transform the values
    TODO: Can this be combined with 'Interpolate'? Detect type at construction

    * low -- Color to use for lowest value
    * high -- Color to use for highest values
    * log -- Set to desired log base to use log-based interpolation
             (use True or "e" for base-e; default is False)
    * reserve -- color to use for empty cells
    * empty -- value marking an empty cell (default np.nan)
    """
    in_type = (1, np.number)
    out_type = (4, np.int32)

    def __init__(self,
                 low, high,
                 log=False,
                 reserve=util.Color(255, 255, 255, 255),
                 empty=np.nan):
        self.low = low
        self.high = high
        self.reserve = reserve
        self.log = log
        self.empty = empty

    def _empty_mask(self, grid):
        """Boolean mask of empty cells; NaN markers need isnan, not ==."""
        # BUG FIX: ``x == nan`` is always False, so with the default
        # empty=np.nan the original never detected empty cells.
        if isinstance(self.empty, float) and math.isnan(self.empty):
            return np.isnan(grid)
        return grid == self.empty

    def _project(self, percents, mask):
        """Map [0,1] percents onto the low..high color ramp (RGBA uint8)."""
        # int32 span avoids uint8 wraparound when a channel of low > high
        # (the original _log used uint8 subtraction; _linear already used
        # int32 -- now consistent).
        colorspan = (np.array(self.high, dtype=np.int32)
                     - np.array(self.low, dtype=np.int32))
        outgrid = (percents[:, :, np.newaxis]
                   * colorspan[np.newaxis, np.newaxis, :]
                   + np.array(self.low, dtype=np.uint8)).astype(np.uint8)
        outgrid[mask] = self.reserve
        return outgrid

    def _log(self, grid):
        mask = self._empty_mask(grid)
        min = grid[~mask].min()
        max = grid[~mask].max()
        grid[mask] = 1  # placeholder so log() is defined everywhere

        # BUG FIX: specific bases must be tested before the generic truthy
        # check.  The original ordered the branches 10, (e-or-truthy), 2,
        # generic -- so base 2 and every other explicit base silently fell
        # into the natural-log branch and the change-of-base code was dead.
        if self.log == 10:
            min = math.log10(min)
            max = math.log10(max)
            span = float(max - min)
            percents = (np.log10(grid) - min) / span
        elif self.log == 2:
            min = math.log(min, 2)
            max = math.log(max, 2)
            span = float(max - min)
            percents = (np.log2(grid) - min) / span
        elif self.log is True or self.log == "e" or self.log == math.e:
            min = math.log(min)
            max = math.log(max)
            span = float(max - min)
            percents = (np.log(grid) - min) / span
        else:
            # Arbitrary base via change-of-base: log_b(x) = ln(x)/ln(b).
            rebase = math.log(self.log)
            min = math.log(min, self.log)
            max = math.log(max, self.log)
            span = float(max - min)
            percents = ((np.log(grid) / rebase) - min) / span

        grid[mask] = 0  # restore the in-place marker as before
        return self._project(percents, mask)

    def _linear(self, grid):
        mask = self._empty_mask(grid)
        min = grid[~mask].min()
        max = grid[~mask].max()
        span = float(max - min)
        percents = (grid - min) / span
        return self._project(percents, mask)

    def shade(self, grid):
        if self.log:
            return self._log(grid)
        else:
            return self._linear(grid)
| bsd-3-clause |
gqwest-erp/server | openerp/addons/sale/__openerp__.py | 56 | 3440 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: this bare dict literal is evaluated by the
# module loader, not imported as Python code.
{
    'name': 'Sales Management',
    'version': '1.0',
    'category': 'Sales Management',
    'sequence': 14,
    'summary': 'Quotations, Sales Orders, Invoicing',
    'description': """
Manage sales quotations and orders
==================================
This application allows you to manage your sales goals in an effective and efficient manner by keeping track of all sales orders and history.
It handles the full sales workflow:
* **Quotation** -> **Sales order** -> **Invoice**
Preferences (only with Warehouse Management installed)
------------------------------------------------------
If you also installed the Warehouse Management, you can deal with the following preferences:
* Shipping: Choice of delivery at once or partial delivery
* Invoicing: choose how invoices will be paid
* Incoterms: International Commercial terms
You can choose flexible invoicing methods:
* *On Demand*: Invoices are created manually from Sales Orders when needed
* *On Delivery Order*: Invoices are generated from picking (delivery)
* *Before Delivery*: A Draft invoice is created and must be paid before delivery
The Dashboard for the Sales Manager will include
------------------------------------------------
* My Quotations
* Monthly Turnover (Graph)
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'images': ['images/sale_dashboard.jpeg','images/Sale_order_line_to_invoice.jpeg','images/sale_order.jpeg','images/sales_analysis.jpeg'],
    'depends': ['account_voucher'],
    'data': [
        'wizard/sale_make_invoice_advance.xml',
        'wizard/sale_line_invoice.xml',
        'wizard/sale_make_invoice.xml',
        'security/sale_security.xml',
        'security/ir.model.access.csv',
        'sale_workflow.xml',
        'sale_sequence.xml',
        'sale_report.xml',
        'sale_data.xml',
        'sale_view.xml',
        'res_partner_view.xml',
        'report/sale_report_view.xml',
        'process/sale_process.xml',
        'board_sale_view.xml',
        'edi/sale_order_action_data.xml',
        'res_config_view.xml',
    ],
    'demo': ['sale_demo.xml'],
    'test': [
        'test/sale_order_demo.yml',
        'test/manual_order_policy.yml',
        'test/cancel_order.yml',
        'test/delete_order.yml',
        'test/edi_sale_order.yml',
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
| agpl-3.0 |
Workday/OpenFrame | native_client_sdk/src/tools/nacl_config.py | 11 | 7496 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A helper script to print paths of NaCl binaries, includes, libs, etc.
It is similar in behavior to pkg-config or sdl-config.
"""
import argparse
import os
import posixpath
import sys
import getos
# Bail out early on pre-2.7 interpreters: argparse (used below) and the
# rest of the script require 2.7+.
if sys.version_info < (2, 7, 0):
  sys.stderr.write("python 2.7 or later is required run this script\n")
  sys.exit(1)
# Architectures accepted by NaCl toolchains; i686 and x86_32 are synonyms.
VALID_ARCHES = ('arm', 'x86_32', 'x86_64', 'i686')
# pnacl is architecture-neutral: arch is either omitted or 'pnacl'.
VALID_PNACL_ARCHES = (None, 'pnacl')
# arch -> toolchain-binary-prefix name (e.g. i686-nacl-gcc).
ARCH_NAME = {
  'arm': 'arm',
  'x86_32': 'i686',
  'i686': 'i686',
  'x86_64': 'x86_64'
}

# arch -> alternate spelling (x86_32 instead of i686).
ARCH_ALT_NAME = {
  'arm': 'arm',
  'x86_32': 'x86_32',
  'i686': 'x86_32',
  'x86_64': 'x86_64'
}

# arch -> family name used in toolchain directory names (x86 for 32/64-bit).
ARCH_BASE_NAME = {
  'arm': 'arm',
  'x86_32': 'x86',
  'i686': 'x86',
  'x86_64': 'x86'
}

NACL_TOOLCHAINS = ('glibc', 'pnacl', 'bionic', 'clang-newlib')
HOST_TOOLCHAINS = ('linux', 'mac', 'win')
# 'host' is a pseudo-toolchain resolved by CanonicalizeToolchain().
VALID_TOOLCHAINS = list(HOST_TOOLCHAINS) + list(NACL_TOOLCHAINS) + ['host']

# This is not an exhaustive list of tools, just the ones that need to be
# special-cased.
# e.g. For PNaCL cc => pnacl-clang
#      For NaCl cc => pnacl-gcc
#
# Most tools will be passed through directly.
# e.g. For PNaCl foo => pnacl-foo
#      For NaCl foo => x86_64-nacl-foo.
CLANG_TOOLS = {
  'cc': 'clang',
  'c++': 'clang++',
  'gcc': 'clang',
  'g++': 'clang++',
  'ld': 'clang++'
}

GCC_TOOLS = {
  'cc': 'gcc',
  'c++': 'g++',
  'gcc': 'gcc',
  'g++': 'g++',
  'ld': 'g++'
}
class Error(Exception):
  """User-facing validation error; caught in __main__ and printed."""
  pass
def Expect(condition, message):
  """Raise Error(message) unless *condition* is truthy."""
  if condition:
    return
  raise Error(message)
def ExpectToolchain(toolchain, expected_toolchains):
  """Validate *toolchain* against an allowed list (raises Error)."""
  Expect(toolchain in expected_toolchains,
         'Expected toolchain to be one of [%s], not %s.' % (
             ', '.join(expected_toolchains), toolchain))


def ExpectArch(arch, expected_arches):
  """Validate *arch* against an allowed list (raises Error)."""
  # map(str, ...) because the pnacl list legitimately contains None.
  Expect(arch in expected_arches,
         'Expected arch to be one of [%s], not %s.' % (
             ', '.join(map(str, expected_arches)), arch))
def CheckValidToolchainArch(toolchain, arch, arch_required=False):
  """Validate a (toolchain, arch) pair, raising Error when invalid.

  Host toolchains take no arch; pnacl takes only None/'pnacl'; NaCl
  toolchains take one of VALID_ARCHES (mandatory when arch_required).
  """
  if toolchain or arch or arch_required:
    ExpectToolchain(toolchain, VALID_TOOLCHAINS)

  if toolchain in HOST_TOOLCHAINS:
    Expect(arch is None,
           'Expected no arch for host toolchain %r. Got %r.' % (
               toolchain, arch))
  elif toolchain == 'pnacl':
    Expect(arch is None or arch == 'pnacl',
           'Expected no arch for toolchain %r. Got %r.' % (toolchain, arch))
  elif arch_required:
    Expect(arch is not None,
           'Expected arch to be one of [%s] for toolchain %r.\n'
           'Use the -a or --arch flags to specify one.\n' % (
               ', '.join(VALID_ARCHES), toolchain))

  if arch:
    if toolchain == 'pnacl':
      ExpectArch(arch, VALID_PNACL_ARCHES)
    else:
      ExpectArch(arch, VALID_ARCHES)
def GetArchName(arch):
  """Map arch to its toolchain-prefix name (arm, i686, x86_64)."""
  return ARCH_NAME.get(arch)


def GetArchAltName(arch):
  """Map arch to its alternate spelling (x86_32 instead of i686)."""
  return ARCH_ALT_NAME.get(arch)


def GetArchBaseName(arch):
  """Map arch to its family name (x86 for both 32/64-bit, arm)."""
  return ARCH_BASE_NAME.get(arch)


def CanonicalizeToolchain(toolchain):
  """Resolve the 'host' pseudo-toolchain to the current platform name."""
  if toolchain == 'host':
    return getos.GetPlatform()
  return toolchain


def GetPosixSDKPath():
  """SDK root with forward slashes, even on Windows."""
  sdk_path = getos.GetSDKPath()
  if getos.GetPlatform() == 'win':
    return sdk_path.replace('\\', '/')
  else:
    return sdk_path
def GetToolchainDir(toolchain, arch=None):
  """Root of a NaCl toolchain install, e.g. .../toolchain/linux_x86_glibc."""
  ExpectToolchain(toolchain, NACL_TOOLCHAINS)
  root = GetPosixSDKPath()
  platform = getos.GetPlatform()
  if toolchain in ('pnacl', 'clang-newlib'):
    # Both pnacl and clang-newlib live in the <platform>_pnacl directory.
    subdir = '%s_pnacl' % platform
  else:
    assert arch is not None
    subdir = '%s_%s_%s' % (platform, GetArchBaseName(arch), toolchain)

  return posixpath.join(root, 'toolchain', subdir)


def GetToolchainArchDir(toolchain, arch):
  """Arch-specific sysroot inside a toolchain (<arch>-nacl)."""
  ExpectToolchain(toolchain, NACL_TOOLCHAINS)
  assert arch is not None
  toolchain_dir = GetToolchainDir(toolchain, arch)
  arch_dir = '%s-nacl' % GetArchName(arch)
  return posixpath.join(toolchain_dir, arch_dir)


def GetToolchainBinDir(toolchain, arch=None):
  """bin/ directory of the given toolchain."""
  ExpectToolchain(toolchain, NACL_TOOLCHAINS)
  return posixpath.join(GetToolchainDir(toolchain, arch), 'bin')


def GetSDKIncludeDirs(toolchain):
  """Include dirs for a toolchain: common include/ plus its subdirectory."""
  root = GetPosixSDKPath()
  base_include = posixpath.join(root, 'include')
  if toolchain == 'clang-newlib':
    # clang-newlib shares newlib's headers.
    toolchain = 'newlib'
  return [base_include, posixpath.join(base_include, toolchain)]


def GetSDKLibDir():
  """lib/ directory at the SDK root."""
  return posixpath.join(GetPosixSDKPath(), 'lib')
# Commands
def GetToolPath(toolchain, arch, tool):
  """Absolute path of *tool* for the given toolchain/arch.

  gdb is special-cased to the single multi-arch binary shipped with the
  glibc/x86_64 toolchain; cc/c++/gcc/g++/ld map to clang or gcc spellings
  depending on the toolchain.
  """
  if tool == 'gdb':
    # Always use the same gdb; it supports multiple toolchains/architectures.
    # NOTE: this is always a i686 executable. i686-nacl-gdb is a symlink to
    # x86_64-nacl-gdb.
    return posixpath.join(GetToolchainBinDir('glibc', 'x86_64'),
                          'x86_64-nacl-gdb')

  if toolchain == 'pnacl':
    CheckValidToolchainArch(toolchain, arch)
    tool = CLANG_TOOLS.get(tool, tool)
    full_tool_name = 'pnacl-%s' % tool
  else:
    CheckValidToolchainArch(toolchain, arch, arch_required=True)
    ExpectArch(arch, VALID_ARCHES)
    if toolchain == 'clang-newlib':
      tool = CLANG_TOOLS.get(tool, tool)
    else:
      tool = GCC_TOOLS.get(tool, tool)
    full_tool_name = '%s-nacl-%s' % (GetArchName(arch), tool)
  return posixpath.join(GetToolchainBinDir(toolchain, arch), full_tool_name)


def GetCFlags(toolchain):
  """-I flags for every SDK include dir, space separated."""
  ExpectToolchain(toolchain, VALID_TOOLCHAINS)
  return ' '.join('-I%s' % dirname for dirname in GetSDKIncludeDirs(toolchain))


def GetIncludeDirs(toolchain):
  """SDK include dirs, space separated (no -I prefix)."""
  ExpectToolchain(toolchain, VALID_TOOLCHAINS)
  return ' '.join(GetSDKIncludeDirs(toolchain))


def GetLDFlags():
  """-L flag pointing at the SDK lib dir."""
  return '-L%s' % GetSDKLibDir()
def main(args):
  """Command-line entry point; returns a process exit status."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-t', '--toolchain', help='toolchain name. This can also '
                      'be specified with the NACL_TOOLCHAIN environment '
                      'variable.')
  parser.add_argument('-a', '--arch', help='architecture name. This can also '
                      'be specified with the NACL_ARCH environment variable.')
  group = parser.add_argument_group('Commands')
  group.add_argument('--tool', help='get tool path')
  group.add_argument('--cflags',
                     help='output all preprocessor and compiler flags',
                     action='store_true')
  group.add_argument('--libs', '--ldflags', help='output all linker flags',
                     action='store_true')
  group.add_argument('--include-dirs',
                     help='output include dirs, separated by spaces',
                     action='store_true')
  options = parser.parse_args(args)

  # Get toolchain/arch from environment, if not specified on commandline
  options.toolchain = options.toolchain or os.getenv('NACL_TOOLCHAIN')
  options.arch = options.arch or os.getenv('NACL_ARCH')

  options.toolchain = CanonicalizeToolchain(options.toolchain)
  CheckValidToolchainArch(options.toolchain, options.arch)

  # Python 2 print statements: this file predates py3 support.
  if options.cflags:
    print GetCFlags(options.toolchain)
  elif options.include_dirs:
    print GetIncludeDirs(options.toolchain)
  elif options.libs:
    print GetLDFlags()
  elif options.tool:
    print GetToolPath(options.toolchain, options.arch, options.tool)
  else:
    parser.error('Expected a command. Run with --help for more information.')
  return 0
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except Error as e:
    # User-facing validation failure: print the message, not a traceback.
    sys.stderr.write(str(e) + '\n')
    sys.exit(1)
| bsd-3-clause |
naslanidis/ansible | lib/ansible/modules/windows/win_package.py | 9 | 5063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <trond@hindenes.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_package
version_added: "1.7"
author: Trond Hindenes
short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- Installs or uninstalls a package.
- 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)'
options:
path:
description:
- Location of the package to be installed (either on file system, network share or url)
required: true
name:
description:
- Name of the package, if name isn't specified the path will be used for log messages
required: false
default: null
product_id:
description:
- Product id of the installed package (used for checking if already installed)
- You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)'
required: true
aliases: [productid]
arguments:
description:
- Any arguments the installer needs
default: null
required: false
state:
description:
- Install or Uninstall
choices:
- present
- absent
default: present
required: false
aliases: [ensure]
user_name:
description:
- Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
default: null
required: false
user_password:
description:
- Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
default: null
required: false
expected_return_code:
description:
- One or more return codes from the package installation that indicates success.
- If not provided, defaults to 0
required: no
default: 0
'''
EXAMPLES = r'''
- name: Install the Visual C thingy
win_package:
name: Microsoft Visual C thingy
path: http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe
product_id: '{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}'
arguments: /install /passive /norestart
- name: Install Remote Desktop Connection Manager from msi
win_package:
path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'
- name: Uninstall Remote Desktop Connection Manager installed from msi
win_package:
path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'
state: absent
# Specify the expected non-zero return code when successful
# In this case 3010 indicates 'reboot required'
- name: 'Microsoft .NET Framework 4.5.1'
win_package:
path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
productid: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
arguments: '/q /norestart'
ensure: present
expected_return_code: 3010
# Specify multiple non-zero return codes when successful
# In this case we can say that both 0 (SUCCESSFUL) and 3010 (REBOOT REQUIRED) codes are acceptable
- name: 'Microsoft .NET Framework 4.5.1'
win_package:
path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
productid: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
arguments: '/q /norestart'
ensure: present
expected_return_code: [0,3010]
'''
| gpl-3.0 |
nealtodd/django | django/contrib/gis/gdal/feature.py | 439 | 4153 | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
    """
    This class that wraps an OGR Feature, needs to be instantiated
    from a Layer object.
    """

    def __init__(self, feat, layer):
        """
        Initializes Feature from a pointer and its Layer object.
        """
        if not feat:
            raise GDALException('Cannot create OGR Feature, invalid pointer given.')
        self.ptr = feat
        self._layer = layer

    def __del__(self):
        "Releases a reference to this object."
        if self._ptr and capi:
            capi.destroy_feature(self._ptr)

    def __getitem__(self, index):
        """
        Gets the Field object at the specified index, which may be either
        an integer or the Field's string label.  Note that the Field object
        is not the field's _value_ -- use the `get` method instead to
        retrieve the value (e.g. an integer) instead of a Field instance.
        """
        if isinstance(index, six.string_types):
            i = self.index(index)
        else:
            # BUG FIX: valid integer indices are 0 .. num_fields - 1.  The
            # original check (`index > self.num_fields`) let index ==
            # num_fields slip through to an invalid OGR field access.
            if index < 0 or index >= self.num_fields:
                raise OGRIndexError('index out of range')
            i = index
        return Field(self, i)

    def __iter__(self):
        "Iterates over each field in the Feature."
        for i in range(self.num_fields):
            yield self[i]

    def __len__(self):
        "Returns the count of fields in this feature."
        return self.num_fields

    def __str__(self):
        "The string name of the feature."
        return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)

    def __eq__(self, other):
        "Does equivalence testing on the features."
        return bool(capi.feature_equal(self.ptr, other._ptr))

    # #### Feature Properties ####
    @property
    def encoding(self):
        return self._layer._ds.encoding

    @property
    def fid(self):
        "Returns the feature identifier."
        return capi.get_fid(self.ptr)

    @property
    def layer_name(self):
        "Returns the name of the layer for the feature."
        name = capi.get_feat_name(self._layer._ldefn)
        return force_text(name, self.encoding, strings_only=True)

    @property
    def num_fields(self):
        "Returns the number of fields in the Feature."
        return capi.get_feat_field_count(self.ptr)

    @property
    def fields(self):
        "Returns a list of fields in the Feature."
        return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
                for i in range(self.num_fields)]

    @property
    def geom(self):
        "Returns the OGR Geometry for this Feature."
        # Retrieving the geometry pointer for the feature; cloned so the
        # returned OGRGeometry owns its own memory.
        geom_ptr = capi.get_feat_geom_ref(self.ptr)
        return OGRGeometry(geom_api.clone_geom(geom_ptr))

    @property
    def geom_type(self):
        "Returns the OGR Geometry Type for this Feature."
        return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))

    # #### Feature Methods ####
    def get(self, field):
        """
        Returns the value of the field, instead of an instance of the Field
        object.  May take a string of the field name or a Field object as
        parameters.
        """
        field_name = getattr(field, 'name', field)
        return self[field_name].value

    def index(self, field_name):
        "Returns the index of the given field name."
        i = capi.get_field_index(self.ptr, force_bytes(field_name))
        if i < 0:
            raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
        return i
| bsd-3-clause |
rajanandakumar/DIRAC | DataManagementSystem/Agent/NamespaceBrowser.py | 7 | 1213 | # ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ###
# ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ### ABANDONWARE ###
#
# import random,types
# from DIRAC.Core.Utilities.List import sortList
#
# class NamespaceBrowser:
#
# def __init__(self,baseDir,sort=False):
# if type(baseDir) == types.ListType:
# self.activeDirs = baseDir
# else:
# self.activeDirs = [baseDir]
# self.sort = False
# if sort:
# self.sort = True
# self.activeDirs = sortList(self.activeDirs)
# self.activeDir = self.activeDirs[0]
# self.baseDir = baseDir
#
# def isActive(self):
# if self.activeDirs:
# return True
# else:
# return False
#
# def getNumberActiveDirs(self):
# return len(self.activeDirs)
#
# def getBaseDir(self):
# return self.baseDir
#
# def getActiveDir(self):
# #random.shuffle(self.activeDirs)
# if self.sort:
# self.activeDirs = sortList(self.activeDirs)
# self.activeDir = self.activeDirs[0]
# return self.activeDir
#
# def updateDirs(self,subDirs):
# self.activeDirs.extend(subDirs)
# self.activeDirs.remove(self.activeDir)
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
    """Attach *doc* to *func* as its docstring.

    Several shims in this module are defined differently per Python
    version; this lets a single shared description be applied afterwards.
    """
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import module, returning the module after the last dot.

    A bare ``__import__("a.b")`` returns the top-level package ``a``;
    the module actually requested is fetched back out of ``sys.modules``.
    """
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    # Descriptor that resolves its target lazily on first access.
    # Subclasses implement _resolve() to produce the real object; the
    # descriptor then caches the result on the instance and removes
    # itself from the class so later lookups are plain attribute access.
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)      # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            # Another thread (or a shared base class) may have already
            # removed the descriptor.
            pass
        return result
class MovedModule(_LazyDescr):
    # Lazy reference to a module that was renamed between Python 2 and 3.
    # *old* is the Python 2 name; *new* is the Python 3 name and defaults
    # to *name* when omitted.  Resolution imports whichever name applies
    # to the running interpreter.
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        # Invoked when the MovedModule itself is installed in sys.modules
        # as a module stand-in (see _SixMetaPathImporter): forward to the
        # real module and cache the attribute locally.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    # Module subclass whose attributes are _LazyDescr placeholders that
    # resolve to real modules/objects on first access.  Subclasses (e.g.
    # _MovedItems) list their placeholders in _moved_attributes.
    # NOTE: no class docstring is added here on purpose -- __init__ copies
    # self.__class__.__doc__ into the module's __doc__ at runtime.
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazy attributes without resolving them.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # Lazy reference to an attribute (function or class) that moved
    # between modules from Python 2 to 3.  Stores the module and
    # attribute names appropriate to the running interpreter;
    # _resolve() imports the module and fetches the attribute.
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The Python 3 attribute name defaults to the Python 2 one,
            # and both default to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("<six>.moves.x") to module objects
        # or MovedModule placeholders (resolved at load time).
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more names relative to the six module.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        # Look up a registered module by its name relative to six.
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP 302 finder: only claim modules we registered ourselves.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP 302 loader.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Placeholder: swap in the real, version-appropriate module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    *move* is a MovedAttribute or MovedModule; installing it on
    _MovedItems makes it resolvable as ``six.moves.<name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if *name* was never registered.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a class-level placeholder; it may have been resolved already
        # and cached on the ``moves`` module instance itself.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertCountEqual."""
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertRaisesRegex."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertRegex."""
    return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] >= (3, 4):
    # functools.wraps already records the wrapped callable as
    # ``__wrapped__`` on 3.4+, so it can be used directly.
    wraps = functools.wraps
else:
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """functools.wraps shim that also sets ``__wrapped__``."""
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class that, when subclassed, replaces itself with
    a real class constructed by *meta* from *bases* -- so one ``class``
    statement works identically on Python 2 and Python 3.
    """
    class _metaclass(meta):
        # Intercept creation of the user's subclass and hand construction
        # over to the real metaclass with the intended bases.
        def __new__(mcls, name, this_bases, namespace):
            return meta(name, bases, namespace)
    return type.__new__(_metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class from its own namespace using
    *metaclass*, dropping the descriptors the class machinery will
    regenerate (slot members, __dict__, __weakref__).
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            # Slot descriptors are re-created by the new class; keeping
            # the stale ones would shadow the fresh descriptors.
            for member in slots:
                body.pop(member)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        # Python 3: str already returns text, so nothing needs adapting.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # The author's text-returning __str__ becomes __unicode__, and
    # __str__ is replaced by a UTF-8 encoding wrapper.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-2.0 |
raycarnes/account-financial-tools | account_tax_analysis/__openerp__.py | 18 | 1258 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Tax analysis",
 "version": "1.0",
 # Core framework plus the accounting application.
 "depends": ["base", "account"],
 "author": "Camptocamp SA,Odoo Community Association (OCA)",
 "category": 'Accounting & Finance',
 "website": "http://www.camptocamp.com",
 "license": "AGPL-3",
 # XML views/data loaded at module install/update time.
 "data": ["account_tax_analysis_view.xml"],
 'installable': True,
 # Not enabled automatically on database creation.
 "active": False,
 }
| agpl-3.0 |
sysbot/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_cache.py | 106 | 16980 | #!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, unittest
from pyutil.assertutil import _assert
from pyutil.humanreadable import hr
from pyutil import memutil
from pyutil import cache
class Bencher:
    # Micro-benchmark harness for the LRU cache classes in pyutil.cache.
    # For each benchmark size n, _generic_benchmarking_init() prepares a
    # plain dict `self.d` of n items plus a pre-populated cache, and the
    # _benchmark_* methods each time one cache usage pattern.
    # (Python 2 code: print statements, iteritems/has_key, int division.)
    def __init__(self, klass, MAXREPS=2**8, MAXTIME=5):
        # klass: cache class under test; must accept `initialdata` and
        # `maxsize` keyword arguments (like pyutil.cache.LRUCache).
        print klass
        self.klass = klass
        self.MAXREPS = MAXREPS
        self.MAXTIME = MAXTIME
        self.d = {}
        self.lrun = None
    def _generic_benchmarking_init(self, n):
        # Reset state before each timed run: keys 0..n-1 in self.d and a
        # fresh cache of maxsize n holding the disjoint keys n..2n-1.
        self.d.clear()
        # NOTE(review): this `global lrun` looks vestigial -- the method
        # assigns self.lrun below and never touches a module-level `lrun`.
        global lrun
        self.lrun = self.klass(maxsize=n)
        for i in range(n):
            self.d[i] = i
            self.lrun[n+i] = n+i
    def _benchmark_init(self, n):
        # Construct a cache of maxsize n/2 directly from initialdata.
        MAXSIZE=n/2
        d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
        assert len(d2) == min(len(self.d), MAXSIZE)
        return True
    def _benchmark_update(self, n):
        # Fill an empty cache via a single bulk update().
        MAXSIZE=n/2
        d2 = self.klass(maxsize=MAXSIZE)
        assert len(d2) == 0
        d2.update(self.d)
        assert len(d2) == min(len(self.d), MAXSIZE)
        return True
    def _benchmark_insert(self, n):
        # Fill an empty cache one item at a time (eviction exercised once
        # the cache exceeds maxsize).
        MAXSIZE=n/2
        d2 = self.klass(maxsize=MAXSIZE)
        assert len(d2) == 0
        for k, v, in self.d.iteritems():
            d2[k] = v
        assert len(d2) == min(len(self.d), MAXSIZE)
        return True
    def _benchmark_init_and_popitem(self, n):
        # Drain a populated cache with popitem(), checking the size on
        # every step.
        MAXSIZE=n/2
        d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
        assert len(d2) == min(len(self.d), MAXSIZE)
        for i in range(len(d2), 0, -1):
            assert len(d2) == i
            d2.popitem()
        return True
    def _benchmark_init_and_has_key_and_del(self, n):
        # Membership test followed by deletion for every key of self.d
        # (only about half of them are present in the cache).
        MAXSIZE=n/2
        d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
        assert len(d2) == min(len(self.d), MAXSIZE)
        for k in self.d.iterkeys():
            if d2.has_key(k):
                del d2[k]
        return True
    def _benchmark_init_and_remove(self, n):
        # Unconditional remove() with strictkey=False, so missing keys
        # are ignored instead of raising.
        MAXSIZE=n/2
        d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
        assert len(d2) == min(len(self.d), MAXSIZE)
        for k in self.d.iterkeys():
            d2.remove(k, strictkey=False)
        return True
    def bench(self, BSIZES=(128, 250, 2048, 5000, 2**13, 2**20,)):
        # Run every benchmark method at every size in BSIZES, delegating
        # timing/repetition to pyutil.benchutil.rep_bench.
        from pyutil import benchutil
        funcs = ("_benchmark_insert", "_benchmark_init_and_has_key_and_del", "_benchmark_init_and_remove", "_benchmark_init_and_popitem", "_benchmark_update", "_benchmark_init",)
        # Compute the longest method name so the output columns line up.
        max = 0
        for func in funcs:
            if len(func) > max:
                max = len(func)
        for func in funcs:
            print func + " " * (max + 1 - len(func))
            for BSIZE in BSIZES:
                f = getattr(self, func)
                benchutil.rep_bench(f, BSIZE, self._generic_benchmarking_init, MAXREPS=self.MAXREPS, MAXTIME=self.MAXTIME)
def quick_bench():
    """Abbreviated benchmark run (MAXTIME=2) over every cache implementation."""
    runs = (
        (cache.LRUCache, (2**7, 2**12, 2**14, 2**15, 2**16,)),
        (cache.LinkedListLRUCache, (2**7, 2**12, 2**14, 2**15,)),
        (cache.SmallLRUCache, (2**7, 2**12, 2**14, 2**15,)),
    )
    for cache_class, sizes in runs:
        Bencher(cache_class, MAXTIME=2).bench(BSIZES=sizes)
def slow_bench():
    """Thorough benchmark run (MAXTIME=5) over every cache implementation."""
    runs = (
        (cache.LRUCache, 21),
        (cache.LinkedListLRUCache, 21),
        (cache.SmallLRUCache, 17),
    )
    for cache_class, stop_exp in runs:
        sizes = [2**exp for exp in range(7, stop_exp)]
        Bencher(cache_class, MAXTIME=5).bench(BSIZES=sizes)
# Sizes for the "much adding, some removing" stress test.
MUCHADDINGSIZE=2**4
MUCHADDINGNUM = 2**4

# The following parameters are for testing for memory leakage.
MIN_SLOPE = 512.0 # If it leaks less than 512.0 bytes per iteration, then it's probably just some kind of noise from the interpreter or something...
SAMPLES = 2**5
# MIN_SLOPE is high because SAMPLES is low, which is because taking a statistically useful number of samples takes too long.
# For a *good* test, turn SAMPLES up as high as you can stand (maybe 2**10) and set MIN_SLOPE to about 1.0.
# For a *really* good test, add a variance measure to memutil.measure_mem_leakage(), and only consider it to be leaking if the slope is > 0.1 *and* is a "pretty good" fit for the data.
# MIN_SLOPE = 1.0
# SAMPLES = 2**10
class Testy(unittest.TestCase):
    """Unit tests for the pyutil cache classes (Python 2).

    The ``_test_*`` helpers take either a cache *instance* (parameter ``d``
    or ``c``) or a cache *class* (parameter ``C``); ``_test_em`` drives them
    over every cache implementation.  The ``test_*leakage*`` entry points
    are deleted at class-definition time because they take too long to run
    routinely — see the ``del`` statements below.
    """
    def _test_empty_lookup(self, d) :
        self.failUnless(d.get('spam') is None)

    def _test_key_error(self, C) :
        d = C()
        try:
            d['spam']
            self.fail(d)
        except KeyError :
            pass

    def _test_insert_and_get(self, d) :
        d.insert("spam", "eggs")
        d["spam2"] = "eggs2"
        self.failUnless(d.get("spam") == "eggs", str(d))
        self.failUnless(d.get("spam2") == "eggs2")
        self.failUnless(d["spam"] == "eggs")
        self.failUnless(d["spam2"] == "eggs2")

    def _test_insert_and_remove(self, d):
        d.insert('spam', "eggs")
        self.failUnless(d.has_key('spam'))
        self.failUnless(d.get('spam') == "eggs")
        self.failUnless(d['spam'] == "eggs")
        x = d.remove('spam')
        self.failUnless(x == "eggs", "x: %s" % `x`)
        self.failUnless(not d.has_key('spam'))
        # Same round-trip again via __setitem__/__delitem__.
        d['spam'] = "eggs"
        self.failUnless(d.has_key('spam'))
        self.failUnless(d.get('spam') == "eggs")
        self.failUnless(d['spam'] == "eggs")
        del d['spam']
        self.failUnless(not d.has_key('spam'))

    def _test_setdefault(self, d):
        d.setdefault('spam', "eggs")
        self.failUnless(d.has_key('spam'))
        self.failUnless(d.get('spam') == "eggs")
        self.failUnless(d['spam'] == "eggs")
        x = d.remove('spam')
        self.failUnless(x == "eggs", "x: %s" % `x`)
        self.failUnless(not d.has_key('spam'))

    def _test_extracted_bound_method(self, d):
        # insert() must work when called through an extracted bound method.
        insmeth = d.insert
        insmeth('spammy', "eggsy")
        self.failUnless(d.get('spammy') == "eggsy")

    def _test_extracted_unbound_method(self, d):
        # ... and through an unbound method extracted from the class.
        insumeth = d.__class__.insert
        insumeth(d, 'spammy', "eggsy")
        self.failUnless(d.get('spammy') == "eggsy")

    def _test_unbound_method(self, C, d):
        umeth = C.insert
        umeth(d, 'spammy', "eggsy")
        self.failUnless(d.get('spammy') == "eggsy")

    def _test_clear(self, d):
        d[11] = 11
        d._assert_invariants()
        self.failUnless(len(d) == 1)
        d.clear()
        d._assert_invariants()
        self.failUnless(len(d) == 0)

    def _test_update(self, d):
        self.failUnless(d._assert_invariants())
        d['b'] = 99
        self.failUnless(d._assert_invariants())
        d2={ 'a': 0, 'b': 1, 'c': 2,}
        d.update(d2)
        self.failUnless(d._assert_invariants())
        self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
        self.failUnless(d._assert_invariants())
        self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
        self.failUnless(d._assert_invariants())
        self.failUnless(d.get('c') == 2)
        self.failUnless(d._assert_invariants())

    def _test_popitem(self, C):
        c = C({"a": 1})
        res = c.popitem()
        _assert(res == ("a", 1,), C, c, res)
        self.failUnless(res == ("a", 1,))

    def _test_iterate_items(self, C):
        c = C({"a": 1})
        i = c.iteritems()
        x = i.next()
        self.failUnless(x == ("a", 1,))
        try:
            i.next()
            self.fail() # Should have gotten StopIteration exception
        except StopIteration:
            pass

    def _test_iterate_keys(self, C):
        c = C({"a": 1})
        i = c.iterkeys()
        x = i.next()
        self.failUnless(x == "a")
        try:
            i.next()
            self.fail() # Should have gotten StopIteration exception
        except StopIteration:
            pass

    def _test_iterate_values(self, C):
        c = C({"a": 1})
        i = c.itervalues()
        x = i.next()
        self.failUnless(x == 1)
        try:
            i.next()
            self.fail() # Should have gotten StopIteration exception
        except StopIteration:
            pass

    def _test_LRU_much_adding_some_removing(self, C):
        # Stress test: many inserts with an occasional random delete,
        # then refill; the cache must end up exactly full.
        c = C(maxsize=MUCHADDINGSIZE)
        for i in range(MUCHADDINGNUM):
            c[i] = i
            if (i % 400) == 0:
                k = random.choice(c.keys())
                del c[k]
        for i in range(MUCHADDINGSIZE):
            c[i] = i
        self.failUnless(len(c) == MUCHADDINGSIZE)

    def _test_LRU_1(self, C):
        # Repeated overwrites of one key must keep invariants intact.
        c = C(maxsize=10)
        c[11] = 11
        c._assert_invariants()
        c[11] = 11
        c._assert_invariants()
        c[11] = 1001
        c._assert_invariants()
        c[11] = 11
        c._assert_invariants()
        c[11] = 1001
        c._assert_invariants()
        c[11] = 1001
        c._assert_invariants()
        c[11] = 1001
        c._assert_invariants()

    def _test_LRU_2(self, C):
        # Insert / delete / re-insert of the same key.
        c = C(maxsize=10)
        c[11] = 11
        c._assert_invariants()
        del c[11]
        c._assert_invariants()
        c[11] = 11
        c._assert_invariants()
        c[11] = 11
        c._assert_invariants()

    def _test_LRU_3(self, C):
        # Overwrites with distinct values, a delete in the middle.
        c = C(maxsize=10)
        c[11] = 11
        c._assert_invariants()
        c[11] = 12
        c._assert_invariants()
        c[11] = 13
        c._assert_invariants()
        del c[11]
        c._assert_invariants()
        c[11] = 14
        c._assert_invariants()
        c[11] = 15
        c._assert_invariants()
        c[11] = 16
        c._assert_invariants()

    def _test_LRU_full(self, C):
        # Behavior at and beyond capacity: oldest entries must be evicted.
        c = C(maxsize=10)
        c._assert_invariants()
        for i in xrange(11):
            c._assert_invariants()
            c[i] = i
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(10 in c.values(), c.values())
        self.failUnless(0 not in c.values())
        del c[1]
        c._assert_invariants()
        self.failUnless(1 not in c.values())
        self.failUnless(len(c) == 9)
        c[11] = 11
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(1 not in c.values())
        self.failUnless(11 in c.values())
        del c[11]
        c._assert_invariants()
        c[11] = 11
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(1 not in c.values())
        self.failUnless(11 in c.values())
        c[11] = 11
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(1 not in c.values())
        self.failUnless(11 in c.values())
        for i in xrange(200):
            c[i] = i
        c._assert_invariants()
        self.failUnless(199 in c.values())
        self.failUnless(190 in c.values())

    def _test_LRU_has_key(self, C):
        # Touching an entry must refresh it so something else gets evicted.
        c = C(maxsize=10)
        c._assert_invariants()
        for i in xrange(11):
            c._assert_invariants()
            c[i] = i
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(10 in c.values())
        self.failUnless(0 not in c.values())
        # c.has_key(1) # this touches `1' and makes it fresher so that it will live and `2' will die next time we overfill.
        c[1] = 1 # this touches `1' and makes it fresher so that it will live and `2' will die next time we overfill.
        c._assert_invariants()
        c[99] = 99
        c._assert_invariants()
        self.failUnless(len(c) == 10)
        self.failUnless(1 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
        self.failUnless(not 2 in c.values())
        self.failUnless(99 in c.values())

    def _test_LRU_not_overfull_on_idempotent_add(self, C):
        # Re-adding an existing key must not trigger a spurious eviction.
        c = C(maxsize=10)
        for i in xrange(11):
            c[i] = i
        c[1] = "spam"
        # Now 1 is the freshest, so 2 is the next one that would be removed *if* we went over limit.
        c[3] = "eggs"
        self.failUnless(c.has_key(2))
        self.failUnless(len(c) == 10)
        c._assert_invariants()

    def _test_LRU_overflow_on_update(self, C):
        # update() with more items than maxsize must cap the cache at maxsize.
        d = C(maxsize=10)
        self.failUnless(d._assert_invariants())
        d2 = {}
        for i in range(12):
            d2[i] = i
        d.update(d2)
        self.failUnless(d._assert_invariants())
        self.failUnless(len(d) == 10)

    def _test_LRU_overflow_on_init(self, C):
        # Constructing with more initial data than maxsize must cap it too.
        d2 = {}
        for i in range(12):
            d2[i] = i
        d = C(d2, maxsize=10)
        self.failUnless(d._assert_invariants())
        self.failUnless(len(d) == 10)

    def _test_em(self):
        """Run every helper against every cache implementation."""
        for klass in (cache.LRUCache, cache.SmallLRUCache,):
            for testfunc in (self._test_empty_lookup, self._test_insert_and_get, self._test_insert_and_remove, self._test_extracted_bound_method, self._test_extracted_unbound_method, self._test_clear, self._test_update, self._test_setdefault,):
                testfunc(klass())
            for testfunc in (self._test_popitem, self._test_iterate_items, self._test_iterate_keys, self._test_iterate_values, self._test_key_error, ):
                testfunc(klass)
            self._test_unbound_method(klass, klass())
        for klass in (cache.LRUCache, cache.SmallLRUCache,):
            for testfunc in (self._test_LRU_1, self._test_LRU_2, self._test_LRU_3, self._test_LRU_full, self._test_LRU_has_key, self._test_LRU_not_overfull_on_idempotent_add, self._test_LRU_overflow_on_update, self._test_LRU_overflow_on_init,):
                testfunc(klass)

    def test_em(self):
        self._test_em()

    def _mem_test_LRU_much_adding_some_removing(self):
        # NOTE(review): the `return` exits after the *first* class, so only
        # cache.LRUCache is actually exercised here.
        for klass in (cache.LRUCache, cache.SmallLRUCache,):
            return self._test_LRU_much_adding_some_removing(klass)

    def test_mem_leakage(self):
        try:
            self._test_mem_leakage()
        except memutil.NotSupportedException:
            print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
            pass
    del test_mem_leakage # This test takes too long.

    def _test_mem_leakage(self):
        # measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
        memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
        slope = memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
        self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))

    def test_mem_leakage_much_adding_some_removing(self):
        try:
            self._test_mem_leakage_much_adding_some_removing()
        except memutil.NotSupportedException:
            print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
            pass
    del test_mem_leakage_much_adding_some_removing # This test takes too long.

    def _test_mem_leakage_much_adding_some_removing(self):
        # measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
        memutil.measure_mem_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
        slope = memutil.measure_mem_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
        self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self._mem_test_LRU_much_adding_some_removing, "%0.3f" % slope,))

    def test_obj_leakage(self):
        self._test_obj_leakage()
    del test_obj_leakage # This test takes too long.

    def _test_obj_leakage(self):
        # measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
        memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
        slope = memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
        self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))

    def test_obj_leakage_much_adding_some_removing(self):
        self._test_obj_leakage_much_adding_some_removing()
    del test_obj_leakage_much_adding_some_removing # This test takes too long.

    def _test_obj_leakage_much_adding_some_removing(self):
        # measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
        memutil.measure_obj_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
        slope = memutil.measure_obj_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
        self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self._mem_test_LRU_much_adding_some_removing, "%0.3f" % slope,))
| gpl-3.0 |
madj4ck/ansible | plugins/callbacks/hipchat.py | 69 | 6961 | # (C) 2014, Matt Martz <matt@sivel.net>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import urllib
import urllib2
from ansible import utils
try:
import prettytable
HAS_PRETTYTABLE = True
except ImportError:
HAS_PRETTYTABLE = False
class CallbackModule(object):
    """This is an example ansible callback plugin that sends status
    updates to a HipChat channel during playbook execution.

    This plugin makes use of the following environment variables:
        HIPCHAT_TOKEN (required): HipChat API token
        HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
        HIPCHAT_FROM (optional): Name to post as. Default: ansible
        HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true

    Requires:
        prettytable
    """

    def __init__(self):
        # Disable the plugin (instead of crashing ansible later) when either
        # the prettytable dependency or the API token is missing.
        if not HAS_PRETTYTABLE:
            self.disabled = True
            utils.warning('The `prettytable` python module is not installed. '
                          'Disabling the HipChat callback plugin.')

        self.msg_uri = 'https://api.hipchat.com/v1/rooms/message'
        self.token = os.getenv('HIPCHAT_TOKEN')
        self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
        self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
        # Any value other than the literal string 'false' enables notify.
        self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')

        if self.token is None:
            self.disabled = True
            utils.warning('HipChat token could not be loaded. The HipChat '
                          'token can be provided using the `HIPCHAT_TOKEN` '
                          'environment variable.')

        # The playbook banner can only be posted once the first play starts
        # (the playbook object is not available earlier); track that here.
        self.printed_playbook = False
        self.playbook_name = None

    def send_msg(self, msg, msg_format='text', color='yellow', notify=False):
        """Post *msg* to the configured HipChat room.

        Returns the raw API response body, or None if the request failed.
        """
        params = {}
        params['room_id'] = self.room
        params['from'] = self.from_name[:15]  # max length is 15
        params['message'] = msg
        params['message_format'] = msg_format
        params['color'] = color
        params['notify'] = int(self.allow_notify and notify)

        url = ('%s?auth_token=%s' % (self.msg_uri, self.token))
        try:
            response = urllib2.urlopen(url, urllib.urlencode(params))
            return response.read()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; a failed notification must not abort the run.
            utils.warning('Could not submit message to hipchat')

    # --- callback hooks we take no action on (ansible expects them to exist) ---

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_ok(self, host, res):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                encrypt=None, confirm=False, salt_size=None,
                                salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        """Display Playbook and play start messages"""

        # This block sends information about a playbook when it starts
        # The playbook object is not immediately available at
        # playbook_on_start so we grab it via the play
        #
        # Displays info about playbook being started by a person on an
        # inventory, as well as Tags, Skip Tags and Limits
        if not self.printed_playbook:
            self.playbook_name, _ = os.path.splitext(
                os.path.basename(self.play.playbook.filename))
            host_list = self.play.playbook.inventory.host_list
            inventory = os.path.basename(os.path.realpath(host_list))
            self.send_msg("%s: Playbook initiated by %s against %s" %
                          (self.playbook_name,
                           self.play.playbook.remote_user,
                           inventory), notify=True)
            self.printed_playbook = True
            subset = self.play.playbook.inventory._subset
            skip_tags = self.play.playbook.skip_tags
            self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
                          (self.playbook_name,
                           ', '.join(self.play.playbook.only_tags),
                           ', '.join(skip_tags) if skip_tags else None,
                           ', '.join(subset) if subset else subset))

        # This is where we actually say we are starting a play
        self.send_msg("%s: Starting play: %s" %
                      (self.playbook_name, name))

    def playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        hosts = sorted(stats.processed.keys())

        t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
                                     'Failures'])

        failures = False
        unreachable = False

        for h in hosts:
            s = stats.summarize(h)

            if s['failures'] > 0:
                failures = True
            if s['unreachable'] > 0:
                unreachable = True

            t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
                                            'failures']])

        self.send_msg("%s: Playbook complete" % self.playbook_name,
                      notify=True)

        if failures or unreachable:
            color = 'red'
            self.send_msg("%s: Failures detected" % self.playbook_name,
                          color=color, notify=True)
        else:
            color = 'green'

        self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
| gpl-3.0 |
jbdubois/obus | src/obusgen/c/__init__.py | 1 | 1864 | #===============================================================================
# obusgen - obus source code generator.
#
# @file __init__.py
#
# @brief obus C source code generator
#
# @author jean-baptiste.dubois@parrot.com
#
# Copyright (c) 2013 Parrot S.A.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Parrot Company nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PARROT COMPANY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===============================================================================
from .obus_c import main
| lgpl-2.1 |
darmaa/odoo | addons/project_mrp/project_mrp.py | 10 | 8111 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class procurement_order(osv.osv):
    """Extend procurements so that selling a service product flagged
    'auto_create_task' fulfils the procurement by creating a project task
    instead of going through the normal stock/purchase flow."""
    _name = "procurement.order"
    _inherit = "procurement.order"
    _columns = {
        # Task generated to fulfil this procurement, if any.
        'task_id': fields.many2one('project.task', 'Task'),
        # Originating sales order line (used for partner/project lookup).
        'sale_line_id': fields.many2one('sale.order.line', 'Sales order line')
    }

    def _is_procurement_task(self, cr, uid, procurement, context=None):
        """Return True when the procured product should spawn a task."""
        return procurement.product_id.type == 'service' and procurement.product_id.auto_create_task or False

    def _assign(self, cr, uid, procurement, context=None):
        """Consider task-type procurements assignable even without a rule."""
        res = super(procurement_order, self)._assign(cr, uid, procurement, context=context)
        if not res:
            #if there isn't any specific procurement.rule defined for the product, we may want to create a task
            if self._is_procurement_task(cr, uid, procurement, context=context):
                return True
        return res

    def _run(self, cr, uid, procurement, context=None):
        """Fulfil task-type procurements by creating the task (once)."""
        if self._is_procurement_task(cr, uid, procurement, context=context) and not procurement.task_id:
            #create a task for the procurement
            return self._create_service_task(cr, uid, procurement, context=context)
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)

    def _check(self, cr, uid, procurement, context=None):
        """A task-type procurement is done when its task reaches a closed stage."""
        if self._is_procurement_task(cr, uid, procurement, context=context):
            return procurement.task_id and procurement.task_id.stage_id.closed or False
        return super(procurement_order, self)._check(cr, uid, procurement, context=context)

    def _convert_qty_company_hours(self, cr, uid, procurement, context=None):
        """Convert the procured quantity into the company's time UoM (hours)."""
        product_uom = self.pool.get('product.uom')
        company_time_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id
        # Only convert when the UoMs differ but belong to the same category;
        # otherwise the raw quantity is taken as-is.
        if procurement.product_uom.id != company_time_uom_id.id and procurement.product_uom.category_id.id == company_time_uom_id.category_id.id:
            planned_hours = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, company_time_uom_id.id)
        else:
            planned_hours = procurement.product_qty
        return planned_hours

    def _get_project(self, cr, uid, procurement, context=None):
        """Pick the task's project: product default first, else the project
        linked to the sales order's analytic account."""
        project_project = self.pool.get('project.project')
        project = procurement.product_id.project_id
        if not project and procurement.sale_line_id:
            # find the project corresponding to the analytic account of the sales order
            account = procurement.sale_line_id.order_id.project_id
            project_ids = project_project.search(cr, uid, [('analytic_account_id', '=', account.id)])
            projects = project_project.browse(cr, uid, project_ids, context=context)
            project = projects and projects[0] or False
        return project

    def _create_service_task(self, cr, uid, procurement, context=None):
        """Create the project task fulfilling this procurement; returns its id."""
        project_task = self.pool.get('project.task')
        project = self._get_project(cr, uid, procurement, context=context)
        planned_hours = self._convert_qty_company_hours(cr, uid, procurement, context=context)
        task_id = project_task.create(cr, uid, {
            'name': '%s:%s' % (procurement.origin or '', procurement.product_id.name),
            'date_deadline': procurement.date_planned,
            'planned_hours': planned_hours,
            'remaining_hours': planned_hours,
            'partner_id': procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id.id or False,
            'user_id': procurement.product_id.product_manager.id,
            'procurement_id': procurement.id,
            'description': procurement.name + '\n',
            'project_id': project and project.id or False,
            'company_id': procurement.company_id.id,
        },context=context)
        # Link the task back to the procurement, then log on both documents.
        self.write(cr, uid, [procurement.id], {'task_id': task_id}, context=context)
        self.project_task_create_note(cr, uid, [procurement.id], context=context)
        return task_id

    def project_task_create_note(self, cr, uid, ids, context=None):
        """Post a 'Task created' note on the procurement and its sales order."""
        for procurement in self.browse(cr, uid, ids, context=context):
            body = _("Task created")
            self.message_post(cr, uid, [procurement.id], body=body, context=context)
            if procurement.sale_line_id and procurement.sale_line_id.order_id:
                procurement.sale_line_id.order_id.message_post(body=body)
class ProjectTaskStageMrp(osv.Model):
    """ Override project.task.type model to add a 'closed' boolean field allowing
    to know that tasks in this stage are considered as closed. Indeed since
    OpenERP 8.0 status is not present on tasks anymore, only stage_id. """
    _name = 'project.task.type'
    _inherit = 'project.task.type'
    _columns = {
        # Marks this stage as terminal; procurement_order._check relies on it.
        'closed': fields.boolean('Close', help="Tasks in this stage are considered as closed."),
    }
    _defaults = {
        'closed': False,
    }
class project_task(osv.osv):
    """Link tasks back to the procurement that created them and re-check
    that procurement whenever the task is moved to a closed stage."""
    _name = "project.task"
    _inherit = "project.task"
    _columns = {
        # Procurement fulfilled by this task (set by _create_service_task).
        'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null'),
        'sale_line_id': fields.related('procurement_id', 'sale_line_id', type='many2one', relation='sale.order.line', store=True, string='Sales Order Line'),
    }

    def _validate_subflows(self, cr, uid, ids, context=None):
        """Re-run the procurement check for every task linked to one."""
        proc_obj = self.pool.get("procurement.order")
        for task in self.browse(cr, uid, ids, context=context):
            if task.procurement_id:
                proc_obj.check(cr, uid, [task.procurement_id.id], context=context)

    def write(self, cr, uid, ids, values, context=None):
        """ When closing tasks, validate subflows. """
        res = super(project_task, self).write(cr, uid, ids, values, context=context)
        # Only react when the stage changes to one flagged 'closed'.
        if values.get('stage_id'):
            stage = self.pool.get('project.task.type').browse(cr, uid, values.get('stage_id'), context=context)
            if stage.closed:
                self._validate_subflows(cr, uid, ids, context=context)
        return res
class product_product(osv.osv):
    """Add the task auto-creation settings used by procurement_order."""
    _inherit = "product.product"
    _columns = {
        # Default project in which auto-created tasks are placed.
        'project_id': fields.many2one('project.project', 'Project', ondelete='set null',),
        # Fixed typo in the user-facing help text: "Thick" -> "Tick".
        'auto_create_task': fields.boolean('Create Task Automatically', help="Tick this option if you want to create a task automatically each time this product is sold"),
    }
class sale_order_line(osv.osv):
    """Force procurement creation for service lines that auto-create tasks."""
    _inherit = 'sale.order.line'

    def need_procurement(self, cr, uid, ids, context=None):
        # When the sale module is installed alone there is no need to create
        # procurements, but with project_mrp a procurement is required for
        # each service product whose auto_create_task flag is set.
        lines = self.browse(cr, uid, ids, context=context)
        if any(line.product_id and line.product_id.type == 'service'
               and line.product_id.auto_create_task for line in lines):
            return True
        return super(sale_order_line, self).need_procurement(cr, uid, ids, context=context)
| agpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/pdt/package.py | 3 | 3475 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
class Pdt(AutotoolsPackage):
    """Program Database Toolkit (PDT) is a framework for analyzing source
    code written in several programming languages and for making rich
    program knowledge accessible to developers of static and dynamic
    analysis tools. PDT implements a standard program representation,
    the program database (PDB), that can be accessed in a uniform way
    through a class library supporting common PDB operations.
    """
    homepage = "https://www.cs.uoregon.edu/research/pdt/home.php"
    url = "http://www.cs.uoregon.edu/research/paracomp/pdtoolkit/Download/pdtoolkit-3.22.1.tar.gz"

    version('3.25', '2cad41fcabf4c79cab8780d3b87f7bb4')
    version('3.24', 'b8fa5189e5602276ce225ba497b617e4')
    version('3.23', 'd61e7a631a27b00e58def52950230a2c')
    version('3.22.1', 'b56b9b3e621161c7fd9e4908b944840d')
    version('3.22', '982d667617802962a1f7fe6c4c31184f')
    version('3.21', '3092ca0d8833b69992c17e63ae66c263')
    version('3.20', 'c3edabe202926abe04552e33cd39672d')
    version('3.19', '5c5e1e6607086aa13bf4b1b9befc5864')
    version('3.18.1', 'e401534f5c476c3e77f05b7f73b6c4f2')

    # Extra flag PDT's configure script needs per compiler family.
    _compiler_options = {'xl': '-XLC', 'intel': '-icpc', 'pgi': '-pgCC'}

    def patch(self):
        """Point the Makefile's hard-coded g++ at clang++ when using clang."""
        if not self.spec.satisfies('%clang'):
            return
        filter_file(r'PDT_GXX=g\+\+ ',
                    r'PDT_GXX=clang++ ', 'ductape/Makefile')

    def configure(self, spec, prefix):
        """Invoke PDT's custom configure script with compiler-specific flags."""
        options = ['-prefix=%s' % prefix]
        extra_flag = self._compiler_options.get(self.compiler.name)
        if extra_flag is not None:
            options.append(extra_flag)
        configure(*options)

    @run_after('install')
    def link_arch_dirs(self):
        """Symlink the arch-specific bin/ and lib/ dirs into the prefix root."""
        for entry in os.listdir(self.prefix):
            arch_path = join_path(self.prefix, entry)
            if not os.path.isdir(arch_path) or os.path.islink(arch_path):
                continue
            for subdir in ('bin', 'lib'):
                link_target = join_path(self.prefix, subdir)
                if (os.path.isdir(join_path(arch_path, subdir))
                        and not os.path.exists(link_target)):
                    os.symlink(join_path(entry, subdir), link_target)
| lgpl-2.1 |
elba7r/system | erpnext/accounts/doctype/pos_profile/pos_profile.py | 18 | 2780 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cint
from erpnext.accounts.doctype.sales_invoice.sales_invoice import set_account_for_mode_of_payment
from frappe.model.document import Document
class POSProfile(Document):
    """Point-of-Sale Profile: per-company (and optionally per-user) POS
    configuration, responsible for keeping the 'is_pos' default in sync."""

    def validate(self):
        self.check_for_duplicate()
        self.validate_all_link_fields()
        self.validate_duplicate_groups()

    def check_for_duplicate(self):
        """Disallow a second profile for the same user — or a second global
        (user-less) profile — within one company."""
        res = frappe.db.sql("""select name, user from `tabPOS Profile`
            where ifnull(user, '') = %s and name != %s and company = %s""",
            (self.user, self.name, self.company))
        if res:
            if res[0][1]:
                msgprint(_("POS Profile {0} already created for user: {1} and company {2}").format(res[0][0],
                    res[0][1], self.company), raise_exception=1)
            else:
                msgprint(_("Global POS Profile {0} already created for company {1}").format(res[0][0],
                    self.company), raise_exception=1)

    def validate_all_link_fields(self):
        """Ensure the linked accounts, cost center and warehouse all belong
        to this profile's company."""
        accounts = {"Account": [self.income_account,
            self.expense_account], "Cost Center": [self.cost_center],
            "Warehouse": [self.warehouse]}

        for link_dt, dn_list in accounts.items():
            for link_dn in dn_list:
                if link_dn and not frappe.db.exists({"doctype": link_dt,
                        "company": self.company, "name": link_dn}):
                    frappe.throw(_("{0} does not belong to Company {1}").format(link_dn, self.company))

    def validate_duplicate_groups(self):
        """Reject duplicate rows in the item-group and customer-group tables."""
        item_groups = [d.item_group for d in self.item_groups]
        customer_groups = [d.customer_group for d in self.customer_groups]

        if len(item_groups) != len(set(item_groups)):
            frappe.throw(_("Duplicate item group found in the item group table"), title = "Duplicate Item Group")

        if len(customer_groups) != len(set(customer_groups)):
            # Fixed typo in the user-facing message: "cutomer" -> "customer".
            frappe.throw(_("Duplicate customer group found in the customer group table"), title = "Duplicate Customer Group")

    def before_save(self):
        set_account_for_mode_of_payment(self)

    def on_update(self):
        self.set_defaults()

    def on_trash(self):
        # A profile being deleted must stop contributing to the defaults.
        self.set_defaults(include_current_pos=False)

    def set_defaults(self, include_current_pos=True):
        """Recompute the 'is_pos' default for every user that owns a POS
        Profile (or globally for user-less profiles).

        When include_current_pos is False this profile itself is excluded,
        which is used while it is being deleted.
        """
        frappe.defaults.clear_default("is_pos")

        if include_current_pos:
            pos_view_users = frappe.db.sql_list(
                """select user from `tabPOS Profile`""")
        else:
            # Previously built via string interpolation guarded by a no-op
            # escape (`"\'"` is just `"'"`), which broke — and could inject
            # into — the query for names containing a quote. Use a bound
            # parameter instead.
            pos_view_users = frappe.db.sql_list(
                """select user from `tabPOS Profile` where name != %s""",
                (self.name,))

        for user in pos_view_users:
            if user:
                frappe.defaults.set_user_default("is_pos", 1, user)
            else:
                frappe.defaults.set_global_default("is_pos", 1)
@frappe.whitelist()
def get_series():
    """Return the naming-series options configured for Sales Invoice.

    Returns an empty string when no options are configured.
    """
    naming_field = frappe.get_meta("Sales Invoice").get_field("naming_series")
    return naming_field.options or ""
| gpl-3.0 |
pquentin/django | django/contrib/admindocs/tests/test_fields.py | 638 | 1172 | from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
    # Fixture field: provides a `description`, so admindocs should use it.
    description = "A custom field type"
class DescriptionLackingField(models.Field):
    # Fixture field with no `description`: admindocs must fall back to the
    # generic "Field of type: X" message.
    pass
class TestFieldType(unittest.TestCase):
    """Tests for views.get_readable_field_data_type.

    The no-op setUp() override was removed; unittest.TestCase already
    provides an empty one, so overriding it with `pass` was dead code.
    """

    def test_field_name(self):
        """Passing a non-field object raises AttributeError."""
        self.assertRaises(
            AttributeError,
            views.get_readable_field_data_type, "NotAField"
        )

    def test_builtin_fields(self):
        """Built-in fields use Django's own readable description."""
        self.assertEqual(
            views.get_readable_field_data_type(fields.BooleanField()),
            _('Boolean (Either True or False)')
        )

    def test_custom_fields(self):
        """Custom fields use their `description` attribute, falling back to
        a generic "Field of type: X" message when none is defined."""
        self.assertEqual(
            views.get_readable_field_data_type(CustomField()),
            'A custom field type'
        )
        self.assertEqual(
            views.get_readable_field_data_type(DescriptionLackingField()),
            _('Field of type: %(field_type)s') % {
                'field_type': 'DescriptionLackingField'
            }
        )
| bsd-3-clause |
lucienfostier/gaffer | python/GafferUITest/BackgroundMethodTest.py | 2 | 6726 | ##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import time
import six
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class BackgroundMethodTest( GafferUITest.TestCase ) :
    """Tests for GafferUI.BackgroundMethod : a decorator that runs a method
    on a background thread, bracketed by pre/post calls on the UI thread."""

    class TestWidget( GafferUI.NumericWidget ) :
        """Widget whose updateInBackground() computes a node's sum on a
        background thread, recording call counts for the assertions."""

        def __init__( self, **kw ) :

            GafferUI.NumericWidget.__init__( self, 0, **kw )

            self.__script = Gaffer.ScriptNode()
            self.__script["n"] = GafferTest.AddNode()

            # Counters inspected by the tests below.
            self.numPreCalls = 0
            self.numBackgroundCalls = 0
            self.numPostCalls = 0

        def node( self ) :

            return self.__script["n"]

        @GafferUI.BackgroundMethod()
        def updateInBackground( self, arg ) :
            # Runs on a background thread; returns the node's sum.

            self.numBackgroundCalls += 1
            self.backgroundCallArg = arg
            self.backgroundCallThreadId = six.moves._thread.get_ident()

            canceller = Gaffer.Context.current().canceller()
            # Give the main thread time to cancel, so we
            # can test `cancelWhenHidden`.
            for i in range( 0, 100 ) :
                IECore.Canceller.check( canceller )
                time.sleep( 0.01 )

            # Simulate an error if we've been asked to.
            if getattr( self, "throw", False ) :
                raise Exception( "Oops!" )

            return self.__script["n"]["sum"].getValue()

        @updateInBackground.preCall
        def __updateInBackgroundPreCall( self ) :
            # Runs on the UI thread before the background call is launched.

            self.numPreCalls += 1
            self.preCallThreadId = six.moves._thread.get_ident()
            self.setEnabled( False )

        @updateInBackground.postCall
        def __updateInBackgroundPostCall( self, value ) :
            # Runs on the UI thread with the background call's result. The
            # result may also be an Exception or IECore.Cancelled, in which
            # case the widget value is set to -1.

            self.postCallArg = value
            self.numPostCalls += 1
            self.postCallThreadId = six.moves._thread.get_ident()
            self.setValue( value if isinstance( value, int ) else -1 )
            self.setEnabled( True )

        @updateInBackground.plug
        def __setTextPlug( self ) :
            # Plug whose dirtiness the BackgroundMethod tracks.

            return self.__script["n"]["sum"]

    class WaitingSlot( GafferTest.CapturingSlot ) :
        """CapturingSlot that can spin the event loop until a signal fires."""

        def __init__( self, signal ) :

            GafferTest.CapturingSlot.__init__( self, signal )

        def wait( self ) :

            while len( self ) == 0 :
                GafferUI.EventLoop.waitForIdle()

    def test( self ) :
        # Nominal path : pre call, background call, post call, value updated.

        with GafferUI.Window() as window :
            w = self.TestWidget()

        window.setVisible( True )

        self.assertFalse( w.updateInBackground.running( w ) )
        self.assertEqual( w.numPreCalls, 0 )
        self.assertEqual( w.numBackgroundCalls, 0 )
        self.assertEqual( w.numPostCalls, 0 )

        w.node()["op1"].setValue( 1 )
        ws = self.WaitingSlot( w.valueChangedSignal() )
        w.updateInBackground( 100 )
        self.assertEqual( w.getEnabled(), False )
        self.assertEqual( w.getValue(), 0 )
        self.assertTrue( w.updateInBackground.running( w ) )

        ws.wait()
        self.assertFalse( w.updateInBackground.running( w ) )

        self.assertEqual( w.getValue(), 1 )
        self.assertEqual( w.numPreCalls, 1 )
        self.assertEqual( w.numBackgroundCalls, 1 )
        self.assertEqual( w.numPostCalls, 1 )
        self.assertEqual( w.postCallArg, 1 )
        self.assertEqual( w.backgroundCallArg, 100 )
        # Background work happens off the UI thread; pre/post on the UI thread.
        self.assertNotEqual( w.backgroundCallThreadId, six.moves._thread.get_ident() )
        self.assertEqual( w.preCallThreadId, six.moves._thread.get_ident() )
        self.assertEqual( w.postCallThreadId, six.moves._thread.get_ident() )

    def testCancelWhenHidden( self ) :
        # Hiding the window cancels the in-flight background call.

        with GafferUI.Window() as window :
            w = self.TestWidget()

        window.setVisible( True )

        ws = self.WaitingSlot( w.valueChangedSignal() )
        w.updateInBackground( 1 )
        window.setVisible( False )

        ws.wait()
        self.assertEqual( w.getValue(), -1 )
        self.assertEqual( w.numPreCalls, 1 )
        # Background function may have been cancelled before
        # it even started, in which case it will not even have
        # been called.
        self.assertIn( w.numBackgroundCalls, { 0, 1 } )
        # But no matter what, we always expect a matching postCall
        # for the original preCall.
        self.assertEqual( w.numPostCalls, 1 )
        self.assertIsInstance( w.postCallArg, IECore.Cancelled )

    def testExceptions( self ) :
        # An exception in the background call is delivered to the post call.

        with GafferUI.Window() as window :
            w = self.TestWidget()
            w.throw = True

        window.setVisible( True )

        ws = self.WaitingSlot( w.valueChangedSignal() )
        w.updateInBackground( 1000 )

        ws.wait()
        self.assertEqual( w.getValue(), -1 )
        self.assertEqual( w.numPreCalls, 1 )
        self.assertEqual( w.numBackgroundCalls, 1 )
        self.assertEqual( w.numPostCalls, 1 )
        self.assertIsInstance( w.postCallArg, Exception )

    def testSecondCallSupercedesFirst( self ) :
        # A second call cancels the first; only one pre/post pair occurs.

        with GafferUI.Window() as window :
            w = self.TestWidget()

        window.setVisible( True )

        w.node()["op1"].setValue( 2 )
        ws = self.WaitingSlot( w.valueChangedSignal() )
        w.updateInBackground( 10 )
        w.updateInBackground( 11 )

        ws.wait()
        self.assertEqual( w.getValue(), 2 )
        # Second call re-uses the first precall
        self.assertEqual( w.numPreCalls, 1 )
        # The first call may have got started before
        # it was cancelled by the second, or it may
        # not.
        self.assertIn( w.numBackgroundCalls, { 1, 2 } )
        # But either way the first call doesn't make it to
        # the post-call stage.
        self.assertEqual( w.numPostCalls, 1 )
        self.assertEqual( w.backgroundCallArg, 11 )
if __name__ == "__main__":
    # Bug fix: `unittest` is never imported at module scope in this file,
    # so running it directly raised NameError. Import locally here.
    import unittest
    unittest.main()
| bsd-3-clause |
thompsjj/MTA_delay_prediction | system_model/sql_interface.py | 1 | 2246 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 24 08:02:02 2015
@author: Jared J. Thompson
"""
import psycopg2
from psycopg2.extras import DictCursor
import sys,os
def connect_to_db(dbname, user, host, password, port='5432'):
    """Connect to a PostgreSQL database and list its public tables.

    Returns a (cursor, connection) pair with autocommit enabled.
    Exits the process with a nonzero status if the connection fails.
    """
    try:
        conn = psycopg2.connect(dbname=dbname, user=user,
                                host=host, password=password, port=port)
        conn.autocommit = True
    except psycopg2.Error:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; exit nonzero so callers can detect the failure.
        # print() (not the py2 print statement) matches the print(table)
        # calls already used below.
        print("Unable to connect to the database")
        sys.exit(1)
    cursor = conn.cursor()
    cursor.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'""")
    for table in cursor.fetchall():
        print(table)
    return cursor, conn
def connect_to_local_db(name, user, password='user'):
    """Connect to a PostgreSQL database on localhost and list its public tables.

    Returns a (cursor, connection) pair with autocommit enabled.
    Exits the process with a nonzero status if the connection fails.
    """
    try:
        conn = psycopg2.connect(database=name, user=user, host='localhost', password=password)
        conn.autocommit = True
    except psycopg2.Error:
        # Narrowed from a bare `except:`; nonzero exit signals failure.
        print("Unable to connect to the database")
        sys.exit(1)
    cursor = conn.cursor()
    cursor.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'""")
    for table in cursor.fetchall():
        print(table)
    return cursor, conn
def sample_local_db_dict_cursor(name, user, password='user'):
    """Connect to a local PostgreSQL database using a DictCursor.

    Like connect_to_local_db(), but rows from the returned cursor behave
    as dictionaries, and autocommit is left disabled.
    Exits the process with a nonzero status if the connection fails.
    """
    try:
        conn = psycopg2.connect(database=name, user=user, host='localhost', password=password)
    except psycopg2.Error:
        # Narrowed from a bare `except:`; nonzero exit signals failure.
        print("Unable to connect to the database")
        sys.exit(1)
    cursor = conn.cursor(cursor_factory=DictCursor)
    cursor.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'""")
    for table in cursor.fetchall():
        print(table)
    return cursor, conn
def table_exists(cur, table_str):
    """Return True if a relation named `table_str` exists in pg_class.

    Returns False (after a message) when the cursor is already closed.
    Exits the process with a nonzero status on a database error.
    """
    if cur.closed:
        print("cursor is closed.")
        return False
    exists = False
    try:
        # Parameterized query: the previous string concatenation was open
        # to SQL injection via table_str.
        cur.execute("SELECT EXISTS(SELECT relname FROM pg_class WHERE relname = %s)",
                    (table_str,))
        exists = cur.fetchone()[0]
    except psycopg2.Error as e:
        print(e)
        sys.exit(1)
    return exists
def drop_table(cur, table_name):
    """Drop `table_name` if it exists; no-op when it does not.

    NOTE(review): the DROP statement interpolates the identifier directly,
    because psycopg2 %s placeholders cannot quote identifiers -- table_name
    must come from a trusted source.
    """
    if table_exists(cur, table_name):
        if not cur.closed:
            cur.execute("DROP TABLE %s" % (table_name,))
        else:
            print('cursor is closed.')
davidmwine/gitinspector | gitinspector/blame.py | 47 | 11442 | # coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
from changes import FileDiff
import comment
import changes
import datetime
import filtering
import format
import gravatar
import interval
import multiprocessing
import re
import subprocess
import sys
import terminal
import textwrap
import threading
# Size of the blame worker pool: one thread per CPU.
NUM_THREADS = multiprocessing.cpu_count()
class BlameEntry:
    """Accumulated blame statistics for one (author, file) pair."""

    def __init__(self):
        # Instance attributes instead of class attributes: avoids the
        # class-attribute-as-instance-default smell while behaving the same
        # for the `entry.attr += n` usage in this module.
        self.rows = 0      # Number of surviving rows blamed on the author.
        self.skew = 0      # Used when calculating average code age.
        self.comments = 0  # Number of blamed rows that are comments.
# Bounds the number of concurrently running BlameThread instances.
__thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)
# Serializes updates to the shared blames dictionary.
__blame_lock__ = threading.Lock()

# Average days per month; converts day counts to months for the age metric.
AVG_DAYS_PER_MONTH = 30.4167
class BlameThread(threading.Thread):
    """Runs one `git blame --line-porcelain` invocation for a single file and
    accumulates per-(author, file) statistics into the shared `blames` dict.

    At most NUM_THREADS instances run concurrently: the constructor blocks on
    __thread_lock__ until a slot is free, and run() releases it when done.
    """

    def __init__(self, useweeks, changes, blame_string, extension, blames, filename):
        __thread_lock__.acquire() # Lock controlling the number of threads running
        threading.Thread.__init__(self)
        self.useweeks = useweeks          # True: age in weeks; False: months.
        self.changes = changes            # Commit metadata (authors, dates).
        self.blame_string = blame_string  # Complete "git blame ..." command line.
        self.extension = extension        # File extension, for comment detection.
        self.blames = blames              # Shared dict keyed by (author, filename).
        self.filename = filename
        self.is_inside_comment = False

    def __clear_blamechunk_info__(self):
        # Reset the per-chunk state between porcelain header blocks.
        self.blamechunk_email = None
        self.blamechunk_is_last = False
        self.blamechunk_is_prior = False
        self.blamechunk_revision = None
        self.blamechunk_time = None

    def __handle_blamechunk_content__(self, content):
        # Account one blamed source row under the current chunk's author.
        author = None
        (comments, self.is_inside_comment) = comment.handle_comment_block(self.is_inside_comment, self.extension, content)

        # Rows authored before the --since boundary are ignored.
        if self.blamechunk_is_prior and interval.get_since():
            return
        try:
            author = self.changes.get_latest_author_by_email(self.blamechunk_email)
        except KeyError:
            return

        __blame_lock__.acquire() # Global lock used to protect calls from here...

        if not filtering.set_filtered(author, "author") and not filtering.set_filtered(self.blamechunk_email, "email") and not \
               filtering.set_filtered(self.blamechunk_revision, "revision"):
            if self.blames.get((author, self.filename), None) == None:
                self.blames[(author, self.filename)] = BlameEntry()

            self.blames[(author, self.filename)].comments += comments
            self.blames[(author, self.filename)].rows += 1

            if (self.blamechunk_time - self.changes.first_commit_date).days > 0:
                # Accumulate row age (weeks or months) for the average-age metric.
                self.blames[(author, self.filename)].skew += ((self.changes.last_commit_date - self.blamechunk_time).days /
                                                              (7.0 if self.useweeks else AVG_DAYS_PER_MONTH))

        __blame_lock__.release() # ...to here.

    def run(self):
        # Parse the porcelain output: each chunk is a header block whose
        # final "filename" key is followed by the source line itself.
        git_blame_r = subprocess.Popen(self.blame_string, shell=True, bufsize=1, stdout=subprocess.PIPE).stdout
        rows = git_blame_r.readlines()
        git_blame_r.close()

        self.__clear_blamechunk_info__()

        for j in range(0, len(rows)):
            row = rows[j].decode("utf-8", "replace").strip()
            keyval = row.split(" ", 2)

            if self.blamechunk_is_last:
                self.__handle_blamechunk_content__(row)
                self.__clear_blamechunk_info__()
            elif keyval[0] == "boundary":
                self.blamechunk_is_prior = True
            elif keyval[0] == "author-mail":
                self.blamechunk_email = keyval[1].lstrip("<").rstrip(">")
            elif keyval[0] == "author-time":
                self.blamechunk_time = datetime.date.fromtimestamp(int(keyval[1]))
            elif keyval[0] == "filename":
                self.blamechunk_is_last = True
            elif Blame.is_revision(keyval[0]):
                self.blamechunk_revision = keyval[0]

        __thread_lock__.release() # Lock controlling the number of threads running
# Progress message shown while blaming (interactive formats only).
PROGRESS_TEXT = N_("Checking how many rows belong to each author (Progress): {0:.0f}%")
class Blame:
    """Runs git blame over every tracked file (one BlameThread per file)
    and aggregates the results in `self.blames`."""

    def __init__(self, hard, useweeks, changes):
        self.blames = {}
        ls_tree_r = subprocess.Popen("git ls-tree --name-only -r " + interval.get_ref(), shell=True, bufsize=1,
                                     stdout=subprocess.PIPE).stdout
        lines = ls_tree_r.readlines()

        for i, row in enumerate(lines):
            # Normalize git's escaped/quoted file names into a usable string.
            row = row.strip().decode("unicode_escape", "ignore")
            row = row.encode("latin-1", "replace")
            row = row.decode("utf-8", "replace").strip("\"").strip("'").strip()

            if FileDiff.is_valid_extension(row) and not filtering.set_filtered(FileDiff.get_filename(row)):
                # -C -C -M (copy/move detection) only in --hard mode.
                blame_string = "git blame --line-porcelain -w {0} ".format("-C -C -M" if hard else "") + \
                               interval.get_since() + interval.get_ref() + " -- \"" + row + "\""
                thread = BlameThread(useweeks, changes, blame_string, FileDiff.get_extension(row), self.blames, row.strip())
                thread.daemon = True
                thread.start()

                if hard:
                    Blame.output_progress(i, len(lines))

        # Make sure all threads have completed.
        for i in range(0, NUM_THREADS):
            __thread_lock__.acquire()

    @staticmethod
    def output_progress(pos, length):
        # Only meaningful on a terminal with an interactive output format.
        if sys.stdout.isatty() and format.is_interactive_format():
            terminal.clear_row()
            print(_(PROGRESS_TEXT).format(100 * pos / length), end="")
            sys.stdout.flush()

    @staticmethod
    def is_revision(string):
        # Returns the 40-char SHA-1 if `string` contains one, else False.
        revision = re.search("([0-9a-f]{40})", string)

        if revision == None:
            return False

        return revision.group(1).strip()

    @staticmethod
    def get_stability(author, blamed_rows, changes):
        # Percentage of the author's inserted rows that still survive.
        if author in changes.get_authorinfo_list():
            return 100.0 * blamed_rows / changes.get_authorinfo_list()[author].insertions
        return 100

    @staticmethod
    def get_time(string):
        # NOTE(review): appears unused within this file -- extracts the date
        # from a non-porcelain blame line. Confirm before removing.
        time = re.search(" \(.*?(\d\d\d\d-\d\d-\d\d)", string)
        return time.group(1).strip()

    def get_summed_blames(self):
        # Collapse per-(author, file) entries into per-author totals.
        summed_blames = {}
        for i in self.blames.items():
            if summed_blames.get(i[0][0], None) == None:
                summed_blames[i[0][0]] = BlameEntry()

            summed_blames[i[0][0]].rows += i[1].rows
            summed_blames[i[0][0]].skew += i[1].skew
            summed_blames[i[0][0]].comments += i[1].comments

        return summed_blames
# Module-level singleton, populated lazily by get().
__blame__ = None
def get(hard, useweeks, changes):
    """Return the lazily-created Blame singleton.

    The first call runs the (expensive) blame analysis; later calls return
    the cached instance and ignore the arguments.
    """
    global __blame__
    # Idiom fix: identity comparison with None (`is`), not `==`.
    if __blame__ is None:
        __blame__ = Blame(hard, useweeks, changes)

    return __blame__
# Heading shown above the blame table in every output format.
BLAME_INFO_TEXT = N_("Below are the number of rows from each author that have survived and are still "
                     "intact in the current revision")
class BlameOutput(Outputable):
    """Renders the aggregated blame statistics as HTML, plain text or XML."""

    def __init__(self, hard, useweeks):
        if format.is_interactive_format():
            print("")

        self.hard = hard
        self.useweeks = useweeks
        self.changes = changes.get(hard)
        # Ensure the global Blame singleton exists before any output method runs.
        get(self.hard, self.useweeks, self.changes)
        Outputable.__init__(self)

    def output_html(self):
        # Builds a table plus a flot pie chart of per-author blame shares.
        blame_xml = "<div><div class=\"box\">"
        blame_xml += "<p>" + _(BLAME_INFO_TEXT) + ".</p><div><table id=\"blame\" class=\"git\">"
        blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format(
                     _("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments"))
        blame_xml += "<tbody>"
        chart_data = ""
        blames = sorted(__blame__.get_summed_blames().items())
        total_blames = 0

        for i in blames:
            total_blames += i[1].rows

        for i, entry in enumerate(blames):
            work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames))
            blame_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">")

            if format.get_selected() == "html":
                author_email = self.changes.get_latest_email_by_author(entry[0])
                blame_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(author_email), entry[0])
            else:
                blame_xml += "<td>" + entry[0] + "</td>"

            blame_xml += "<td>" + str(entry[1].rows) + "</td>"
            blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>")
            blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>"
            blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>"
            # Hidden column feeds the pie chart percentages.
            blame_xml += "<td style=\"display: none\">" + work_percentage + "</td>"
            blame_xml += "</tr>"
            chart_data += "{{label: \"{0}\", data: {1}}}".format(entry[0], work_percentage)

            if blames[-1] != entry:
                chart_data += ", "

        blame_xml += "<tfoot><tr> <td colspan=\"5\"> </td> </tr></tfoot></tbody></table>"
        blame_xml += "<div class=\"chart\" id=\"blame_chart\"></div></div>"
        blame_xml += "<script type=\"text/javascript\">"
        blame_xml += " blame_plot = $.plot($(\"#blame_chart\"), [{0}], {{".format(chart_data)
        blame_xml += " series: {"
        blame_xml += " pie: {"
        blame_xml += " innerRadius: 0.4,"
        blame_xml += " show: true,"
        blame_xml += " combine: {"
        blame_xml += " threshold: 0.01,"
        blame_xml += " label: \"" + _("Minor Authors") + "\""
        blame_xml += " }"
        blame_xml += " }"
        blame_xml += " }, grid: {"
        blame_xml += " hoverable: true"
        blame_xml += " }"
        blame_xml += " });"
        blame_xml += "</script></div></div>"

        print(blame_xml)

    def output_text(self):
        if sys.stdout.isatty() and format.is_interactive_format():
            terminal.clear_row()

        print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
        terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Rows"), 10) + terminal.rjust(_("Stability"), 15) +
                        terminal.rjust(_("Age"), 13) + terminal.rjust(_("% in comments"), 20))

        for i in sorted(__blame__.get_summed_blames().items()):
            # Truncate wide (multi-column) author names to keep columns aligned.
            print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ")
            print(str(i[1].rows).rjust(10), end=" ")
            print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ")
            print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ")
            print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19))

    def output_xml(self):
        message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n"
        blame_xml = ""

        for i in sorted(__blame__.get_summed_blames().items()):
            author_email = self.changes.get_latest_email_by_author(i[0])
            name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n"
            gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
            rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n"
            stability_xml = ("\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
                            self.changes)) + "</stability>\n")
            age_xml = ("\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n")
            percentage_in_comments_xml = ("\t\t\t\t<percentage-in-comments>" + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) +
                                          "</percentage-in-comments>\n")
            blame_xml += ("\t\t\t<author>\n" + name_xml + gravatar_xml + rows_xml + stability_xml + age_xml +
                          percentage_in_comments_xml + "\t\t\t</author>\n")

        print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>")
| gpl-3.0 |
caphrim007/ansible | lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py | 14 | 7728 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_host_pm
short_description: Module to manage power management of hosts in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage power management of hosts in oVirt/RHV."
options:
name:
description:
- "Name of the host to manage."
required: true
aliases: ['host']
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
address:
description:
- "Address of the power management interface."
username:
description:
- "Username to be used to connect to power management interface."
password:
description:
- "Password of the user specified in C(username) parameter."
type:
description:
- "Type of the power management. oVirt/RHV predefined values are I(drac5), I(ipmilan), I(rsa),
I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
but user can have defined custom type."
port:
description:
- "Power management interface port."
options:
description:
- "Dictionary of additional fence agent options (including Power Management slot)."
- "Additional information about options can be found at U(https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md)."
encrypt_options:
description:
- "If I(true) options will be encrypted when send to agent."
aliases: ['encrypt']
order:
description:
- "Integer value specifying the order of the fence agent among the host's agents; by default the agent is added at the end."
version_added: "2.5"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add fence agent to host 'myhost'
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
username: admin
password: admin
port: 3333
type: ipmilan
# Add fence agent to host 'myhost' using 'slot' option
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
slot: myslot
username: admin
password: admin
port: 3333
type: ipmilan
# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
- ovirt_host_pm:
state: absent
name: myhost
address: 1.2.3.4
type: ipmilan
'''
RETURN = '''
id:
description: ID of the agent which is managed
returned: On success if agent is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
agent:
description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/agent."
returned: On success if agent is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class HostModule(BaseModule):
    """Thin wrapper used to switch on a host's power management."""

    def build_entity(self):
        # Only the power-management enabled flag is managed here.
        return otypes.Host(
            power_management=otypes.PowerManagement(
                enabled=True,
            ),
        )

    def update_check(self, entity):
        # Up to date when power management is already enabled.
        return equal(True, entity.power_management.enabled)
class HostPmModule(BaseModule):
    """Maps module parameters onto an otypes.Agent and detects drift."""

    def build_entity(self):
        # Build the fence agent entity from the module parameters.
        return otypes.Agent(
            address=self._module.params['address'],
            encrypt_options=self._module.params['encrypt_options'],
            options=[
                otypes.Option(
                    name=name,
                    value=value,
                ) for name, value in self._module.params['options'].items()
            ] if self._module.params['options'] else None,
            password=self._module.params['password'],
            port=self._module.params['port'],
            type=self._module.params['type'],
            username=self._module.params['username'],
            # NOTE(review): 'order' always exists in params (default None per
            # the argument spec), so the 100 fallback here is never used --
            # confirm whether None or 100 is the intended default.
            order=self._module.params.get('order', 100),
        )

    def update_check(self, entity):
        # True when the existing agent already matches every parameter.
        return (
            equal(self._module.params.get('address'), entity.address) and
            equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
            equal(self._module.params.get('password'), entity.password) and
            equal(self._module.params.get('username'), entity.username) and
            equal(self._module.params.get('port'), entity.port) and
            equal(self._module.params.get('type'), entity.type) and
            equal(self._module.params.get('order'), entity.order)
        )
def main():
    """Entry point: add or remove a fence agent on a host, enabling power
    management on the host when an agent is added."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True, aliases=['host']),
        address=dict(default=None),
        username=dict(default=None),
        password=dict(default=None, no_log=True),
        type=dict(default=None),
        port=dict(default=None, type='int'),
        order=dict(default=None, type='int'),
        options=dict(default=None, type='dict'),
        encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)

    # Bug fix: initialize these before the try block so the `finally` clause
    # cannot raise NameError (masking the real error) when connection setup
    # itself fails.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        host = search_by_name(hosts_service, module.params['name'])
        fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()

        host_pm_module = HostPmModule(
            connection=connection,
            module=module,
            service=fence_agents_service,
        )
        host_module = HostModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )

        state = module.params['state']
        # Both states locate the existing agent the same way; hoisted out of
        # the branches to remove the duplicated search.
        agent = host_pm_module.search_entity(
            search_params={
                'address': module.params['address'],
                'type': module.params['type'],
            }
        )
        if state == 'present':
            ret = host_pm_module.create(entity=agent)

            # Enable Power Management, if it's not enabled:
            host_module.create(entity=host)
        else:  # state == 'absent' (the only other allowed choice)
            ret = host_pm_module.remove(entity=agent)

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Ansible executes modules directly as scripts.
if __name__ == "__main__":
    main()
| gpl-3.0 |
ronie/script.cu.lrclyrics | resources/lib/mutagen_culrc/_constants.py | 45 | 3393 | # -*- coding: utf-8 -*-
"""Constants used by Mutagen."""
# Index in this list is the numeric genre code used by ID3v1 (0-79) plus the
# Winamp extensions (80+). Order must not change.
GENRES = [
    u"Blues",
    u"Classic Rock",
    u"Country",
    u"Dance",
    u"Disco",
    u"Funk",
    u"Grunge",
    u"Hip-Hop",
    u"Jazz",
    u"Metal",
    u"New Age",
    u"Oldies",
    u"Other",
    u"Pop",
    u"R&B",
    u"Rap",
    u"Reggae",
    u"Rock",
    u"Techno",
    u"Industrial",
    u"Alternative",
    u"Ska",
    u"Death Metal",
    u"Pranks",
    u"Soundtrack",
    u"Euro-Techno",
    u"Ambient",
    u"Trip-Hop",
    u"Vocal",
    u"Jazz+Funk",
    u"Fusion",
    u"Trance",
    u"Classical",
    u"Instrumental",
    u"Acid",
    u"House",
    u"Game",
    u"Sound Clip",
    u"Gospel",
    u"Noise",
    u"Alt. Rock",
    u"Bass",
    u"Soul",
    u"Punk",
    u"Space",
    u"Meditative",
    u"Instrumental Pop",
    u"Instrumental Rock",
    u"Ethnic",
    u"Gothic",
    u"Darkwave",
    u"Techno-Industrial",
    u"Electronic",
    u"Pop-Folk",
    u"Eurodance",
    u"Dream",
    u"Southern Rock",
    u"Comedy",
    u"Cult",
    u"Gangsta Rap",
    u"Top 40",
    u"Christian Rap",
    u"Pop/Funk",
    u"Jungle",
    u"Native American",
    u"Cabaret",
    u"New Wave",
    u"Psychedelic",
    u"Rave",
    u"Showtunes",
    u"Trailer",
    u"Lo-Fi",
    u"Tribal",
    u"Acid Punk",
    u"Acid Jazz",
    u"Polka",
    u"Retro",
    u"Musical",
    u"Rock & Roll",
    u"Hard Rock",
    u"Folk",
    u"Folk-Rock",
    u"National Folk",
    u"Swing",
    u"Fast-Fusion",
    u"Bebop",
    u"Latin",
    u"Revival",
    u"Celtic",
    u"Bluegrass",
    u"Avantgarde",
    u"Gothic Rock",
    u"Progressive Rock",
    u"Psychedelic Rock",
    u"Symphonic Rock",
    u"Slow Rock",
    u"Big Band",
    u"Chorus",
    u"Easy Listening",
    u"Acoustic",
    u"Humour",
    u"Speech",
    u"Chanson",
    u"Opera",
    u"Chamber Music",
    u"Sonata",
    u"Symphony",
    u"Booty Bass",
    u"Primus",
    u"Porn Groove",
    u"Satire",
    u"Slow Jam",
    u"Club",
    u"Tango",
    u"Samba",
    u"Folklore",
    u"Ballad",
    u"Power Ballad",
    u"Rhythmic Soul",
    u"Freestyle",
    u"Duet",
    u"Punk Rock",
    u"Drum Solo",
    u"A Cappella",
    u"Euro-House",
    u"Dance Hall",
    u"Goa",
    u"Drum & Bass",
    u"Club-House",
    u"Hardcore",
    u"Terror",
    u"Indie",
    u"BritPop",
    u"Afro-Punk",
    u"Polsk Punk",
    u"Beat",
    u"Christian Gangsta Rap",
    u"Heavy Metal",
    u"Black Metal",
    u"Crossover",
    u"Contemporary Christian",
    u"Christian Rock",
    u"Merengue",
    u"Salsa",
    u"Thrash Metal",
    u"Anime",
    u"JPop",
    u"Synthpop",
    u"Abstract",
    u"Art Rock",
    u"Baroque",
    u"Bhangra",
    u"Big Beat",
    u"Breakbeat",
    u"Chillout",
    u"Downtempo",
    u"Dub",
    u"EBM",
    u"Eclectic",
    u"Electro",
    u"Electroclash",
    u"Emo",
    u"Experimental",
    u"Garage",
    u"Global",
    u"IDM",
    u"Illbient",
    u"Industro-Goth",
    u"Jam Band",
    u"Krautrock",
    u"Leftfield",
    u"Lounge",
    u"Math Rock",
    u"New Romantic",
    u"Nu-Breakz",
    u"Post-Punk",
    u"Post-Rock",
    u"Psytrance",
    u"Shoegaze",
    u"Space Rock",
    u"Trop Rock",
    u"World Music",
    u"Neoclassical",
    u"Audiobook",
    u"Audio Theatre",
    u"Neue Deutsche Welle",
    u"Podcast",
    u"Indie Rock",
    u"G-Funk",
    u"Dubstep",
    u"Garage Rock",
    u"Psybient",
]

"""The ID3v1 genre list."""
| gpl-2.0 |
navodissa/python-flask | flask/lib/python2.7/site-packages/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
# Shared environment for all test cases in this module.
env = Environment()
class ForLoopTestCase(JinjaTestCase):
    def test_simple(self):
        # Basic for-loop iteration over a sequence.
        tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
        assert tmpl.render(seq=list(range(10))) == '0123456789'
    def test_else(self):
        # The {% else %} branch runs when the sequence is missing/empty.
        tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
        assert tmpl.render() == '...'
    def test_empty_blocks(self):
        # Empty for/else bodies must still parse and render.
        tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
        assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
    """Exercise the ``{% if %}`` / ``{% elif %}`` / ``{% else %}`` tags."""

    def test_simple(self):
        template = env.from_string('''{% if true %}...{% endif %}''')
        assert template.render() == '...'

    def test_elif(self):
        template = env.from_string('''{% if false %}XXX{% elif true
            %}...{% else %}XXX{% endif %}''')
        assert template.render() == '...'

    def test_else(self):
        template = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
        assert template.render() == '...'

    def test_empty(self):
        # completely empty branches are legal
        template = env.from_string('[{% if true %}{% else %}{% endif %}]')
        assert template.render() == '[]'

    def test_complete(self):
        # falsy int, False, then a true comparison: the third branch wins
        template = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
                                   'C{% else %}D{% endif %}')
        rendered = template.render(a=0, b=False, c=42, d=42.0)
        assert rendered == 'C'

    def test_no_scope(self):
        # `set` inside an if block is visible after the block closes
        template = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert template.render(a=True) == '1'
        template = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert template.render() == '1'
class MacrosTestCase(JinjaTestCase):
    """Exercise ``{% macro %}``, ``{% call %}`` and macro introspection.

    Uses its own environment with trim_blocks so the newline after each
    block tag is swallowed; the backslash continuations inside the
    template literals are deliberate and part of the expected output.
    """
    env = Environment(trim_blocks=True)

    def test_simple(self):
        tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
        assert tmpl.render() == 'Hello Peter!'

    def test_scoping(self):
        # inner macros close over the outer macro's arguments
        tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
        assert tmpl.render() == 'foo|bar'

    def test_arguments(self):
        # missing positional args render as empty; defaults fill the rest
        tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
        assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'

    def test_varargs(self):
        # extra positional args are collected in `varargs`
        tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
        assert tmpl.render() == '1|2|3'

    def test_simple_call(self):
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
        assert tmpl.render() == '[[data]]'

    def test_complex_call(self):
        # the call block can itself take arguments from caller()
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
        assert tmpl.render() == '[[data]]'

    def test_caller_undefined(self):
        # a user variable named `caller` must not masquerade as a caller
        tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
        assert tmpl.render() == 'True'

    def test_include(self):
        self.env = Environment(loader=DictLoader({'include':
            '{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
        tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
        assert tmpl.render() == '[foo]'

    def test_macro_api(self):
        # introspection attributes exposed on macro objects
        tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
                                    '{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
                                    '{% macro baz() %}{{ caller() }}{% endmacro %}')
        assert tmpl.module.foo.arguments == ('a', 'b')
        assert tmpl.module.foo.defaults == ()
        assert tmpl.module.foo.name == 'foo'
        assert not tmpl.module.foo.caller
        assert not tmpl.module.foo.catch_kwargs
        assert not tmpl.module.foo.catch_varargs
        assert tmpl.module.bar.arguments == ()
        assert tmpl.module.bar.defaults == ()
        assert not tmpl.module.bar.caller
        assert tmpl.module.bar.catch_kwargs
        assert tmpl.module.bar.catch_varargs
        assert tmpl.module.baz.caller

    def test_callself(self):
        # macros may recurse by name
        tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
                                    '{{ foo(x - 1) }}{% endif %}{% endmacro %}'
                                    '{{ foo(5) }}')
        assert tmpl.render() == '5|4|3|2|1'
def suite():
    """Collect this module's test cases into a single unittest suite."""
    result = unittest.TestSuite()
    # order matters only for readability of the test output
    for case in (ForLoopTestCase, IfConditionTestCase, MacrosTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
| bsd-3-clause |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/05 citi/parseJSON.py | 26 | 1412 |
def getSocialData(post):
    """Return the aggregate social-impact score for one crawled post.

    The score is the sum of every tracked engagement counter found under
    ``post["thread"]["social"]``: facebook likes/comments/shares plus the
    share counts of gplus, pinterest, linkedin, stumbledupon and vk.

    :param post: decoded JSON dict for a single post (webhose.io layout)
    :returns: integer social-impact score
    :raises KeyError: if the expected nested structure is missing
    """
    # NOTE: the original also read thread["domain_rank"] into an unused
    # local; that dead lookup has been dropped.
    social = post["thread"]["social"]
    facebook = social["facebook"]
    # (counter-dict, metric-name) pairs that contribute to the score
    metrics = [
        (facebook, "likes"),
        (facebook, "comments"),
        (facebook, "shares"),
        (social["gplus"], "shares"),
        (social["pinterest"], "shares"),
        (social["linkedin"], "shares"),
        (social["stumbledupon"], "shares"),
        (social["vk"], "shares"),
    ]
    return sum(counters[name] for counters, name in metrics)
jonzobrist/Percona-Server-5.1 | kewpie/lib/sys_mgmt/port_management.py | 1 | 6830 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""port_management.py
code for dealing with the various tasks
around handing out and managing server ports
that we need to run tests
"""
# imports
import os
import sys
class portManager:
    """ class for doing the work of handing out and tracking ports """
    # Ports are "locked" by creating marker files in /tmp named
    # <file_prefix>_<user>_<port>; existence of the file claims the port.

    def __init__(self, system_manager, debug = 0):
        # This is a file that can be read into a dictionary
        # it is in port:owner format
        self.skip_keys = [ 'port_file_delimiter'
                         , 'system_manager'
                         ]
        self.working_dir = "/tmp"
        self.file_prefix = "dbqp_port"
        self.port_file_delimiter = ':' # what we use to separate port:owner
        self.debug = debug
        self.logging = system_manager.logging
        self.system_manager = system_manager
        self.logging.debug_class(self)

    def get_port_block(self, requester, base_port, block_size):
        """ Try to return a block of ports of size
            block_size, starting with base_port

            We take a target port and increment it
            until we find an unused port.  We make
            no guarantee of continuous ports, only
            that we will try to return block_size
            ports for use

            We can probably get fancier / smarter in the future
            but this should work for now :-/

        """
        assigned_ports = []
        current_port = base_port
        while len(assigned_ports) != block_size:
            new_port = (self.get_port(requester, current_port))
            assigned_ports.append(new_port)
            # resume the scan just above the port we were granted
            current_port = new_port+1
        return assigned_ports

    def get_port(self, requester, desired_port):
        """ Try to lock in the desired_port
            if not, we increment the value until
            we find an unused port.
            We take max / min port values from test-run.pl
            This is a bit bobo, but will work for now...

        """
        searching_for_port = 1
        attempt_count = 5000
        attempts_remain = attempt_count
        max_port_value = 32767
        min_port_value = 5001
        while searching_for_port and attempts_remain:
            # Check if the port is used
            if self.check_port_status(desired_port):
                # assign it
                self.assign_port(requester, desired_port)
                return desired_port
            else: # increment the port and try again
                desired_port = desired_port + 1
                if desired_port >= max_port_value:
                    # wrap around to the bottom of the usable range
                    desired_port = min_port_value
                attempts_remain = attempts_remain - 1
        # every attempt exhausted without a free port: fatal
        self.logging.error("Failed to assign a port in %d attempts" %attempt_count)
        sys.exit(1)

    def check_port_status(self, port):
        """ Check if a port is in use, via the port files
            which all copies of dbqp.py should use

            Not *really* sure how well this works with multiple
            dbqp.py instances...we'll see if we even need it
            to work

        """
        # check existing ports dbqp has created
        dbqp_ports = self.check_dbqp_ports()
        # free only if neither our lock files nor the OS claim it
        if port not in dbqp_ports and not self.is_port_used(port):
            return 1
        else:
            return 0

    def is_port_used(self, port):
        """ See if a given port is used on the system """
        retcode, output = self.system_manager.execute_cmd("netstat -lant")
        # parse our output
        entry_list = output.split("\n")
        good_data = 0
        for entry in entry_list:
            if entry.startswith('Proto'):
                # header row: real data starts on the next line
                good_data = 1
            elif good_data:
                # We try to catch additional output
                # like we see with freebsd
                if entry.startswith('Active'):
                    good_data = 0
                    pass
                else:
                    # BSD-family netstat separates addr/port with '.'
                    if self.system_manager.cur_os in [ 'FreeBSD'
                                                     , 'Darwin'
                                                     ]:
                        split_token = '.'
                    else:
                        split_token = ':'
                    # column 3 is the local address; last token is the port
                    port_candidate = entry.split()[3].split(split_token)[-1].strip()
                    if port_candidate.isdigit():
                        used_port = int(port_candidate)
                    else:
                        used_port = None # not a value we can use
                    if port == used_port:
                        # TIME_WAIT sockets are treated as reusable
                        if entry.split()[-1] != "TIME_WAIT":
                            return 1
        return 0

    def check_dbqp_ports(self):
        """ Scan the files in /tmp for those files named
            dbqp_port_NNNN.  Existence indicates said port is 'locked'

        """
        used_ports = []
        tmp_files = os.listdir('/tmp')
        for tmp_file in tmp_files:
            if tmp_file.startswith('dbqp_port'):
                # trailing _<port> component of the file name
                used_ports.append(int(tmp_file.split('_')[-1]))
        return used_ports

    def assign_port(self, owner, port):
        """Assigns a port - create a tmpfile
           with a name that 'logs' the port
           as being used

        """
        out_file = open(self.get_file_name(port),'w')
        out_file.write("%s:%d\n" %(owner, port))
        out_file.close()

    def free_ports(self, portlist):
        """ Clean up our ports """
        for port in portlist:
            self.free_port(port)

    def free_port(self, port):
        """ Free a single port - we delete the file
            that 'locks' it

        """
        self.logging.debug("Freeing port %d" %(port))
        os.remove(self.get_file_name(port))

    def get_file_name(self, port):
        """ We generate a file name for the port """
        port_file_name = "%s_%s_%d" %(self.file_prefix, self.system_manager.cur_user, port )
        return os.path.join(self.working_dir, port_file_name)
| bsd-3-clause |
MySQLOnRocksDB/mysql-5.6 | xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/bug884737_test.py | 24 | 4796 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import time
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
# NOTE(review): these globals are read by basicTest but never assigned in
# this file — presumably the kewpie runner injects servers/test_executor
# before the tests run; confirm against the harness.
server_requirements = [[]]  # one server with no special options
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
    """Regression test for LP bug 884737: innobackupex with --parallel=-1
    must warn and clamp to 1 instead of asserting/crashing, and the
    resulting backup must still be usable for a full restore cycle."""

    def setUp(self):
        master_server = servers[0] # assumption that this is 'master'
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        # remove backup path
        if os.path.exists(backup_path):
            shutil.rmtree(backup_path)

    def test_bug884737(self):
        """ xtrabackup's --parallel option asserts / crashes with a value of -1 """
        self.servers = servers
        logging = test_executor.logging
        # only meaningful for mysql/percona flavored servers
        if servers[0].type not in ['mysql','percona']:
            return
        else:
            innobackupex = test_executor.system_manager.innobackupex_path
            xtrabackup = test_executor.system_manager.xtrabackup_path
            master_server = servers[0] # assumption that this is 'master'
            backup_path = os.path.join(master_server.vardir, '_xtrabackup')
            output_path = os.path.join(master_server.vardir, 'innobackupex.out')
            exec_path = os.path.dirname(innobackupex)

            # populate our server with a test bed
            test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
            retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)

            # take a backup
            # for -1, it defaults to 1
            cmd = [ innobackupex
                  , "--defaults-file=%s" %master_server.cnf_file
                  , "--parallel=-1"
                  , "--user=root"
                  , "--port=%d" %master_server.master_port
                  , "--host=127.0.0.1"
                  , "--no-timestamp"
                  , "--ibbackup=%s" %xtrabackup
                  , backup_path
                  ]
            cmd = " ".join(cmd)
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            self.assertEqual(retcode,0,output)
            # the fix: the bogus value is adjusted, with a warning
            expected_warning = "Warning: option 'parallel': signed value -1 adjusted to 1"
            self.assertTrue(expected_warning in output, output)

            # stop the server
            master_server.stop()

            # do prepare on backup
            cmd = [ innobackupex
                  , "--apply-log"
                  , "--no-timestamp"
                  , "--use-memory=500M"
                  , "--ibbackup=%s" %xtrabackup
                  , backup_path
                  ]
            cmd = " ".join(cmd)
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            self.assertTrue(retcode==0,output)

            # remove old datadir
            shutil.rmtree(master_server.datadir)
            os.mkdir(master_server.datadir)

            # restore from backup
            cmd = [ innobackupex
                  , "--defaults-file=%s" %master_server.cnf_file
                  , "--copy-back"
                  , "--ibbackup=%s" %(xtrabackup)
                  , backup_path
                  ]
            cmd = " ".join(cmd)
            retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
            self.assertEqual(retcode,0, output)

            # restart server (and ensure it doesn't crash)
            master_server.start()
            self.assertEqual(master_server.status,1, 'Server failed restart from restored datadir...')

            # Check the server is ok
            query = "SELECT COUNT(*) FROM test.DD"
            expected_output = ((100L,),)
            retcode, output = self.execute_query(query, master_server)
            self.assertEqual(output, expected_output, msg = "%s || %s" %(output, expected_output))
| gpl-2.0 |
smolix/incubator-mxnet | python/mxnet/symbol_doc.py | 44 | 10153 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=unused-argument, too-many-arguments
"""Extra symbol documents
Guidelines
----------
To add extra doc to the operator `XXX`, write a class `XXXDoc`, deriving
from the base class `SymbolDoc`, and put the extra doc as the docstring
of `XXXDoc`.
The document added here should be Python-specific. Documents that are useful
for all language bindings should be added to the C++ side where the operator
is defined / registered.
The code snippet in the docstring will be run using `doctest`. During running,
the environment will have access to
- all the global names in this file (e.g. `SymbolDoc`)
- all the operators (e.g. `FullyConnected`)
- the name `test_utils` for `mx.test_utils` (e.g. `test_utils.reldiff`)
- the name `mx` (e.g. `mx.nd.zeros`)
- the name `np`
The following documents are recommended:
- *Examples*: simple and short code snippet showing how to use this operator.
It should show typical calling examples and behaviors (e.g. maps an input
of what shape to an output of what shape).
- *Regression Test*: longer test code for the operators. We normally do not
expect the users to read those, but they will be executed by `doctest` to
ensure the behavior of each operator does not change unintentionally.
"""
from __future__ import absolute_import as _abs
import re as _re
from .base import build_param_doc as _build_param_doc
class SymbolDoc(object):
    """The base class for attaching doc to operators."""

    @staticmethod
    def get_output_shape(sym, **input_shapes):
        """Get user friendly information of the output shapes."""
        _, inferred_shapes, _ = sym.infer_shape(**input_shapes)
        output_names = sym.list_outputs()
        return {name: shape
                for name, shape in zip(output_names, inferred_shapes)}
class ActivationDoc(SymbolDoc):
    # NOTE: this docstring is runtime data — it is appended to the
    # Activation operator's doc by _build_doc and executed by doctest.
    """
    Examples
    --------
    A one-hidden-layer MLP with ReLU activation:

    >>> data = Variable('data')
    >>> mlp = FullyConnected(data=data, num_hidden=128, name='proj')
    >>> mlp = Activation(data=mlp, act_type='relu', name='activation')
    >>> mlp = FullyConnected(data=mlp, num_hidden=10, name='mlp')
    >>> mlp
    <Symbol mlp>

    Regression Test
    ---------------
    ReLU activation

    >>> test_suites = [
    ...     ('relu', lambda x: np.maximum(x, 0)),
    ...     ('sigmoid', lambda x: 1 / (1 + np.exp(-x))),
    ...     ('tanh', lambda x: np.tanh(x)),
    ...     ('softrelu', lambda x: np.log(1 + np.exp(x)))
    ... ]
    >>> x = test_utils.random_arrays((2, 3, 4))
    >>> for act_type, numpy_impl in test_suites:
    ...     op = Activation(act_type=act_type, name='act')
    ...     y = test_utils.simple_forward(op, act_data=x)
    ...     y_np = numpy_impl(x)
    ...     print('%s: %s' % (act_type, test_utils.almost_equal(y, y_np)))
    relu: True
    sigmoid: True
    tanh: True
    softrelu: True
    """
class DropoutDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); keep runnable.
    """
    Examples
    --------
    Apply dropout to corrupt input as zero with probability 0.2:

    >>> data = Variable('data')
    >>> data_dp = Dropout(data=data, p=0.2)

    Regression Test
    ---------------
    >>> shape = (100, 100)  # take larger shapes to be more statistical stable
    >>> x = np.ones(shape)
    >>> op = Dropout(p=0.5, name='dp')
    >>> # dropout is identity during testing
    >>> y = test_utils.simple_forward(op, dp_data=x, is_train=False)
    >>> test_utils.almost_equal(x, y)
    True
    >>> y = test_utils.simple_forward(op, dp_data=x, is_train=True)
    >>> # expectation is (approximately) unchanged
    >>> np.abs(x.mean() - y.mean()) < 0.1
    True
    >>> set(np.unique(y)) == set([0, 2])
    True
    """
class EmbeddingDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); keep runnable.
    """
    Examples
    --------
    Assume we want to map the 26 English alphabet letters to 16-dimensional
    vectorial representations.

    >>> vocabulary_size = 26
    >>> embed_dim = 16
    >>> seq_len, batch_size = (10, 64)
    >>> input = Variable('letters')
    >>> op = Embedding(data=input, input_dim=vocabulary_size, output_dim=embed_dim,
    ...                name='embed')
    >>> SymbolDoc.get_output_shape(op, letters=(seq_len, batch_size))
    {'embed_output': (10L, 64L, 16L)}

    Regression Test
    ---------------
    >>> vocab_size, embed_dim = (26, 16)
    >>> batch_size = 12
    >>> word_vecs = test_utils.random_arrays((vocab_size, embed_dim))
    >>> op = Embedding(name='embed', input_dim=vocab_size, output_dim=embed_dim)
    >>> x = np.random.choice(vocab_size, batch_size)
    >>> y = test_utils.simple_forward(op, embed_data=x, embed_weight=word_vecs)
    >>> y_np = word_vecs[x]
    >>> test_utils.almost_equal(y, y_np)
    True
    """
class FlattenDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); keep runnable.
    """
    Examples
    --------
    Flatten is usually applied before `FullyConnected`, to reshape the 4D tensor
    produced by convolutional layers to 2D matrix:

    >>> data = Variable('data')  # say this is 4D from some conv/pool
    >>> flatten = Flatten(data=data, name='flat')  # now this is 2D
    >>> SymbolDoc.get_output_shape(flatten, data=(2, 3, 4, 5))
    {'flat_output': (2L, 60L)}

    Regression Test
    ---------------
    >>> test_dims = [(2, 3, 4, 5), (2, 3), (2,)]
    >>> op = Flatten(name='flat')
    >>> for dims in test_dims:
    ...     x = test_utils.random_arrays(dims)
    ...     y = test_utils.simple_forward(op, flat_data=x)
    ...     y_np = x.reshape((dims[0], np.prod(dims[1:]).astype('int32')))
    ...     print('%s: %s' % (dims, test_utils.almost_equal(y, y_np)))
    (2, 3, 4, 5): True
    (2, 3): True
    (2,): True
    """
class FullyConnectedDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); keep runnable.
    """
    Examples
    --------
    Construct a fully connected operator with target dimension 512.

    >>> data = Variable('data')  # or some constructed NN
    >>> op = FullyConnected(data=data,
    ...                     num_hidden=512,
    ...                     name='FC1')
    >>> op
    <Symbol FC1>
    >>> SymbolDoc.get_output_shape(op, data=(128, 100))
    {'FC1_output': (128L, 512L)}

    A simple 3-layer MLP with ReLU activation:

    >>> net = Variable('data')
    >>> for i, dim in enumerate([128, 64]):
    ...     net = FullyConnected(data=net, num_hidden=dim, name='FC%d' % i)
    ...     net = Activation(data=net, act_type='relu', name='ReLU%d' % i)
    >>> # 10-class predictor (e.g. MNIST)
    >>> net = FullyConnected(data=net, num_hidden=10, name='pred')
    >>> net
    <Symbol pred>

    Regression Test
    ---------------
    >>> dim_in, dim_out = (3, 4)
    >>> x, w, b = test_utils.random_arrays((10, dim_in), (dim_out, dim_in), (dim_out,))
    >>> op = FullyConnected(num_hidden=dim_out, name='FC')
    >>> out = test_utils.simple_forward(op, FC_data=x, FC_weight=w, FC_bias=b)
    >>> # numpy implementation of FullyConnected
    >>> out_np = np.dot(x, w.T) + b
    >>> test_utils.almost_equal(out, out_np)
    True
    """
def _build_doc(func_name,
               desc,
               arg_names,
               arg_types,
               arg_desc,
               key_var_num_args=None,
               ret_type=None):
    """Build docstring for symbolic functions.

    Combines the registered C++-side description/parameter docs with any
    Python-side extra doc found in a ``<func_name>Doc`` subclass of
    SymbolDoc.  ``ret_type`` is accepted for API symmetry but unused here.
    """
    param_str = _build_param_doc(arg_names, arg_types, arg_desc)
    if key_var_num_args:
        desc += '\nThis function support variable length of positional input.'
    doc_str = ('%s\n\n' +
               '%s\n' +
               'name : string, optional.\n' +
               '    Name of the resulting symbol.\n\n' +
               'Returns\n' +
               '-------\n' +
               'Symbol\n' +
               '    The result symbol.')
    doc_str = doc_str % (desc, param_str)
    # pick up the matching <func_name>Doc class, if one was defined above
    extra_doc = "\n" + '\n'.join([x.__doc__ for x in type.__subclasses__(SymbolDoc)
                                  if x.__name__ == '%sDoc' % func_name])
    # strip the class-level 4-space indentation from the extra doc
    # NOTE(review): whitespace inside these literals was collapsed in this
    # copy; restored to the conventional 4-space form — verify upstream.
    doc_str += _re.sub(_re.compile("    "), "", extra_doc)
    doc_str = _re.sub('NDArray-or-Symbol', 'Symbol', doc_str)
    return doc_str
class ConcatDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); keep runnable.
    """
    Examples
    --------
    Concat two (or more) inputs along a specific dimension:

    >>> a = Variable('a')
    >>> b = Variable('b')
    >>> c = Concat(a, b, dim=1, name='my-concat')
    >>> c
    <Symbol my-concat>
    >>> SymbolDoc.get_output_shape(c, a=(128, 10, 3, 3), b=(128, 15, 3, 3))
    {'my-concat_output': (128L, 25L, 3L, 3L)}

    Note the shape should be the same except on the dimension that is being
    concatenated.
    """
class BroadcastPlusDoc(SymbolDoc):
    # NOTE: docstring is runtime data (doctest + operator docs); the array
    # output lines below were whitespace-collapsed in this copy and have
    # been restored to numpy's conventional repr — verify against upstream.
    """
    Examples
    --------
    >>> a = Variable('a')
    >>> b = Variable('b')
    >>> c = broadcast_plus(a, b)

    Normal summation with matching shapes:

    >>> dev = mx.context.cpu();
    >>> x = c.bind(dev, args={'a': mx.nd.ones((2, 2)), 'b' : mx.nd.ones((2, 2))})
    >>> x.forward()
    [<NDArray 2x2 @cpu(0)>]
    >>> print x.outputs[0].asnumpy()
    [[ 2.  2.]
     [ 2.  2.]]

    Broadcasting:

    >>> x = c.bind(dev, args={'a': mx.nd.ones((2, 2)), 'b' : mx.nd.ones((1, 1))})
    >>> x.forward()
    [<NDArray 2x2 @cpu(0)>]
    >>> print x.outputs[0].asnumpy()
    [[ 2.  2.]
     [ 2.  2.]]

    >>> x = c.bind(dev, args={'a': mx.nd.ones((2, 1)), 'b' : mx.nd.ones((1, 2))})
    >>> x.forward()
    [<NDArray 2x2 @cpu(0)>]
    >>> print x.outputs[0].asnumpy()
    [[ 2.  2.]
     [ 2.  2.]]

    >>> x = c.bind(dev, args={'a': mx.nd.ones((1, 2)), 'b' : mx.nd.ones((2, 1))})
    >>> x.forward()
    [<NDArray 2x2 @cpu(0)>]
    >>> print x.outputs[0].asnumpy()
    [[ 2.  2.]
     [ 2.  2.]]
    """
| apache-2.0 |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 30 | 3738 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
# pylint: disable=g-import-not-at-top
try:
    import pandas as pd
    HAS_PANDAS = True
except ImportError:
    # pandas is optional; PandasSourceTestCase no-ops without it
    HAS_PANDAS = False
def get_rows(array, row_indices):
    """Stack the rows of `array` selected by `row_indices` into a 2-D array."""
    selected = [array[idx] for idx in row_indices]
    return np.vstack(selected)
class NumpySourceTestCase(tf.test.TestCase):
    """Check that NumpySource feeds batches that cycle through the array
    in order, with matching index and value columns."""

    def testNumpySource(self):
        batch_size = 3
        iterations = 1000
        array = np.arange(32).reshape([16, 2])
        numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
        index_column = numpy_source().index
        value_column = numpy_source().value
        cache = {}
        with tf.Graph().as_default():
            value_tensor = value_column.build(cache)
            index_tensor = index_column.build(cache)
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                for i in range(iterations):
                    # the feeder wraps around: row j of batch i is
                    # (batch_size * i + j) mod len(array)
                    expected_index = [
                        j % array.shape[0]
                        for j in range(batch_size * i, batch_size * (i + 1))
                    ]
                    expected_value = get_rows(array, expected_index)
                    actual_index, actual_value = sess.run([index_tensor, value_tensor])
                    np.testing.assert_array_equal(expected_index, actual_index)
                    np.testing.assert_array_equal(expected_value, actual_value)
                coord.request_stop()
                coord.join(threads)
class PandasSourceTestCase(tf.test.TestCase):
    """Check that PandasSource feeds DataFrame rows (index plus one tensor
    per column) in wrap-around order.  Skipped silently without pandas."""

    def testPandasFeeding(self):
        if not HAS_PANDAS:
            return
        batch_size = 3
        iterations = 1000
        index = np.arange(100, 132)
        a = np.arange(32)
        b = np.arange(32, 64)
        dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
        pandas_source = in_memory_source.PandasSource(dataframe,
                                                      batch_size=batch_size)
        pandas_columns = pandas_source()
        cache = {}
        with tf.Graph().as_default():
            pandas_tensors = [col.build(cache) for col in pandas_columns]
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                for i in range(iterations):
                    # positional row numbers for this batch, wrapping around
                    indices = [j % dataframe.shape[0]
                               for j in range(batch_size * i, batch_size * (i + 1))]
                    expected_df_indices = dataframe.index[indices]
                    expected_rows = dataframe.iloc[indices]
                    actual_value = sess.run(pandas_tensors)
                    # tensor 0 is the index; tensors 1.. are the columns
                    np.testing.assert_array_equal(expected_df_indices, actual_value[0])
                    for col_num, col in enumerate(dataframe.columns):
                        np.testing.assert_array_equal(expected_rows[col].values,
                                                      actual_value[col_num + 1])
                coord.request_stop()
                coord.join(threads)
if __name__ == "__main__":
    # standard TensorFlow test entry point
    tf.test.main()
| apache-2.0 |
40223136/w17test1 | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/__init__.py | 603 | 6082 | ## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
class MissingModule:
    """Placeholder for an optional pygame submodule that failed to import.

    An instance warns on first use and raises ``NotImplementedError`` on any
    attribute access, so touching a missing module fails loudly instead of
    with a confusing ``AttributeError``. It also tests as False so code can
    probe availability with ``if pygame.mixer: ...``.
    """
    def __init__(self, name, info='', urgent=0):
        """
        :param name: name of the missing module (e.g. ``'mixer'``).
        :param info: original import error text, kept for the warning message.
        :param urgent: when truthy, emit the warning immediately.
        """
        self.name = name
        self.info = str(info)
        self.urgent = urgent
        if urgent:
            self.warn()
    def __getattr__(self, var):
        # Warn once on first (non-urgent) use, then always raise.
        if not self.urgent:
            self.warn()
            self.urgent = 1
        MissingPygameModule = "%s module not available" % self.name
        raise NotImplementedError(MissingPygameModule)
    def __nonzero__(self):
        # Python 2 truth-value hook: a missing module tests as False.
        return 0
    def __bool__(self):
        # Python 3 equivalent of __nonzero__ (must return a real bool).
        # Without this, Python 3 would consider the instance truthy.
        return False
    def warn(self):
        # 'import' marks a failure reported at import time, 'use' a lazy one.
        kind = 'import' if self.urgent else 'use'
        message = '%s %s: %s' % (kind, self.name, self.info)
        try:
            import warnings
            # Point the warning at the caller's frame, not this helper.
            level = 4 if self.urgent else 3
            warnings.warn(message, RuntimeWarning, level)
        except ImportError:
            print(message)
#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import * #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver
#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)
try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)
try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)
try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)
try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)
try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)
try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)
try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)
try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)
try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)
try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function
try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function
try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)
try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)
#lastly, the "optional" pygame modules
try:
import pygame.font
import pygame.sysfont
pygame.font.SysFont = pygame.sysfont.SysFont
pygame.font.get_fonts = pygame.sysfont.get_fonts
pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)
try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)
#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)
try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)
try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)
#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass
try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass
def packager_imports():
"""
Some additional things that py2app/py2exe will want to see
"""
import OpenGL.GL
'''
#make Rects pickleable
import copyreg
def __rect_constructor(x,y,w,h):
    # Rebuild a Rect from its four scalar components during unpickling.
    return Rect(x,y,w,h)
def __rect_reduce(r):
    # Reduce a Rect to a (callable, args) pair as required by the pickle
    # protocol; only exact Rect instances are supported.
    assert type(r) == Rect
    return __rect_constructor, (r.x, r.y, r.w, r.h)
# Register the reducer so Rect instances can be pickled and copied.
copyreg.pickle(Rect, __rect_reduce, __rect_constructor)
#cleanup namespace
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
| gpl-3.0 |
brycefrank/pyfor | pyfor/rasterizer.py | 1 | 12374 | # Functions for rasterizing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyfor import gisexport
import pyfor.metrics
class Grid:
    """The Grid object is a representation of a point cloud that has been sorted into X and Y dimensional bins. From \
    the Grid object we can derive other useful products, most importantly, :class:`.Raster` objects.
    """
    def __init__(self, cloud, cell_size):
        """
        Upon initialization, the parent cloud object's :attr:`data.points` attribute is sorted into bins in place. \
        The columns 'bins_x' and 'bins_y' are appended. Other useful information, such as the resolution, number of \
        rows and columns, are also stored.
        :param cloud: The "parent" cloud object.
        :param cell_size: The size of the cell for sorting in the units of the input cloud object.
        """
        self.cloud = cloud
        self.cell_size = cell_size
        min_x, max_x = self.cloud.data.min[0], self.cloud.data.max[0]
        min_y, max_y = self.cloud.data.min[1], self.cloud.data.max[1]
        # Number of rows (m) and columns (n) needed to tile the extent.
        self.m = int(np.ceil((max_y - min_y) / cell_size))
        self.n = int(np.ceil((max_x - min_x) / cell_size))
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `int` is the equivalent dtype specifier.
        self.cloud.data.points.loc[:, "bins_x"] = (np.floor((self.cloud.data.points["x"].values - min_x) / self.cell_size)).astype(int)
        # y bins count down from max_y so that bin 0 is the top row.
        self.cloud.data.points.loc[:, "bins_y"] = (np.floor((max_y - self.cloud.data.points["y"].values) / self.cell_size)).astype(int)
        self.cells = self.cloud.data.points.groupby(["bins_x", "bins_y"])
    def _update(self):
        # Re-sync with the parent cloud and recompute the binning in place.
        self.cloud.data._update()
        self.__init__(self.cloud, self.cell_size)
    def raster(self, func, dim, **kwargs):
        """
        Generates an m x n matrix with values as calculated for each cell in func. This is a raw array without \
        missing cells interpolated. See self.interpolate for interpolation methods.
        :param func: A function string, i.e. "max" or a function itself, i.e. :func:`np.max`. This function must be \
        able to take a 1D array of the given dimension as an input and produce a single value as an output. This \
        single value will become the value of each cell in the array.
        :param dim: A dimension to calculate on.
        :return: A :class:`.Raster` object where the value of each cell is the result of the passed function.
        """
        bin_summary = self.cells.agg({dim: func}, **kwargs).reset_index()
        # Cells that received no points stay NaN.
        array = np.full((self.m, self.n), np.nan)
        array[bin_summary["bins_y"], bin_summary["bins_x"]] = bin_summary[dim]
        return Raster(array, self)
    @property
    def empty_cells(self):
        """
        Retrieves the cells with no returns in self.data
        :return: An N x 2 numpy array where each row corresponds to the [y x] coordinate of the empty cell.
        """
        array = self.raster("count", "z").array
        emptys = np.argwhere(np.isnan(array))
        return emptys
    def interpolate(self, func, dim, interp_method="nearest"):
        """
        Interpolates missing cells in the grid. This function uses scipy.griddata as a backend. Please see \
        documentation for that function for more details.
        :param func: The function (or function string) to calculate an array on the gridded data.
        :param dim: The dimension (i.e. column name of self.cells) to cast func onto.
        :param interp_method: The interpolation method call for scipy.griddata, one of any: "nearest", "cubic", \
        "linear"
        :return: An interpolated :class:`.Raster`.
        """
        from scipy.interpolate import griddata
        # Get points and values that we already have
        cell_values = self.cells[dim].agg(func).reset_index()
        points = cell_values[["bins_x", "bins_y"]].values
        values = cell_values[dim].values
        X, Y = np.mgrid[1 : self.n + 1, 1 : self.m + 1]
        # TODO generally a slow approach
        interp_grid = griddata(points, values, (X, Y), method=interp_method).T
        return Raster(interp_grid, self)
    def metrics(self, func_dict, as_raster=False):
        """
        Calculates summary statistics for each grid cell in the Grid.
        :param func_dict: A dictionary containing keys corresponding to the columns of self.data and values that \
        correspond to the functions to be called on those columns.
        :param as_raster: If True, return a DataFrame of :class:`.Raster` objects indexed by (dim, metric) instead \
        of the raw aggregated DataFrame.
        :return: A pandas dataframe with the aggregated metrics.
        """
        # Aggregate on the function
        aggregate = self.cells.agg(func_dict)
        if not as_raster:
            return aggregate
        rasters = []
        for column in aggregate:
            # Keyword arguments are required for DataFrame.pivot as of pandas 2.0
            # (positional index/columns were removed).
            array = np.asarray(
                aggregate[column].reset_index().pivot(index="bins_y", columns="bins_x")
            )
            raster = Raster(array, self)
            rasters.append(raster)
        # Get list of dimension names
        dims = [tup[0] for tup in list(aggregate)]
        # Get list of metric names
        metrics = [tup[1] for tup in list(aggregate)]
        return pd.DataFrame(
            {"dim": dims, "metric": metrics, "raster": rasters}
        ).set_index(["dim", "metric"])
    def standard_metrics(self, heightbreak=0):
        """Compute the package's standard suite of grid metrics.
        :param heightbreak: Height threshold forwarded to the metrics computation.
        """
        return pyfor.metrics.standard_metrics_grid(self, heightbreak=heightbreak)
class ImportedGrid(Grid):
    """
    ImportedGrid is used to normalize a parent cloud object with an arbitrary raster file.
    """
    def __init__(self, path, cloud):
        """
        :param path: Path to a raster file readable by rasterio.
        :param cloud: The "parent" cloud object whose points are binned into the raster's grid.
        :raises ValueError: If the input raster's cells are not square.
        """
        import rasterio
        self.in_raster = rasterio.open(path)
        # Check cell size; only square cells are supported.
        cell_size_x, cell_size_y = (
            self.in_raster.transform[0],
            abs(self.in_raster.transform[4]),
        )
        if cell_size_x != cell_size_y:
            # Raise with an informative message instead of printing and then
            # raising a bare ValueError; callers catching ValueError still work.
            raise ValueError(
                "Cell sizes of input raster are not equal, not supported."
            )
        cell_size = cell_size_x
        self.cloud = cloud
        self.cell_size = cell_size
        min_x, max_x = self.in_raster.bounds[0], self.in_raster.bounds[2]
        min_y, max_y = self.in_raster.bounds[1], self.in_raster.bounds[3]
        self.m = self.in_raster.height
        self.n = self.in_raster.width
        # Create bins aligned with the imported raster's cell edges.
        bins_x = np.searchsorted(
            np.linspace(min_x, max_x, self.n), self.cloud.data.points["x"]
        )
        bins_y = np.searchsorted(
            np.linspace(min_y, max_y, self.m), self.cloud.data.points["y"]
        )
        self.cloud.data.points["bins_x"] = bins_x
        self.cloud.data.points["bins_y"] = bins_y
        self.cells = self.cloud.data.points.groupby(["bins_x", "bins_y"])
    def _update(self):
        # NOTE(review): unlike Grid._update, the binning is not recomputed
        # here -- confirm whether that is intentional.
        self.cloud.data._update()
class Raster:
    """An m x n array of cell values tied to the :class:`.Grid` it was computed from.

    Holds the value array plus an affine transform describing where the array
    sits in the parent cloud's coordinate system.
    """
    def __init__(self, array, grid):
        """
        :param array: A 2D numpy array of cell values (NaN for empty cells).
        :param grid: The parent :class:`.Grid` object.
        """
        from rasterio.transform import from_origin
        self.grid = grid
        self.cell_size = self.grid.cell_size
        self.array = array
        # Affine transform mapping array indices to world coordinates; the
        # origin is the parent cloud's top-left corner (min x, max y).
        self._affine = from_origin(
            self.grid.cloud.data.min[0],
            self.grid.cloud.data.max[1],
            self.grid.cell_size,
            self.grid.cell_size,
        )
    @classmethod
    def from_rasterio(cls):
        # TODO: alternate constructor not yet implemented.
        pass
    def force_extent(self, bbox):
        """
        Sets `self._affine` and `self.array` to a forced bounding box. Useful for trimming edges off of rasters when
        processing buffered tiles. This operation is done in place.
        :param bbox: Coordinates of output raster as a tuple (min_x, max_x, min_y, max_y)
        """
        from rasterio.transform import from_origin
        new_left, new_right, new_bot, new_top = bbox
        m, n = self.array.shape[0], self.array.shape[1]
        # Manipulate the array to fit the new affine transformation
        old_left, old_top = self.grid.cloud.data.min[0], self.grid.cloud.data.max[1]
        old_right, old_bot = (
            old_left + n * self.grid.cell_size,
            old_top - m * self.grid.cell_size,
        )
        # Edge offsets between the old and requested extents, in world units...
        left_diff, top_diff, right_diff, bot_diff = (
            old_left - new_left,
            old_top - new_top,
            old_right - new_right,
            old_bot - new_bot,
        )
        # ...then rounded to whole cell counts.
        left_diff, top_diff, right_diff, bot_diff = (
            int(np.rint(left_diff / self.cell_size)),
            int(np.rint(top_diff / self.cell_size)),
            int(np.rint(right_diff / self.cell_size)),
            int(np.rint(bot_diff / self.cell_size)),
        )
        if left_diff > 0:
            # bbox left is outside of raster left, we need to add columns of nans
            emptys = np.empty((m, left_diff))
            emptys[:] = np.nan
            self.array = np.insert(self.array, 0, np.transpose(emptys), axis=1)
        elif left_diff != 0:
            # bbox left is inside of raster left, we need to remove left diff columns
            self.array = self.array[:, abs(left_diff) :]
        if top_diff < 0:
            # bbox top is outside of raster top, we need to add rows of nans
            emptys = np.empty((abs(top_diff), self.array.shape[1]))
            emptys[:] = np.nan
            self.array = np.insert(self.array, 0, emptys, axis=0)
        elif top_diff != 0:
            # bbox top is inside of raster top, we need to remove rows of nans
            self.array = self.array[abs(top_diff) :, :]
        if right_diff < 0:
            # bbox right is outside of raster right, we need to add columns of nans
            emptys = np.empty((self.array.shape[0], abs(right_diff)))
            emptys[:] = np.nan
            self.array = np.append(self.array, emptys, axis=1)
        elif right_diff != 0:
            # bbox right is inside raster right, we need to remove columns
            self.array = self.array[:, :-right_diff]
        if bot_diff > 0:
            # bbox bottom is outside of raster bottom, we need to add rows of nans
            emptys = np.empty((abs(bot_diff), self.array.shape[1]))
            emptys[:] = np.nan
            self.array = np.append(self.array, emptys, axis=0)
        elif bot_diff != 0:
            # bbox bottom is inside of raster bottom, we need to remove columns
            self.array = self.array[:bot_diff, :]
        # Handle the affine transformation
        new_affine = from_origin(
            new_left, new_top, self.grid.cell_size, self.grid.cell_size
        )
        self._affine = new_affine
    def plot(self, cmap="viridis", block=False, return_plot=False):
        """
        Default plotting method for the Raster object.
        :param cmap: Colormap name (currently unused, see TODO below).
        :param block: Forwarded to ``plt.show`` to control blocking behavior.
        :param return_plot: If True, return the matplotlib axes instead of showing the figure.
        """
        # TODO implement cmap
        fig = plt.figure()
        ax = fig.add_subplot(111)
        caz = ax.matshow(self.array)
        fig.colorbar(caz)
        ax.xaxis.tick_bottom()
        # Replace index ticks with world coordinates at the extent edges/midpoint.
        ax.set_xticks(np.linspace(0, self.grid.n, 3))
        ax.set_yticks(np.flip(np.linspace(0, self.grid.m, 3)))
        x_ticks, y_ticks = (
            np.rint(
                np.linspace(self.grid.cloud.data.min[0], self.grid.cloud.data.max[0], 3)
            ),
            np.rint(
                np.linspace(self.grid.cloud.data.min[1], self.grid.cloud.data.max[1], 3)
            ),
        )
        ax.set_xticklabels(x_ticks)
        ax.set_yticklabels(y_ticks)
        if return_plot == True:
            return ax
        else:
            plt.show(block=block)
    def pit_filter(self, kernel_size):
        """
        Filters pits in the raster. Intended for use with canopy height models (i.e. grid(0.5).interpolate("max", "z").
        This function modifies the raster array **in place**.
        :param kernel_size: The size of the kernel window to pass over the array. For example 3 -> 3x3 kernel window.
        """
        from scipy.signal import medfilt2d
        self.array = medfilt2d(self.array, kernel_size=kernel_size)
    def write(self, path):
        """
        Writes the raster to a geotiff. Requires the Cloud.crs attribute to be filled by a projection string (ideally \
        wkt or proj4).
        :param path: The path to write to.
        """
        if not self.grid.cloud.crs:
            from warnings import warn
            warn(
                "No coordinate reference system defined. Please set the .crs attribute of the Cloud object.",
                UserWarning,
            )
        # Note: the write is still attempted even when no CRS is set.
        gisexport.array_to_raster(self.array, self._affine, self.grid.cloud.crs, path)
| mit |
LiJiefei/googlemock | test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Delegates to the shared Google Test helper so both frameworks agree.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  # Delegates to the shared Google Test helper, which also performs the
  # existence check described above.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns the
  # argument to exit() directly, so pass it through unmodified.
  if os.name == 'nt':
    return exit_code
  # On Unix the status is encoded; it is only meaningful when the process
  # actually terminated via exit().
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # All heavy lifting is shared with the Google Test scripts.
  gtest_test_utils.Main()
| bsd-3-clause |
jcherqui/searx | tests/unit/engines/test_spotify.py | 6 | 4843 | from collections import defaultdict
import mock
from searx.engines import spotify
from searx.testing import SearxTestCase
class TestSpotifyEngine(SearxTestCase):
    # Unit tests for the searx Spotify engine's request builder and
    # response parser.
    def test_request(self):
        # The built request URL must embed the query and target spotify.com.
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 0
        params = spotify.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('spotify.com', params['url'])
    def test_response(self):
        # Malformed inputs (no .text attribute) must raise AttributeError.
        self.assertRaises(AttributeError, spotify.response, None)
        self.assertRaises(AttributeError, spotify.response, [])
        self.assertRaises(AttributeError, spotify.response, '')
        self.assertRaises(AttributeError, spotify.response, '[]')
        # Empty or irrelevant payloads yield no results.
        response = mock.Mock(text='{}')
        self.assertEqual(spotify.response(response), [])
        response = mock.Mock(text='{"data": []}')
        self.assertEqual(spotify.response(response), [])
        # A complete track item should map to exactly one result with the
        # title, track URL, "Artist - Album - Title" content and an embedded
        # player referencing the track id.
        json = """
        {
            "tracks": {
                "href": "https://api.spotify.com/v1/search?query=nosfell&offset=0&limit=20&type=track",
                "items": [
                    {
                        "album": {
                            "album_type": "album",
                            "external_urls": {
                                "spotify": "https://open.spotify.com/album/5c9ap1PBkSGLxT3J73toxA"
                            },
                            "href": "https://api.spotify.com/v1/albums/5c9ap1PBkSGLxT3J73toxA",
                            "id": "5c9ap1PBkSGLxT3J73toxA",
                            "name": "Album Title",
                            "type": "album",
                            "uri": "spotify:album:5c9ap1PBkSGLxT3J73toxA"
                        },
                        "artists": [
                            {
                                "external_urls": {
                                    "spotify": "https://open.spotify.com/artist/0bMc6b75FfZEpQHG1jifKu"
                                },
                                "href": "https://api.spotify.com/v1/artists/0bMc6b75FfZEpQHG1jifKu",
                                "id": "0bMc6b75FfZEpQHG1jifKu",
                                "name": "Artist Name",
                                "type": "artist",
                                "uri": "spotify:artist:0bMc6b75FfZEpQHG1jifKu"
                            }
                        ],
                        "disc_number": 1,
                        "duration_ms": 202386,
                        "explicit": false,
                        "external_ids": {
                            "isrc": "FRV640600067"
                        },
                        "external_urls": {
                            "spotify": "https://open.spotify.com/track/2GzvFiedqW8hgqUpWcASZa"
                        },
                        "href": "https://api.spotify.com/v1/tracks/2GzvFiedqW8hgqUpWcASZa",
                        "id": "1000",
                        "is_playable": true,
                        "name": "Title of track",
                        "popularity": 6,
                        "preview_url": "https://p.scdn.co/mp3-preview/7b8ecda580965a066b768c2647f877e43f7b1a0a",
                        "track_number": 3,
                        "type": "track",
                        "uri": "spotify:track:2GzvFiedqW8hgqUpWcASZa"
                    }
                ],
                "limit": 20,
                "next": "https://api.spotify.com/v1/search?query=nosfell&offset=20&limit=20&type=track",
                "offset": 0,
                "previous": null,
                "total": 107
            }
        }
        """
        response = mock.Mock(text=json)
        results = spotify.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], 'Title of track')
        self.assertEqual(results[0]['url'], 'https://open.spotify.com/track/2GzvFiedqW8hgqUpWcASZa')
        self.assertEqual(results[0]['content'], 'Artist Name - Album Title - Title of track')
        self.assertIn('1000', results[0]['embedded'])
        # An incomplete item (no album/artists, type "album" rather than
        # "track") must be skipped, producing an empty result list.
        json = """
        {
            "tracks": {
                "href": "https://api.spotify.com/v1/search?query=nosfell&offset=0&limit=20&type=track",
                "items": [
                    {
                        "href": "https://api.spotify.com/v1/tracks/2GzvFiedqW8hgqUpWcASZa",
                        "id": "1000",
                        "is_playable": true,
                        "name": "Title of track",
                        "popularity": 6,
                        "preview_url": "https://p.scdn.co/mp3-preview/7b8ecda580965a066b768c2647f877e43f7b1a0a",
                        "track_number": 3,
                        "type": "album",
                        "uri": "spotify:track:2GzvFiedqW8hgqUpWcASZa"
                    }
                ],
                "limit": 20,
                "next": "https://api.spotify.com/v1/search?query=nosfell&offset=20&limit=20&type=track",
                "offset": 0,
                "previous": null,
                "total": 107
            }
        }
        """
        response = mock.Mock(text=json)
        results = spotify.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)
| agpl-3.0 |
piccolbo/rightload | embedUI.py | 1 | 4508 | """Insert feedback UI in feed entry."""
import bs4 as bs
from colour import Color
from content_extraction import get_text, get_url
from feature_extraction import text2sentences
from flask import request
from fuzzywuzzy import fuzz
import logging as log
import numpy as np
from scipy.stats import rankdata
from traceback import format_exc
def _feedbackurl(link, well_spent):
    """Build the absolute feedback URL for *link* on the current host.

    ``well_spent`` selects the 'l' (liked) or 'd' (disliked) endpoint.
    """
    verdict = u"l" if well_spent else u"d"
    return u"http://{host}/feedback/{verdict}/{link}".format(
        host=request.host, verdict=verdict, link=link
    )
def _is_long(text):
return len(text) > 1000
def _p(style, text):
return u'<p style="{style}">{text}</p>'.format(style=style, text=text)
def _a(href, target, text):
return u'<a href="{href}" target="{target}">{text}</a>'.format(
href=href, target=target, text=text
)
def _font(color, text):
return u'<font color="{color}">{text}</font>'.format(color=color, text=text)
def _span(text, color):
# style = u'"border-bottom: 3px solid {color}"'
# style = u'"text-decoration: underline; text-decoration-color: {color}"'
style = u'"background-color: {color}; line-height: 1"'.format(color=color)
return u"<span style={style}>{text}</span>".format(text=text, style=style)
def _feedback_link(is_good, content_link):
    """Anchor that registers positive or negative feedback for *content_link*."""
    if is_good:
        color, label = u"green", u"Time Well Spent"
    else:
        color, label = u"red", u"Time Wasted"
    url = _feedbackurl(link=content_link, well_spent=is_good)
    return _a(href=url, target=u"_top", text=_font(color=color, text=label))
def _conditional_bar(mean_score, content_link):
    """Gray feedback bar whose links depend on the mean score.

    Scores <= 0.5 show the "Time Well Spent" link, scores >= 0.5 the
    "Time Wasted" link; exactly 0.5 shows both joined by " or ".
    """
    parts = []
    if mean_score <= 0.5:
        parts.append(_feedback_link(True, content_link))
    if mean_score == 0.5:
        parts.append(u" or ")
    if mean_score >= 0.5:
        parts.append(_feedback_link(False, content_link))
    return _p(style=u"BACKGROUND-COLOR: #DBDBDB", text=u"".join(parts))
def _add_bar(text, mean_score, content_link):
    """Prepend the feedback bar to *text*; long texts get one appended too."""
    bar = _conditional_bar(mean_score, content_link)
    if _is_long(text):
        return bar + text + bar
    return bar + text
def _embedUI_entry(entry, score):
    """Decorate one feed entry with sentence highlighting and the feedback UI.

    Entries with ``score is None`` are returned untouched. Otherwise the
    entry body (description and/or content) is replaced with highlighted
    text plus feedback bars, and the title is prefixed with the mean score
    scaled to 0-100.

    :param entry: a feed entry mapping (mutated in place and returned).
    :param score: per-sentence scores supporting ``.mean()`` (presumably a
        numpy array -- confirm against caller), or None to skip decoration.
    """
    if score is not None:
        mean_score = score.mean()
        # body = entry2text(entry)
        body = _highlight_text(get_text(entry=entry), score)
        # body = get_html(entry=entry)
        # body = _highlight_html(html, text, score) #broken
        url = get_url(entry)
        # The same decorated body is used for both RSS-style descriptions
        # and Atom-style content entries.
        if u"description" in entry:
            entry[u"description"] = _add_bar(body, mean_score, url)
        if u"content" in entry:
            entry[u"content"][0].value = _add_bar(body, mean_score, url)
        if u"title" in entry:
            entry[u"title"] = u"{mean_score:} | {title}".format(
                mean_score=int(mean_score * 100), title=entry[u"title"]
            )
    return entry
def embedUI(parsed_feed, score):
    """Insert a UI element in each entry of a feed.

    Parameters
    ----------
    parsed_feed : feedparser result
        The parsed feed whose entries are decorated via
        :func:`_embedUI_entry`.
    score : iterable
        One score vector per entry (or None for an entry to leave it
        untouched), zipped positionally against ``parsed_feed.entries``.

    Returns
    -------
    The same parsed feed object, with its ``entries`` list replaced by the
    decorated entries.
    """
    parsed_feed.entries = [
        _embedUI_entry(e, s) for e, s in zip(parsed_feed.entries, score)
    ]
    return parsed_feed
_colors = list(Color(hsl=(0.8, 1, 1)).range_to(Color(hsl=(0.8, 1, 0.8)), 256))
def _score2color(score):
    """Map a score in [0, 1] to a hex color from the precomputed gradient."""
    index = int(score * 256)
    if index > 255:
        index = 255
    return _colors[index].get_hex_l()
def _highlight_text(text, score):
    """Split *text* into sentences and wrap each in a colored highlight.

    The highlight color is driven by each sentence's rank among the scores
    (so the full palette is always used), while the raw score is rendered
    inline. Any failure is logged and falls back to returning *text*
    unmodified.
    """
    try:
        sentences = text2sentences(text)
        # Normalize ranks into [0, 1]; a single sentence gets the midpoint.
        rank = (rankdata(score) - 1) / (len(score) - 1.0) if len(score) > 1 else [0.5]
        return u"".join(
            [_highlight_sentence(x, s, r) for x, s, r in zip(sentences, score, rank)]
        )
    except Exception:
        log.error(format_exc())
        return text
def _highlight_sentence(sentence, score, rank):
    """Render one sentence with its score inline, colored by its rank."""
    labelled = u"<small>%.2f</small> %s" % (score, sentence)
    return _span(labelled, _score2color(rank))
def _best_match_score(x, sentences, score):
    """Return the score of the sentence most similar to *x* (fuzzy ratio)."""
    assert len(sentences) == len(score), (len(sentences), len(score))
    return score[np.array([fuzz.ratio(x, s) for s in sentences]).argmax()]
def _best_match_index(x, sentences):
    """Index of the sentence most similar to *x* (fuzzy ratio)."""
    return int(np.array([fuzz.ratio(x, s) for s in sentences]).argmax())
def _highlight_html(html, text, score):
    """Highlight the text nodes of *html* using per-sentence scores.

    NOTE(review): still experimental -- this doesn't work yet for paragraphs
    of multiple sentences (each text node is matched to one best sentence).
    """
    sentences = text2sentences(text)
    # Normalized ranks drive the color choice, as in _highlight_text.
    rank = (rankdata(score) - 1) / (len(score) - 1.0) if len(score) > 1 else [0.5]
    soup = bs.BeautifulSoup(html)
    for x in soup.findAll(text=True):
        # Match this node to its closest sentence and reuse both that
        # sentence's score and rank. The previous code passed only two
        # arguments to the three-argument _highlight_sentence, raising
        # TypeError on every call.
        i = _best_match_index(x, sentences)
        x.replaceWith(
            bs.BeautifulSoup(_highlight_sentence(x, score[i], rank[i]))
        )
    return soup
| agpl-3.0 |
lucasrangit/twitter-winner | twitter-winner/requests_oauthlib/oauth1_session.py | 4 | 14418 | from __future__ import unicode_literals
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import logging
from oauthlib.common import add_params_to_uri, urldecode
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
import requests
from . import OAuth1
import sys
if sys.version > "3":
unicode = str
log = logging.getLogger(__name__)
class TokenRequestDenied(ValueError):
    """Signals that a token request was rejected by the provider.

    The HTTP status code of the denial is kept on ``status_code`` so
    callers can inspect why the request failed.
    """
    def __init__(self, message, status_code):
        self.status_code = status_code
        super(TokenRequestDenied, self).__init__(message)
class OAuth1Session(requests.Session):
"""Request signing and convenience methods for the oauth dance.
What is the difference between OAuth1Session and OAuth1?
OAuth1Session actually uses OAuth1 internally and it's purpose is to assist
in the OAuth workflow through convenience methods to prepare authorization
URLs and parse the various token and redirection responses. It also provide
rudimentary validation of responses.
An example of the OAuth workflow using a basic CLI app and Twitter.
>>> # Credentials obtained during the registration.
>>> client_key = 'client key'
>>> client_secret = 'secret'
>>> callback_uri = 'https://127.0.0.1/callback'
>>>
>>> # Endpoints found in the OAuth provider API documentation
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>>
>>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri)
>>>
>>> # First step, fetch the request token.
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'kjerht2309u',
'oauth_token_secret': 'lsdajfh923874',
}
>>>
>>> # Second step. Follow this link and authorize
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
>>>
>>> # Third step. Fetch the access token
>>> redirect_response = raw_input('Paste the full redirect URL here.')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> # Done. You can now make OAuth requests.
>>> status_url = 'http://api.twitter.com/1/statuses/update.json'
>>> new_status = {'status': 'hello world!'}
>>> oauth_session.post(status_url, data=new_status)
<Response [200]>
"""
def __init__(self, client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None,
verifier=None,
client_class=None,
force_include_body=False,
**kwargs):
"""Construct the OAuth 1 session.
:param client_key: A client specific identifier.
:param client_secret: A client specific secret used to create HMAC and
plaintext signatures.
:param resource_owner_key: A resource owner key, also referred to as
request token or access token depending on
when in the workflow it is used.
:param resource_owner_secret: A resource owner secret obtained with
either a request or access token. Often
referred to as token secret.
:param callback_uri: The URL the user is redirect back to after
authorization.
:param signature_method: Signature methods determine how the OAuth
signature is created. The three options are
oauthlib.oauth1.SIGNATURE_HMAC (default),
oauthlib.oauth1.SIGNATURE_RSA and
oauthlib.oauth1.SIGNATURE_PLAIN.
:param signature_type: Signature type decides where the OAuth
parameters are added. Either in the
Authorization header (default) or to the URL
query parameters or the request body. Defined as
oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
oauthlib.oauth1.SIGNATURE_TYPE_BODY
respectively.
:param rsa_key: The private RSA key as a string. Can only be used with
signature_method=oauthlib.oauth1.SIGNATURE_RSA.
:param verifier: A verifier string to prove authorization was granted.
:param client_class: A subclass of `oauthlib.oauth1.Client` to use with
`requests_oauthlib.OAuth1` instead of the default
:param force_include_body: Always include the request body in the
signature creation.
:param **kwargs: Additional keyword arguments passed to `OAuth1`
"""
super(OAuth1Session, self).__init__()
self._client = OAuth1(client_key,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
callback_uri=callback_uri,
signature_method=signature_method,
signature_type=signature_type,
rsa_key=rsa_key,
verifier=verifier,
client_class=client_class,
force_include_body=force_include_body,
**kwargs)
self.auth = self._client
def authorization_url(self, url, request_token=None, **kwargs):
"""Create an authorization URL by appending request_token and optional
kwargs to url.
This is the second step in the OAuth 1 workflow. The user should be
redirected to this authorization URL, grant access to you, and then
be redirected back to you. The redirection back can either be specified
during client registration or by supplying a callback URI per request.
:param url: The authorization endpoint URL.
:param request_token: The previously obtained request token.
:param kwargs: Optional parameters to append to the URL.
:returns: The authorization URL with new parameters embedded.
An example using a registered default callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
>>> oauth_session.authorization_url(authorization_url, foo='bar')
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'
An example using an explicit callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
"""
kwargs['oauth_token'] = request_token or self._client.client.resource_owner_key
log.debug('Adding parameters %s to url %s', kwargs, url)
return add_params_to_uri(url, kwargs.items())
def fetch_request_token(self, url, realm=None):
    """Fetch a request token (the first step of the OAuth 1 workflow).

    Makes a signed POST request to *url* and parses the
    application/x-www-form-urlencoded response into a dict that can be
    used to build an authorization URL.

    Any previously configured ``callback_uri`` and ``realm`` are cleared
    after the token is obtained, or else signature creation would be
    incorrect on consecutive requests.

    :param url: The request token endpoint URL.
    :param realm: A list of realms to request access to.
    :returns: The response in dict format.
    """
    client = self._client.client
    client.realm = ' '.join(realm) if realm else None
    token = self._fetch_token(url)
    log.debug('Resetting callback_uri and realm (not needed in next phase).')
    client.callback_uri = None
    client.realm = None
    return token
def fetch_access_token(self, url):
    """Fetch an access token (the final step of the OAuth 1 workflow).

    Uses all previously obtained credentials, including the verifier
    from the authorization step. The verifier is cleared once the token
    has been fetched, or else signature creation would be incorrect on
    consecutive requests.

    :param url: The access token endpoint URL.
    :returns: The response in dict format.
    :raises ValueError: If no verifier has been set on the client.
    """
    client = self._client.client
    verifier = getattr(client, 'verifier', None)
    if not verifier:
        raise ValueError('No client verifier has been set.')
    token = self._fetch_token(url)
    log.debug('Resetting verifier attribute, should not be used anymore.')
    client.verifier = None
    return token
def parse_authorization_response(self, url):
    """Extract parameters from the post-authorization redirect URL.

    :param url: The full URL that resulted from the user being
        redirected back from the OAuth provider to you, the client,
        including the query string carrying the OAuth parameters.
    :returns: A dict of parameters extracted from the URL. As a side
        effect the internal client token attributes are updated.
    """
    log.debug('Parsing token from query part of url %s', url)
    query = urlparse(url).query
    token = dict(urldecode(query))
    log.debug('Updating internal client token attribute.')
    self._populate_attributes(token)
    return token
def _populate_attributes(self, token):
if 'oauth_token' in token:
self._client.client.resource_owner_key = token['oauth_token']
else:
raise ValueError('Response does not contain a token. %s', token)
if 'oauth_token_secret' in token:
self._client.client.resource_owner_secret = (
token['oauth_token_secret'])
if 'oauth_verifier' in token:
self._client.client.verifier = token['oauth_verifier']
def _fetch_token(self, url):
    # POST to a token endpoint, decode the urlencoded response body into
    # a dict, and update the internal client credentials from it.
    log.debug('Fetching token from %s using client %s', url, self._client.client)
    r = self.post(url)
    # Any 4xx/5xx status is treated as a denial of the token request.
    if r.status_code >= 400:
        error = "Token request failed with code %s, response was '%s'."
        raise TokenRequestDenied(error % (r.status_code, r.text), r.status_code)
    log.debug('Decoding token from response "%s"', r.text)
    try:
        token = dict(urldecode(r.text))
    except ValueError as e:
        # urldecode raises when the body is not urlencoded — commonly an
        # HTML or plain-text error page from a failed request.
        error = ("Unable to decode token from token response. "
                 "This is commonly caused by an unsuccessful request where"
                 " a non urlencoded error message is returned. "
                 "The decoding error was %s""" % e)
        raise ValueError(error)
    log.debug('Obtained token %s', token)
    log.debug('Updating internal client attributes from token data.')
    self._populate_attributes(token)
    return token
def rebuild_auth(self, prepared_request, response):
    """Re-sign a request when following a redirect.

    When being redirected we should always strip the Authorization
    header, since a nonce may not be reused as per the OAuth spec, and
    then attach a freshly computed signature.
    """
    headers = prepared_request.headers
    headers.pop('Authorization', True)
    prepared_request.prepare_auth(self.auth)
    return
| mit |
r888888888/models | inception/inception/slim/losses_test.py | 34 | 6414 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import losses
class LossesTest(tf.test.TestCase):
    """Tests for the l1_loss / l2_loss weight-decay ops in slim.losses.

    Each test evaluates the loss on an all-ones 5x5x5 tensor (125
    elements), so the expected L1 value is num_elem * wd and the
    expected L2 value is num_elem * wd / 2.
    """

    def testL1Loss(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            weights = tf.constant(1.0, shape=shape)
            wd = 0.01
            loss = losses.l1_loss(weights, wd)
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual(loss.op.name, 'L1Loss/value')
            self.assertAlmostEqual(loss.eval(), num_elem * wd, 5)

    def testL2Loss(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            weights = tf.constant(1.0, shape=shape)
            wd = 0.01
            loss = losses.l2_loss(weights, wd)
            self.assertEqual(loss.op.name, 'L2Loss/value')
            # tf.nn.l2_loss computes sum(x**2) / 2, hence the / 2 here.
            self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5)
class RegularizersTest(tf.test.TestCase):
    """Tests for the l1 / l2 / l1_l2 regularizer factories in slim.losses.

    Each regularizer is applied to an all-ones 5x5x5 tensor (125
    elements): the L1 term evaluates to num_elem * weight and the L2
    term to num_elem * weight / 2.

    Note: assertEqual replaces the deprecated assertEquals alias
    throughout.
    """

    def testL1Regularizer(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l1_regularizer()(tensor)
            self.assertEqual(loss.op.name, 'L1Regularizer/value')
            self.assertAlmostEqual(loss.eval(), num_elem, 5)

    def testL1RegularizerWithScope(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l1_regularizer(scope='L1')(tensor)
            self.assertEqual(loss.op.name, 'L1/value')
            self.assertAlmostEqual(loss.eval(), num_elem, 5)

    def testL1RegularizerWithWeight(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            weight = 0.01
            loss = losses.l1_regularizer(weight)(tensor)
            self.assertEqual(loss.op.name, 'L1Regularizer/value')
            self.assertAlmostEqual(loss.eval(), num_elem * weight, 5)

    def testL2Regularizer(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l2_regularizer()(tensor)
            self.assertEqual(loss.op.name, 'L2Regularizer/value')
            self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)

    def testL2RegularizerWithScope(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l2_regularizer(scope='L2')(tensor)
            self.assertEqual(loss.op.name, 'L2/value')
            self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)

    def testL2RegularizerWithWeight(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            weight = 0.01
            loss = losses.l2_regularizer(weight)(tensor)
            self.assertEqual(loss.op.name, 'L2Regularizer/value')
            self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5)

    def testL1L2Regularizer(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l1_l2_regularizer()(tensor)
            self.assertEqual(loss.op.name, 'L1L2Regularizer/value')
            # Combined penalty is the L1 term plus the halved L2 term.
            self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)

    def testL1L2RegularizerWithScope(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            loss = losses.l1_l2_regularizer(scope='L1L2')(tensor)
            self.assertEqual(loss.op.name, 'L1L2/value')
            self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)

    def testL1L2RegularizerWithWeights(self):
        with self.test_session():
            shape = [5, 5, 5]
            num_elem = 5 * 5 * 5
            tensor = tf.constant(1.0, shape=shape)
            weight_l1 = 0.01
            weight_l2 = 0.05
            loss = losses.l1_l2_regularizer(weight_l1, weight_l2)(tensor)
            self.assertEqual(loss.op.name, 'L1L2Regularizer/value')
            self.assertAlmostEqual(loss.eval(),
                                   num_elem * weight_l1 + num_elem * weight_l2 / 2, 5)
class CrossEntropyLossTest(tf.test.TestCase):
    """Tests for losses.cross_entropy_loss with one-hot labels.

    Uses logits with a margin of 10 between the chosen class and the
    rest, so the loss is ~0 for all-correct labels and ~10 per example
    when every label points at a zero logit.

    Note: assertEqual replaces the deprecated assertEquals alias.
    """

    def testCrossEntropyLossAllCorrect(self):
        with self.test_session():
            logits = tf.constant([[10.0, 0.0, 0.0],
                                  [0.0, 10.0, 0.0],
                                  [0.0, 0.0, 10.0]])
            labels = tf.constant([[1, 0, 0],
                                  [0, 1, 0],
                                  [0, 0, 1]])
            loss = losses.cross_entropy_loss(logits, labels)
            self.assertEqual(loss.op.name, 'CrossEntropyLoss/value')
            self.assertAlmostEqual(loss.eval(), 0.0, 3)

    def testCrossEntropyLossAllWrong(self):
        with self.test_session():
            logits = tf.constant([[10.0, 0.0, 0.0],
                                  [0.0, 10.0, 0.0],
                                  [0.0, 0.0, 10.0]])
            labels = tf.constant([[0, 0, 1],
                                  [1, 0, 0],
                                  [0, 1, 0]])
            loss = losses.cross_entropy_loss(logits, labels)
            self.assertEqual(loss.op.name, 'CrossEntropyLoss/value')
            self.assertAlmostEqual(loss.eval(), 10.0, 3)

    def testCrossEntropyLossAllWrongWithWeight(self):
        with self.test_session():
            logits = tf.constant([[10.0, 0.0, 0.0],
                                  [0.0, 10.0, 0.0],
                                  [0.0, 0.0, 10.0]])
            labels = tf.constant([[0, 0, 1],
                                  [1, 0, 0],
                                  [0, 1, 0]])
            loss = losses.cross_entropy_loss(logits, labels, weight=0.5)
            self.assertEqual(loss.op.name, 'CrossEntropyLoss/value')
            # Loss scales linearly with the weight: 10.0 * 0.5 == 5.0.
            self.assertAlmostEqual(loss.eval(), 5.0, 3)
if __name__ == '__main__':
    # Run the TensorFlow test runner when executed as a script.
    tf.test.main()
| apache-2.0 |
rinsewester/SDFkit | tests/testsdfgraph.py | 1 | 2225 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Validation of the CSDF class usign unittests
author: Rinse Wester
"""
import unittest
from csdfgraph import CSDFGraph
class SDFGraphTestCase(unittest.TestCase):
    """Validates CSDFGraph token flow, firing logging and type detection
    on a minimal two-node cyclic graph."""

    def setUp(self):
        # create a graph with a cycle
        self.cycle_cons_sdf_graph = CSDFGraph()
        # function for n0: accept single token and produce a pair with 0 and 1 added to it
        f_n0 = 'lambda xs, firecounter, phase: [xs[0], xs[0] + 1]'
        # function for n1: sum the two incoming tokens into one
        f_n1 = 'lambda xs, firecounter, phase: [xs[0] + xs[1]]'
        self.cycle_cons_sdf_graph.add_node('n0', f_n0, (0, 0))
        self.cycle_cons_sdf_graph.add_node('n1', f_n1, (100, 0))
        # connect node n0 and n1 using two edges to form a cycle with an initial token
        self.cycle_cons_sdf_graph.add_edge('n0', 'n1', 0, 0, [2], [2])
        self.cycle_cons_sdf_graph.add_edge('n1', 'n0', 0, 0, [1], [1], tkns=[0])

    def test_data_from_two_firings(self):
        # After one firing, the token on the edge from n1 to n0 should be consumed
        # and two tokens should be produced
        self.cycle_cons_sdf_graph.step()
        self.assertEqual(self.cycle_cons_sdf_graph.edge['n1']['n0']['tkns'], [])
        self.assertEqual(self.cycle_cons_sdf_graph.edge['n0']['n1']['tkns'], [0, 1])
        # after a second step n1 fires: its pair is consumed and one sum
        # token flows back to n0
        self.cycle_cons_sdf_graph.step()
        self.assertEqual(self.cycle_cons_sdf_graph.edge['n1']['n0']['tkns'], [1])
        self.assertEqual(self.cycle_cons_sdf_graph.edge['n0']['n1']['tkns'], [])

    def test_node_firings_storing(self):
        # perform a few iterations to fill up the activation log for each node
        for i in range(7):
            self.cycle_cons_sdf_graph.step()
        # firing pattern should be n0 n1 n0 n1 n0 n1
        self.assertEqual(self.cycle_cons_sdf_graph.nodefirings['n0'], [True, False, True, False, True, False, True])
        self.assertEqual(self.cycle_cons_sdf_graph.nodefirings['n1'], [False, True, False, True, False, True, False])

    def test_graph_type_detection(self):
        # initial graph -> SDF (all rates are constant, so the CSDF graph
        # should report itself as plain SDF)
        self.assertTrue(self.cycle_cons_sdf_graph.isSDF())
def main():
    """Entry point: delegate to the unittest test runner."""
    unittest.main()


if __name__ == '__main__':
    main()
eonpatapon/rally | tests/unit/doc/test_task_samples.py | 4 | 5334 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import os
import re
import traceback
import yaml
from rally import api
from rally.task import scenario
from rally.task import engine
from tests.unit import test
class TaskSampleTestCase(test.TestCase):
    """Validates the sample task configs shipped under samples/tasks.

    Checks that every sample passes engine validation, that JSON samples
    parse, and that each sample exists and agrees in both .json and
    .yaml form.
    """

    # Absolute path to the repository's samples/tasks directory,
    # resolved relative to this test module.
    samples_path = os.path.join(
        os.path.dirname(__file__),
        os.pardir, os.pardir, os.pardir,
        "samples", "tasks")

    @mock.patch("rally.task.engine.TaskEngine"
                "._validate_config_semantic")
    def test_schema_is_valid(self,
                             mock_task_engine__validate_config_semantic):
        """Every sample validates, and every built-in scenario has one."""
        scenarios = set()

        for dirname, dirnames, filenames in os.walk(self.samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                # NOTE(hughsaunders): Skip non config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                # Fix: use a raw string so "\." is a regex escape, not an
                # invalid string-literal escape sequence (deprecated and
                # slated to become a SyntaxError).
                if not re.search(r"\.(ya?ml|json)$", filename, flags=re.I):
                    continue

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(api.Task.render_template
                                                     (task_file.read()))
                        eng = engine.TaskEngine(task_config,
                                                mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Invalid task file: %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        # Every non-plugin scenario shipped with rally must be covered.
        missing = set(s.get_name() for s in scenario.Scenario.get_all())
        missing -= scenarios
        # check missing scenario is not from plugin
        missing = [s for s in list(missing)
                   if scenario.Scenario.get(s).__module__.startswith("rally")]
        self.assertEqual(missing, [],
                         "These scenarios don't have samples: %s" % missing)

    def test_json_correct_syntax(self):
        """All .json samples parse as valid JSON after templating."""
        for dirname, dirnames, filenames in os.walk(self.samples_path):
            for filename in filenames:
                if not filename.endswith(".json"):
                    continue
                full_path = os.path.join(dirname, filename)
                with open(full_path) as task_file:
                    try:
                        json.loads(api.Task.render_template(task_file.read()))
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Invalid JSON file: %s" % full_path)

    def test_task_config_pair_existance(self):
        """Each sample must exist in both .json and .yaml form."""
        inexistent_paths = []

        for dirname, dirnames, filenames in os.walk(self.samples_path):
            # iterate over unique config names
            for sample_name in set(
                    f[:-5] for f in filenames
                    if f.endswith(".json") or f.endswith(".yaml")):

                partial_path = os.path.join(dirname, sample_name)
                yaml_path = partial_path + ".yaml"
                json_path = partial_path + ".json"

                if not os.path.exists(yaml_path):
                    inexistent_paths.append(yaml_path)
                elif not os.path.exists(json_path):
                    inexistent_paths.append(json_path)

        if inexistent_paths:
            self.fail("Sample task configs are missing:\n%r"
                      % inexistent_paths)

    def test_task_config_pairs_equality(self):
        """Paired .json and .yaml samples must parse to equal configs."""
        for dirname, dirnames, filenames in os.walk(self.samples_path):
            # iterate over unique config names
            for sample_name in set(
                    f[:-5] for f in filenames
                    if f.endswith(".json") or f.endswith(".yaml")):

                partial_path = os.path.join(dirname, sample_name)
                yaml_path = partial_path + ".yaml"
                json_path = partial_path + ".json"

                if os.path.exists(yaml_path) and os.path.exists(json_path):
                    with open(json_path) as json_file:
                        json_config = yaml.safe_load(api.Task.render_template
                                                     (json_file.read()))
                    with open(yaml_path) as yaml_file:
                        yaml_config = yaml.safe_load(api.Task.render_template
                                                     (yaml_file.read()))
                    self.assertEqual(json_config, yaml_config,
                                     "Sample task configs are not equal:"
                                     "\n%s\n%s" % (yaml_path, json_path))
| apache-2.0 |
jdobes/cobbler | cobbler/collection_images.py | 15 | 2652 | """
A image instance represents a ISO or virt image we want to track
and repeatedly install. It differs from a answer-file based installation.
Copyright 2006-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import item_image as image
import utils
import collection
from cexceptions import *
from utils import _
import action_litesync
#--------------------------------------------
class Images(collection.Collection):
    """Collection of cobbler Image objects, keyed by lower-cased name."""

    def collection_type(self):
        # Identifier used by serialization and the remote API.
        return "image"

    def factory_produce(self, config, seed_data):
        """
        Return a Distro forged from seed_data
        """
        return image.Image(config).from_datastruct(seed_data)

    def remove(self, name, with_delete=True, with_sync=True, with_triggers=True, recursive=True, logger=None):
        """
        Remove element named 'name' from the collection

        :param name: image name (matched case-insensitively)
        :param with_delete: run delete triggers / litesync cleanup
        :param with_sync: perform a lightweight sync to remove the image
        :param with_triggers: run pre/post delete trigger scripts
        :param recursive: also delete systems referencing this image
        :param logger: optional logger passed through to sub-operations
        :raises CX: if the image does not exist, or (non-recursive) if a
            system still references it
        """
        # NOTE: with_delete isn't currently meaningful for repos
        # but is left in for consistancy in the API.  Unused.
        name = name.lower()
        # first see if any Groups use this distro
        if not recursive:
            for v in self.config.systems():
                if v.image is not None and v.image.lower() == name:
                    raise CX(_("removal would orphan system: %s") % v.name)
        obj = self.find(name=name)
        if obj is not None:
            if recursive:
                # delete child systems first so nothing is orphaned
                kids = obj.get_children()
                for k in kids:
                    self.config.api.remove_system(k, recursive=True, logger=logger)
            if with_delete:
                if with_triggers:
                    # pre-delete trigger scripts may veto by raising
                    utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/delete/image/pre/*", [], logger)
                if with_sync:
                    lite_sync = action_litesync.BootLiteSync(self.config, logger=logger)
                    lite_sync.remove_single_image(name)
            # drop from the in-memory listing and persist the removal
            del self.listing[name]
            self.config.serialize_delete(self, obj)
            if with_delete:
                if with_triggers:
                    utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/delete/image/post/*", [], logger)
                    utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/change/*", [], logger)
            return True
        raise CX(_("cannot delete an object that does not exist: %s") % name)
| gpl-2.0 |
Thingee/cinder | cinder/volume/drivers/vmware/io_util.py | 2 | 7026 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility classes for defining the time saving transfer of data from the reader
to the write using a LightQueue as a Pipe between the reader and the writer.
"""
from eventlet import event
from eventlet import greenthread
from eventlet import queue
from cinder import exception
from cinder.openstack.common import log as logging
# Module-level logger.
LOG = logging.getLogger(__name__)

# Seconds the IO thread sleeps between chunk transfers.
IO_THREAD_SLEEP_TIME = .01
# Seconds between polls of the glance image status.
GLANCE_POLL_INTERVAL = 5
class ThreadSafePipe(queue.LightQueue):
    """The pipe to hold the data which the reader writes to and the writer
    reads from.

    Presents a minimal file-like interface (read/write/seek/tell/close)
    on top of an eventlet LightQueue so it can be handed to code that
    expects a file object.
    """

    def __init__(self, maxsize, max_transfer_size):
        queue.LightQueue.__init__(self, maxsize)
        # Total number of bytes expected to flow through the pipe.
        self.max_transfer_size = max_transfer_size
        # Bytes transferred so far.
        self.transferred = 0

    def read(self, chunk_size):
        """Read data from the pipe.

        Chunksize is ignored for we have ensured that the data chunks written
        to the pipe by readers is the same as the chunks asked for by Writer.
        """
        if self.transferred < self.max_transfer_size:
            data_item = self.get()
            self.transferred += len(data_item)
            LOG.debug(_("Read %(bytes)s out of %(max)s from ThreadSafePipe.") %
                      {'bytes': self.transferred,
                       'max': self.max_transfer_size})
            return data_item
        else:
            LOG.debug(_("Completed transfer of size %s.") % self.transferred)
            # Empty string signals end-of-stream to the consumer.
            return ""

    def write(self, data):
        """Put a data item in the pipe."""
        self.put(data)

    def seek(self, offset, whence=0):
        """Set the file's current position at the offset."""
        # No-op: the pipe is strictly sequential; present only so the
        # object satisfies the file-like interface.
        pass

    def tell(self):
        """Get size of the file to be read."""
        return self.max_transfer_size

    def close(self):
        """A place-holder to maintain consistency."""
        pass
class GlanceWriteThread(object):
    """Ensures that image data is written to in the glance client and that
    it is in correct ('active')state.

    start() spawns a green thread that streams input_file to the image
    service and then polls the image status until it becomes 'active'
    (success), 'killed'/unknown (error), or an exception occurs.
    """

    def __init__(self, context, input_file, image_service, image_id,
                 image_meta=None):
        # Normalise the mutable default: callers may omit image_meta.
        if not image_meta:
            image_meta = {}

        self.context = context
        self.input_file = input_file
        self.image_service = image_service
        self.image_id = image_id
        self.image_meta = image_meta
        # Polled by the green thread; stop() flips it to end the loop.
        self._running = False

    def start(self):
        """Spawn the upload/poll green thread.

        :returns: an eventlet Event that receives True on success or the
            raised exception on failure.
        """
        self.done = event.Event()

        def _inner():
            """Initiate write thread.

            Function to do the image data transfer through an update
            and thereon checks if the state is 'active'.
            """
            LOG.debug(_("Initiating image service update on image: %(image)s "
                        "with meta: %(meta)s") % {'image': self.image_id,
                                                  'meta': self.image_meta})
            self.image_service.update(self.context,
                                      self.image_id,
                                      self.image_meta,
                                      data=self.input_file)
            self._running = True
            while self._running:
                try:
                    image_meta = self.image_service.show(self.context,
                                                         self.image_id)
                    image_status = image_meta.get('status')
                    if image_status == 'active':
                        self.stop()
                        LOG.debug(_("Glance image: %s is now active.") %
                                  self.image_id)
                        self.done.send(True)
                    # If the state is killed, then raise an exception.
                    elif image_status == 'killed':
                        self.stop()
                        msg = (_("Glance image: %s is in killed state.") %
                               self.image_id)
                        LOG.error(msg)
                        excep = exception.CinderException(msg)
                        self.done.send_exception(excep)
                    elif image_status in ['saving', 'queued']:
                        # Still in progress: wait before polling again.
                        greenthread.sleep(GLANCE_POLL_INTERVAL)
                    else:
                        self.stop()
                        msg = _("Glance image %(id)s is in unknown state "
                                "- %(state)s") % {'id': self.image_id,
                                                  'state': image_status}
                        LOG.error(msg)
                        excep = exception.CinderException(msg)
                        self.done.send_exception(excep)
                except Exception as exc:
                    self.stop()
                    self.done.send_exception(exc)

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        # Signal the polling loop to exit on its next iteration.
        self._running = False

    def wait(self):
        """Block until the transfer finishes; re-raises any sent exception."""
        return self.done.wait()

    def close(self):
        """A place-holder to maintain consistency."""
        pass
class IOThread(object):
    """Class that reads chunks from the input file and writes them to the
    output file till the transfer is completely done.
    """

    def __init__(self, input_file, output_file):
        self.input_file = input_file
        self.output_file = output_file
        # Polled by the green thread; stop() flips it to end the loop.
        self._running = False
        self.got_exception = False

    def start(self):
        """Spawn the copy green thread.

        :returns: an eventlet Event that receives True when the input is
            exhausted or the raised exception on failure.
        """
        self.done = event.Event()

        def _inner():
            """Read data from input and write the same to output."""
            self._running = True
            while self._running:
                try:
                    data = self.input_file.read(None)
                    if not data:
                        # Empty read == end of stream: finish the event.
                        self.stop()
                        self.done.send(True)
                    # NOTE(review): on the final iteration this still
                    # writes the empty chunk after sending the event —
                    # presumably harmless for the file-like targets used
                    # here; confirm before relying on write ordering.
                    self.output_file.write(data)
                    if hasattr(self.input_file, "update_progress"):
                        self.input_file.update_progress()
                    if hasattr(self.output_file, "update_progress"):
                        self.output_file.update_progress()
                    # Yield to other green threads between chunks.
                    greenthread.sleep(IO_THREAD_SLEEP_TIME)
                except Exception as exc:
                    self.stop()
                    LOG.exception(exc)
                    self.done.send_exception(exc)

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        # Signal the copy loop to exit on its next iteration.
        self._running = False

    def wait(self):
        """Block until the copy finishes; re-raises any sent exception."""
        return self.done.wait()
| apache-2.0 |
fanjunwei/depot_tools | third_party/boto/contrib/__init__.py | 396 | 1107 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| bsd-3-clause |
gonzolino/heat | heat/tests/openstack/monasca/test_alarm_definition.py | 3 | 9351 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.engine.clients.os import monasca as client_plugin
from heat.engine import resource
from heat.engine.resources.openstack.monasca import alarm_definition
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
# Minimal HOT template exercising every property of the
# OS::Monasca::AlarmDefinition resource type.
sample_template = {
    'heat_template_version': '2015-10-15',
    'resources': {
        'test_resource': {
            'type': 'OS::Monasca::AlarmDefinition',
            'properties': {
                'name': 'sample_alarm_id',
                'description': 'sample alarm def',
                'expression': 'sample expression',
                'match_by': ['match_by'],
                'severity': 'low',
                'ok_actions': ['sample_notification'],
                'alarm_actions': ['sample_notification'],
                'undetermined_actions': ['sample_notification'],
                'actions_enabled': False
            }
        }
    }
}

# Heat resource type name under test.
RESOURCE_TYPE = 'OS::Monasca::AlarmDefinition'
class MonascaAlarmDefinition(alarm_definition.MonascaAlarmDefinition):
    """This class overrides the is_service_available to return True.

    Monasca service is not available by default. So, this class overrides
    the is_service_available to return True.
    """

    @classmethod
    def is_service_available(cls, context):
        # Pretend a monasca endpoint exists so the resource type can be
        # registered and exercised without a deployed service.
        return True
class MonascaAlarmDefinitionTest(common.HeatTestCase):
    """Unit tests for the OS::Monasca::AlarmDefinition resource handlers,
    using a fully mocked monasca client."""

    def setUp(self):
        super(MonascaAlarmDefinitionTest, self).setUp()

        self.ctx = utils.dummy_context()
        # As monascaclient is not part of requirements.txt, RESOURCE_TYPE is
        # not registered by default. For testing, its registered here
        resource._register_class(RESOURCE_TYPE,
                                 MonascaAlarmDefinition)
        self.stack = stack.Stack(
            self.ctx, 'test_stack',
            template.Template(sample_template)
        )
        self.test_resource = self.stack['test_resource']

        # Mock client
        self.test_client = mock.MagicMock()
        self.test_resource.client = mock.MagicMock(
            return_value=self.test_client)

        # Mock client plugin
        self.test_client_plugin = client_plugin.MonascaClientPlugin(self.ctx)
        self.test_client_plugin._create = mock.MagicMock(
            return_value=self.test_client)
        self.test_resource.client_plugin = mock.MagicMock(
            return_value=self.test_client_plugin)
        self.test_client_plugin.get_notification = mock.MagicMock(
            return_value='sample_notification')

    def _get_mock_resource(self):
        # Fake response from alarm_definitions.create.
        value = dict(id='477e8273-60a7-4c41-b683-fdb0bc7cd152')
        return value

    def test_resource_handle_create(self):
        """Create must forward all properties and patch actions_enabled."""
        mock_alarm_create = self.test_client.alarm_definitions.create
        mock_alarm_patch = self.test_client.alarm_definitions.patch
        mock_resource = self._get_mock_resource()
        mock_alarm_create.return_value = mock_resource

        # validate the properties
        self.assertEqual(
            'sample_alarm_id',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.NAME))
        self.assertEqual(
            'sample alarm def',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.DESCRIPTION))
        self.assertEqual(
            'sample expression',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.EXPRESSION))
        self.assertEqual(
            ['match_by'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.MATCH_BY))
        self.assertEqual(
            'low',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.SEVERITY))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.OK_ACTIONS))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS))
        self.assertEqual(
            False,
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED))

        self.test_resource.data_set = mock.Mock()
        self.test_resource.handle_create()

        # validate physical resource id
        self.assertEqual(mock_resource['id'], self.test_resource.resource_id)

        args = dict(
            name='sample_alarm_id',
            description='sample alarm def',
            expression='sample expression',
            match_by=['match_by'],
            severity='low',
            ok_actions=['sample_notification'],
            alarm_actions=['sample_notification'],
            undetermined_actions=['sample_notification']
        )
        mock_alarm_create.assert_called_once_with(**args)
        # actions_enabled=False cannot be passed to create; it is applied
        # by a follow-up patch call.
        mock_alarm_patch.assert_called_once_with(
            alarm_id=self.test_resource.resource_id,
            actions_enabled=False)

    def test_resource_handle_update(self):
        """Update must patch only the changed properties."""
        mock_alarm_patch = self.test_client.alarm_definitions.patch
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'

        prop_diff = {
            alarm_definition.MonascaAlarmDefinition.NAME:
                'name-updated',
            alarm_definition.MonascaAlarmDefinition.DESCRIPTION:
                'description-updated',
            alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED:
                True,
            alarm_definition.MonascaAlarmDefinition.SEVERITY:
                'medium',
            alarm_definition.MonascaAlarmDefinition.OK_ACTIONS:
                ['sample_notification'],
            alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS:
                ['sample_notification'],
            alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS:
                ['sample_notification']}

        self.test_resource.handle_update(json_snippet=None,
                                         tmpl_diff=None,
                                         prop_diff=prop_diff)

        args = dict(
            alarm_id=self.test_resource.resource_id,
            name='name-updated',
            description='description-updated',
            actions_enabled=True,
            severity='medium',
            ok_actions=['sample_notification'],
            alarm_actions=['sample_notification'],
            undetermined_actions=['sample_notification']
        )
        mock_alarm_patch.assert_called_once_with(**args)

    def test_resource_handle_delete(self):
        """Delete must call alarm_definitions.delete with the resource id."""
        mock_alarm_delete = self.test_client.alarm_definitions.delete
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_alarm_delete.return_value = None

        self.assertIsNone(self.test_resource.handle_delete())
        mock_alarm_delete.assert_called_once_with(
            alarm_id=self.test_resource.resource_id
        )

    def test_resource_handle_delete_resource_id_is_none(self):
        # Deleting a never-created resource must be a silent no-op.
        self.test_resource.resource_id = None
        self.assertIsNone(self.test_resource.handle_delete())

    def test_resource_handle_delete_not_found(self):
        # TODO(skraynev): remove it when monasca client will be
        # merged in global requirements
        class NotFound(Exception):
            pass

        client_plugin.monasca_exc = mock.Mock()
        client_plugin.monasca_exc.NotFound = NotFound

        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_alarm_delete = self.test_client.alarm_definitions.delete
        mock_alarm_delete.side_effect = client_plugin.monasca_exc.NotFound

        # A NotFound from the service must be swallowed: delete succeeds.
        self.assertIsNone(self.test_resource.handle_delete())

    def test_resource_mapping(self):
        # The plugin must expose exactly one resource type mapping.
        mapping = alarm_definition.resource_mapping()
        self.assertEqual(1, len(mapping))
        self.assertEqual(alarm_definition.MonascaAlarmDefinition,
                         mapping[RESOURCE_TYPE])
        self.assertIsInstance(self.test_resource,
                              alarm_definition.MonascaAlarmDefinition)

    def test_resource_show_resource(self):
        """_show_resource must return the raw alarm definition dict."""
        mock_notification_get = self.test_client.alarm_definitions.get
        mock_notification_get.return_value = {}

        self.assertEqual({},
                         self.test_resource._show_resource(),
                         'Failed to show resource')
| apache-2.0 |
hmrc/wristband | config/settings/production.py | 1 | 2863 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
'''
from __future__ import absolute_import, unicode_literals
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")

# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure",)

SECURITY_MIDDLEWARE = (
    'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES

# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): SESSION_COOKIE_SECURE is False in *production* settings, so
# the session cookie may be sent over plain HTTP.  Presumably TLS is
# terminated upstream -- TODO confirm; otherwise this should be True.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True

# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# NOTE(review): "*" disables Django's Host-header validation; assumes a
# trusted proxy sanitises the Host header -- confirm against deployment.
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION

INSTALLED_APPS += ("gunicorn",)

# AUTHENTICATION
# --------------
# LDAP backend configuration (django-auth-ldap); values are read from the
# environment through the `env` helper imported via .common.
AUTH_LDAP_SERVER_URI = env('AUTH_LDAP_SERVER_URI')
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = env('AUTH_LDAP_BIND_AS_AUTHENTICATING_USER', default=True)
AUTH_LDAP_GLOBAL_OPTIONS = {
    # NOTE(review): OPT_X_TLS_NEVER disables server certificate
    # verification for the LDAP connection -- confirm this is intentional.
    ldap.OPT_X_TLS_REQUIRE_CERT: ldap.OPT_X_TLS_NEVER,
    ldap.OPT_NETWORK_TIMEOUT: 10,
    ldap.OPT_DEBUG_LEVEL: 255
}
# Empty bind DN/password: anonymous bind, or bind-as-authenticating-user
# (enabled above) is used instead.
AUTH_LDAP_BIND_DN = ''
AUTH_LDAP_BIND_PASSWORD = ''
AUTH_LDAP_USER_DN_TEMPLATE = env('AUTH_LDAP_USER_DN_TEMPLATE', default=None)
AUTH_LDAP_USER_SEARCH_DN = env('AUTH_LDAP_USER_SEARCH_DN')
AUTH_LDAP_GROUP_SEARCH_DN = env('AUTH_LDAP_GROUP_SEARCH_DN')
AUTH_LDAP_SUPERUSER_DN = env('AUTH_LDAP_SUPERUSER_DN', default=None)
WRISTBAND_ENV = env('WRISTBAND_ENV', default='prod')
AUTH_LDAP_USER_SEARCH = LDAPSearch(AUTH_LDAP_USER_SEARCH_DN, ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_DN, ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)")
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType()
# Cache group memberships briefly to avoid an LDAP round-trip per request.
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 300
AUTH_LDAP_REQUIRE_GROUP = env('AUTH_LDAP_REQUIRE_GROUP', default=None)
# We're not ready for this yet....
# AUTH_LDAP_USER_FLAGS_BY_GROUP = {
#     "is_superuser": AUTH_LDAP_SUPERUSER_DN
# }
| apache-2.0 |
macs03/demo-cms | cms/lib/python2.7/site-packages/djangocms_video/migrations/0001_initial.py | 6 | 6735 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connection
class Migration(SchemaMigration):
    """South migration establishing the ``djangocms_video_video`` table.

    Handles three starting states: a table left over from the plugin's
    earlier ``cmsplugin_video`` app label, one from ``video_video``, or no
    table at all (fresh install), in which case it is created from scratch.
    """

    def forwards(self, orm):
        # Rename a pre-existing table if the plugin was previously installed
        # under another app label; only create the table on a fresh install.
        table_names = connection.introspection.table_names()
        if 'cmsplugin_video' in table_names:
            db.rename_table('cmsplugin_video', 'djangocms_video_video')
        elif 'video_video' in table_names:
            db.rename_table('video_video', 'djangocms_video_video')
        else:
            # Adding model 'Video'
            db.create_table(u'djangocms_video_video', (
                (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
                ('movie', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
                ('movie_url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
                ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
                ('width', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
                ('height', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
                ('auto_play', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('auto_hide', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('fullscreen', self.gf('django.db.models.fields.BooleanField')(default=True)),
                ('loop', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('bgcolor', self.gf('django.db.models.fields.CharField')(default='000000', max_length=6)),
                ('textcolor', self.gf('django.db.models.fields.CharField')(default='FFFFFF', max_length=6)),
                ('seekbarcolor', self.gf('django.db.models.fields.CharField')(default='13ABEC', max_length=6)),
                ('seekbarbgcolor', self.gf('django.db.models.fields.CharField')(default='333333', max_length=6)),
                ('loadingbarcolor', self.gf('django.db.models.fields.CharField')(default='828282', max_length=6)),
                ('buttonoutcolor', self.gf('django.db.models.fields.CharField')(default='333333', max_length=6)),
                ('buttonovercolor', self.gf('django.db.models.fields.CharField')(default='000000', max_length=6)),
                ('buttonhighlightcolor', self.gf('django.db.models.fields.CharField')(default='FFFFFF', max_length=6)),
            ))
            db.send_create_signal(u'djangocms_video', ['Video'])

    def backwards(self, orm):
        # Deleting model 'Video'
        db.delete_table(u'djangocms_video_video')

    # Frozen ORM model definitions used by South to reconstruct the models
    # as they existed at migration time; generated automatically.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'djangocms_video.video': {
            'Meta': {'object_name': 'Video', '_ormbases': ['cms.CMSPlugin']},
            'auto_hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'auto_play': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'bgcolor': ('django.db.models.fields.CharField', [], {'default': "'000000'", 'max_length': '6'}),
            'buttonhighlightcolor': ('django.db.models.fields.CharField', [], {'default': "'FFFFFF'", 'max_length': '6'}),
            'buttonoutcolor': ('django.db.models.fields.CharField', [], {'default': "'333333'", 'max_length': '6'}),
            'buttonovercolor': ('django.db.models.fields.CharField', [], {'default': "'000000'", 'max_length': '6'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'loadingbarcolor': ('django.db.models.fields.CharField', [], {'default': "'828282'", 'max_length': '6'}),
            'loop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'movie': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'movie_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'seekbarbgcolor': ('django.db.models.fields.CharField', [], {'default': "'333333'", 'max_length': '6'}),
            'seekbarcolor': ('django.db.models.fields.CharField', [], {'default': "'13ABEC'", 'max_length': '6'}),
            'textcolor': ('django.db.models.fields.CharField', [], {'default': "'FFFFFF'", 'max_length': '6'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        }
    }
complete_apps = ['djangocms_video'] | mit |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/distutils/tests/test_install_scripts.py | 95 | 2652 | """Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
from test.test_support import run_unittest
class InstallScriptsTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             unittest.TestCase):
    """Tests for the distutils ``install_scripts`` command."""

    def test_default_settings(self):
        """Options start unset and are inherited via finalize_options()."""
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts="/foo/bar")
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts="/splat/funk",
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)
        # Before finalize_options() nothing has been propagated yet.
        # (assertFalse/assertIsNone give clearer failure messages than
        # the original assertTrue(not ...)/assertTrue(... is None).)
        self.assertFalse(cmd.force)
        self.assertFalse(cmd.skip_build)
        self.assertIsNone(cmd.build_dir)
        self.assertIsNone(cmd.install_dir)

        cmd.finalize_options()

        # Values come from the build/install command objects.
        self.assertTrue(cmd.force)
        self.assertTrue(cmd.skip_build)
        self.assertEqual(cmd.build_dir, "/foo/bar")
        self.assertEqual(cmd.install_dir, "/splat/funk")

    def test_installation(self):
        """run() copies every script from the build dir to the target dir."""
        source = self.mkdtemp()
        expected = []

        def write_script(name, text):
            # Remember each written name so the result can be verified.
            expected.append(name)
            f = open(os.path.join(source, name), "w")
            try:
                f.write(text)
            finally:
                f.close()

        write_script("script1.py", ("#! /usr/bin/env python2.3\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("script2.py", ("#!/usr/bin/python\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("shell.sh", ("#!/bin/sh\n"
                                  "# bogus shell script w/ sh-bang\n"
                                  "exit 0\n"))

        target = self.mkdtemp()
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts=target,
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)
        cmd.finalize_options()
        cmd.run()

        installed = os.listdir(target)
        for name in expected:
            # assertIn reports the missing name and the directory listing.
            self.assertIn(name, installed)
def test_suite():
    """Return a suite containing every test in InstallScriptsTestCase."""
    return unittest.makeSuite(InstallScriptsTestCase)

# Allow running this test module stand-alone.
if __name__ == "__main__":
    run_unittest(test_suite())
| mit |
baishancloud/pykit | shell/command.py | 2 | 3735 | #!/usr/bin/env python2
# coding: utf-8
import argparse
import copy
import sys
import logging
import os
from pykit import dictutil
logger = logging.getLogger(__name__)
def command(**kwargs):
    """Dispatch ``sys.argv`` onto the nested command tree in ``kwargs``.

    ``kwargs`` is a nested dict mapping command words to sub-dicts or to
    executables (see is_node_executable); the special keys
    ``__add_help__`` and ``__description__`` configure argparse help
    (see add_command_help).

    Walks the tree one argv token at a time; when an executable node is
    reached it is called with the remaining (possibly parsed) arguments
    and the process exits with 0 on success, 1 on failure/exception, or
    2 when no command matched.
    """
    root, parser = add_command_help(kwargs)

    inputs = sys.argv[1:]

    try:
        cmds = []
        # Descend the tree, consuming one command word per level.
        # ("in" replaces the Python-2-only dict.has_key()).
        while inputs and inputs[0] in root:
            k = inputs.pop(0)
            cmds.append(k)
            node = root[k]

            if is_node_executable(node):
                call_able, args = parse_executable_node(parser, cmds, node, inputs)

                try:
                    logger.debug("command: " + repr(cmds) + ' args: ' + repr(args) + ' cwd: ' + repr(os.getcwd()))
                    rc = call_able(*args)
                    # Success iff rc is None, True or the integer 0.
                    # Compare 0 by value: the previous "rc is 0" relied on
                    # CPython small-int interning.  A bool False must still
                    # be treated as failure, hence the isinstance guard.
                    ok = (rc is None or rc is True
                          or (not isinstance(rc, bool) and rc == 0))
                    sys.exit(0 if ok else 1)
                except Exception as e:
                    logger.exception(repr(e))
                    sys.stderr.write(repr(e))
                    sys.exit(1)
            else:
                # Not executable yet: descend into the sub-tree.
                root = node

        # No executable node was reached: show help or report the error.
        if need_to_show_help(parser):
            if len(cmds) > 0:
                argv = [' '.join(cmds)] + inputs
            else:
                argv = inputs
            parser.parse_args(argv)
        else:
            sys.stderr.write('No such command: ' + ' '.join(sys.argv[1:]))
        sys.exit(2)
    except Exception as e:
        logger.exception(repr(e))
        sys.stderr.write(repr(e))
        sys.exit(1)
def add_command_help(commands):
    """Split the command tree from its help metadata.

    Returns ``(tree, parser)`` where ``tree`` is a deep copy of
    ``commands`` with the ``__add_help__``/``__description__`` keys
    removed, and ``parser`` is an argparse parser with one subcommand
    per tree path -- or ``None`` when no ``__add_help__`` was supplied.
    """
    new_cmds = copy.deepcopy(commands)

    help_msgs = new_cmds.get('__add_help__')
    desc = new_cmds.get('__description__')

    # Strip the metadata keys ("pop" replaces the Py2-only has_key check).
    for k in ('__add_help__', '__description__'):
        new_cmds.pop(k, None)

    if help_msgs is None:
        return new_cmds, None

    parser = argparse.ArgumentParser(description=desc, epilog='\n')
    subparsers = parser.add_subparsers(help=' command(s) to select ...')

    for cmds, execute_able in dictutil.depth_iter(new_cmds):
        # Renamed from "help" to avoid shadowing the builtin.
        help_text = help_msgs.get(tuple(cmds), '')
        cmd = ' '.join(cmds)
        cmd_parser = subparsers.add_parser(cmd, help=help_text)

        if need_param_help(execute_able):
            call_able = execute_able[0]
            param_msgs = execute_able[1:]
            params = add_param_help(cmd_parser, param_msgs)

            # Replace the (callable, *param_msgs) node with
            # (callable, param_names): the help text is no longer needed.
            dictutil.make_setter(cmds)(new_cmds, (call_able, params))

    return new_cmds, parser
def add_param_help(parser, param_msgs):
    """Register each (flag, options) pair on *parser*.

    Returns the bare parameter names (leading dashes stripped), in the
    order they were declared.
    """
    names = []
    for flag, options in param_msgs:
        parser.add_argument(flag, **options)
        names.append(flag.lstrip('-'))
    return names
def parse_executable_node(parser, cmds, execute_able, args):
    """Resolve an executable tree node into ``(callable, call_args)``.

    ``execute_able`` is either a bare callable or a ``(callable, params)``
    tuple produced by add_command_help; ``args`` is the unconsumed argv
    tail.  When help is enabled the argv tail is run through argparse and
    mapped onto the declared parameter names.
    """
    if not need_to_show_help(parser):
        # no __add_help__ but has paramter help message
        if args_need_to_parse(execute_able):
            return execute_able[0], args

        return execute_able, args

    # The subcommand name registered with argparse is the joined path.
    args_parsed = parser.parse_args([' '.join(cmds)] + args)
    # to dict
    args_parsed = vars(args_parsed)

    if not args_need_to_parse(execute_able):
        return execute_able, args

    # Map parsed option values back onto the declared parameter order.
    call_able, params = execute_able
    args = [args_parsed.get(x) for x in params]

    return call_able, args
def is_node_executable(node):
    """True if *node* is a callable, or a non-empty list/tuple whose
    first element is callable."""
    if isinstance(node, (list, tuple)):
        return bool(node) and callable(node[0])
    return callable(node)
def need_to_show_help(parser):
    """Help rendering is enabled iff an argparse parser was built."""
    has_parser = parser is not None
    return has_parser
def args_need_to_parse(execute_able):
    """A (callable, params) tuple means argv must be mapped onto params."""
    if isinstance(execute_able, tuple):
        return True
    return False
def need_param_help(execute_able):
    """True when the node carries per-parameter help entries, i.e. it is a
    list/tuple of (callable, *param_msgs) with at least one message."""
    if not isinstance(execute_able, (list, tuple)):
        return False
    return len(execute_able) > 1
| mit |
arthru/OpenUpgrade | addons/portal_gamification/__openerp__.py | 381 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Gamification',
    'version': '1',
    # The manifest originally declared 'category' twice ('Tools' first,
    # 'Hidden' last); Python keeps the last duplicate, so 'Hidden' was the
    # effective value and the dead 'Tools' entry has been removed.
    'category': 'Hidden',
    'complexity': 'easy',
    'description': """
This module adds security rules for gamification to allow portal users to participate to challenges
===================================================================================================
    """,
    'author': 'OpenERP SA',
    'depends': ['gamification','portal'],
    'data': [
        'security/ir.model.access.csv',
        'security/portal_security.xml',
    ],
    'installable': True,
    'auto_install': True,
}
| agpl-3.0 |
vnsofthe/odoo-dev | openerp/addons/base/tests/test_view_validation.py | 396 | 3427 | # This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_view_validation.py
from lxml import etree
from StringIO import StringIO
import unittest2
from openerp.tools.view_validation import (valid_page_in_book, valid_att_in_form, valid_type_in_colspan,
valid_type_in_col, valid_att_in_field, valid_att_in_label,
valid_field_in_graph, valid_field_in_tree
)
invalid_form = etree.parse(StringIO('''\
<form>
<label></label>
<group>
<div>
<page></page>
<label colspan="True"></label>
<field></field>
</div>
</group>
<notebook>
<page>
<group col="Two">
<div>
<label></label>
<field colspan="Five"> </field>
</div>
</group>
</page>
</notebook>
</form>
''')).getroot()
valid_form = etree.parse(StringIO('''\
<form string="">
<field name=""></field>
<field name=""></field>
<notebook>
<page>
<field name=""></field>
<label string=""></label>
<field name=""></field>
</page>
<page>
<group colspan="5" col="2">
<label for=""></label>
<label string="" colspan="5"></label>
</group>
</page>
</notebook>
</form>
''')).getroot()
invalid_graph = etree.parse(StringIO('''\
<graph>
<label/>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</graph>
''')).getroot()
valid_graph = etree.parse(StringIO('''\
<graph string="">
<field name=""></field>
<field name=""></field>
</graph>
''')).getroot()
invalid_tree = etree.parse(StringIO('''\
<tree>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</tree>
''')).getroot()
valid_tree = etree.parse(StringIO('''\
<tree string="">
<field name=""></field>
<field name=""></field>
<button/>
<field name=""></field>
</tree>
''')).getroot()
class test_view_validation(unittest2.TestCase):
    """ Test the view validation code (but not the views themselves). """

    def test_page_validation(self):
        # invalid_form places a <page> outside any <notebook>.
        assert not valid_page_in_book(invalid_form)
        assert valid_page_in_book(valid_form)

    def test_all_field_validation(self):
        # invalid_form contains <field> elements with no name attribute.
        assert not valid_att_in_field(invalid_form)
        assert valid_att_in_field(valid_form)

    def test_all_label_validation(self):
        # invalid_form contains <label> elements without 'for' or 'string'.
        assert not valid_att_in_label(invalid_form)
        assert valid_att_in_label(valid_form)

    def test_form_string_validation(self):
        # valid_form carries the required string attribute on <form>.
        assert valid_att_in_form(valid_form)

    def test_graph_validation(self):
        # invalid_graph nests unnamed fields inside group/div wrappers.
        assert not valid_field_in_graph(invalid_graph)
        assert valid_field_in_graph(valid_graph)

    def test_tree_validation(self):
        # invalid_tree nests unnamed fields inside group/div wrappers.
        assert not valid_field_in_tree(invalid_tree)
        assert valid_field_in_tree(valid_tree)

    def test_colspan_datatype_validation(self):
        # invalid_form uses non-numeric colspan values ("True", "Five").
        assert not valid_type_in_colspan(invalid_form)
        assert valid_type_in_colspan(valid_form)

    def test_col_datatype_validation(self):
        # invalid_form uses the non-numeric col value "Two".
        assert not valid_type_in_col(invalid_form)
        assert valid_type_in_col(valid_form)
# Allow running this module stand-alone (see the header comment).
if __name__ == '__main__':
    unittest2.main()
| agpl-3.0 |
flavio-fernandes/networking-odl | networking_odl/openstack/common/_i18n.py | 3 | 1735 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
# Set up the translation markers, degrading gracefully when oslo.i18n is
# not installed: every marker then becomes an identity function and
# messages pass through untranslated.
try:
    import oslo_i18n

    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
    # application name when this module is synced into the separate
    # repository. It is OK to have more than one translation function
    # using the same domain, since there will still only be one message
    # catalog.
    _translators = oslo_i18n.TranslatorFactory(domain='networking_odl')

    # The primary translation function using the well-known name "_"
    _ = _translators.primary

    # Translators for log levels.
    #
    # The abbreviated names are meant to reflect the usual use of a short
    # name like '_'. The "L" is for "log" and the other letter comes from
    # the level.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical
except ImportError:
    # NOTE(dims): Support for cases where a project wants to use
    # code from oslo-incubator, but is not ready to be internationalized
    # (like tempest)
    _ = _LI = _LW = _LE = _LC = lambda x: x
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.